repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
ethereum/eth-account
|
eth_account/_utils/structured_data/hashing.py
|
encode_type
|
python
|
def encode_type(primary_type, types):
# Getting the dependencies and sorting them alphabetically as per EIP712
deps = get_dependencies(primary_type, types)
sorted_deps = (primary_type,) + tuple(sorted(deps))
result = ''.join(
[
encode_struct(struct_name, types[struct_name])
for struct_name in sorted_deps
]
)
return result
|
The type of a struct is encoded as name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖ "," ‖ … ‖ memberₙ ")"
where each member is written as type ‖ " " ‖ name.
|
train
|
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L72-L87
|
[
"def get_dependencies(primary_type, types):\n \"\"\"\n Perform DFS to get all the dependencies of the primary_type\n \"\"\"\n deps = set()\n struct_names_yet_to_be_expanded = [primary_type]\n\n while len(struct_names_yet_to_be_expanded) > 0:\n struct_name = struct_names_yet_to_be_expanded.pop()\n\n deps.add(struct_name)\n fields = types[struct_name]\n for field in fields:\n if field[\"type\"] not in types:\n # We don't need to expand types that are not user defined (customized)\n continue\n elif field[\"type\"] in deps:\n # skip types that we have already encountered\n continue\n else:\n # Custom Struct Type\n struct_names_yet_to_be_expanded.append(field[\"type\"])\n\n # Don't need to make a struct as dependency of itself\n deps.remove(primary_type)\n\n return tuple(deps)\n"
] |
from itertools import (
groupby,
)
import json
from operator import (
itemgetter,
)
from eth_abi import (
encode_abi,
is_encodable,
)
from eth_abi.grammar import (
parse,
)
from eth_utils import (
ValidationError,
keccak,
to_tuple,
toolz,
)
from .validation import (
validate_structured_data,
)
def get_dependencies(primary_type, types):
    """
    Perform DFS to get all the dependencies of the primary_type
    """
    found = set()
    to_visit = [primary_type]
    while to_visit:
        current = to_visit.pop()
        found.add(current)
        for member in types[current]:
            member_type = member["type"]
            # Only custom (user defined) struct types need expanding, and
            # only when they have not been visited already.
            if member_type in types and member_type not in found:
                to_visit.append(member_type)
    # A struct is not considered a dependency of itself
    found.discard(primary_type)
    return tuple(found)
def field_identifier(field):
    """
    Given a ``field`` of the format {'name': NAME, 'type': TYPE},
    this function converts it to ``TYPE NAME``
    """
    return "{type} {name}".format(**field)
def encode_struct(struct_name, struct_field_types):
    # Render as StructName(type1 name1,type2 name2,...)
    member_list = ','.join(field_identifier(member) for member in struct_field_types)
    return "{0}({1})".format(struct_name, member_list)
def hash_struct_type(primary_type, types):
    # typeHash as per EIP-712: keccak256 of the struct's type encoding.
    return keccak(text=encode_type(primary_type, types))
def is_valid_abi_type(type_name):
    """
    This function is used to make sure that the ``type_name`` is a valid ABI Type.

    Please note that this is a temporary function and should be replaced by the
    corresponding ABI function, once the following issue has been resolved.
    https://github.com/ethereum/eth-abi/issues/125
    """
    valid_abi_types = {"address", "bool", "bytes", "int", "string", "uint"}
    # Check the plain base types first: the original eagerly ran
    # ``int(type_name[5:])`` before this membership test, so "bytes", "int"
    # and "uint" (empty numeric suffix) raised ValueError instead of
    # returning True.
    if type_name in valid_abi_types:
        return True
    for prefix, low, high, multiple in (
        ("bytes", 1, 32, 1),    # bytes1 to bytes32
        ("uint", 8, 256, 8),    # uint8 to uint256, multiples of 8
        ("int", 8, 256, 8),     # int8 to int256, multiples of 8
    ):
        if type_name.startswith(prefix):
            suffix = type_name[len(prefix):]
            # A non-numeric suffix (e.g. "bytesFoo") is simply invalid,
            # not an error.
            if not suffix.isdigit():
                return False
            size = int(suffix)
            return low <= size <= high and size % multiple == 0
    return False
def is_array_type(type):
    # Identify if type such as "person[]" or "person[2]" is an array
    # NOTE(review): ``type`` shadows the builtin name; kept for compatibility.
    abi_type = parse(type)
    return abi_type.is_array
@to_tuple
def get_depths_and_dimensions(data, depth):
    """
    Yields 2-length tuples of depth and dimension of each element at that depth
    """
    # Dictionaries and strings are iterable too, so an explicit list/tuple
    # check is used instead of testing for Iterable.
    if isinstance(data, (list, tuple)):
        yield depth, len(data)
        for sub_data in data:
            # recurse into every element, one dimension deeper
            yield from get_depths_and_dimensions(sub_data, depth + 1)
def get_array_dimensions(data):
    """
    Given an array type data item, check that it is an array and
    return the dimensions as a tuple.

    Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
    """
    depths_and_dimensions = get_depths_and_dimensions(data, 0)

    # re-form as a dictionary with `depth` as key, and all of the dimensions
    # found at that depth.  ``groupby`` only groups *adjacent* items, but the
    # (depth, dimension) pairs arrive in depth-first order, so without sorting
    # a depth that reappears later silently overwrites the dimensions already
    # collected for it, defeating the ragged-array validation below.
    grouped_by_depth = {
        depth: tuple(dimension for depth, dimension in group)
        for depth, group in groupby(sorted(depths_and_dimensions), itemgetter(0))
    }

    # validate that there is only one dimension for any given depth.
    invalid_depths_dimensions = tuple(
        (depth, dimensions)
        for depth, dimensions in grouped_by_depth.items()
        if len(set(dimensions)) != 1
    )
    if invalid_depths_dimensions:
        raise ValidationError(
            '\n'.join(
                [
                    "Depth {0} of array data has more than one dimensions: {1}".
                    format(depth, dimensions)
                    for depth, dimensions in invalid_depths_dimensions
                ]
            )
        )

    dimensions = tuple(
        toolz.first(set(dimensions))
        for depth, dimensions in sorted(grouped_by_depth.items())
    )

    return dimensions
@to_tuple
def flatten_multidimensional_array(array):
    """
    Yield the leaf (non list/tuple) items of ``array`` in depth-first order.
    """
    for item in array:
        # The isinstance branches were inverted: the code recursed into
        # non-iterable leaves (TypeError) and yielded nested lists as-is.
        if isinstance(item, (list, tuple)):
            # Dictionaries and strings are iterable too, so an explicit
            # list/tuple check is used instead of testing for Iterable.
            yield from flatten_multidimensional_array(item)
        else:
            yield item
@to_tuple
def _encode_data(primary_type, types, data):
    """
    Yield ``(abi_type, value)`` pairs for ``primary_type``: first the
    typehash, then one pair per field of ``data``, ready for ``encode_abi``.

    Raises TypeError when a value does not match its declared field type.
    """
    # Add typehash
    yield "bytes32", hash_struct_type(primary_type, types)

    # Add field contents
    for field in types[primary_type]:
        value = data[field["name"]]
        if field["type"] == "string":
            if not isinstance(value, str):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "string value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(text=value)
            yield "bytes32", hashed_value
        elif field["type"] == "bytes":
            if not isinstance(value, bytes):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "bytes value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(primitive=value)
            yield "bytes32", hashed_value
        elif field["type"] in types:
            # This means that this type is a user defined type
            hashed_value = keccak(primitive=encode_data(field["type"], types, value))
            yield "bytes32", hashed_value
        elif is_array_type(field["type"]):
            # Get the dimensions from the value
            array_dimensions = get_array_dimensions(value)
            # Get the dimensions from what was declared in the schema
            parsed_type = parse(field["type"])
            for i in range(len(array_dimensions)):
                if len(parsed_type.arrlist[i]) == 0:
                    # Skip empty or dynamically declared dimensions
                    continue
                if array_dimensions[i] != parsed_type.arrlist[i][0]:
                    # Dimensions should match with declared schema
                    raise TypeError(
                        "Array data `{0}` has dimensions `{1}` whereas the "
                        "schema has dimensions `{2}`".format(
                            value,
                            array_dimensions,
                            tuple(map(lambda x: x[0], parsed_type.arrlist)),
                        )
                    )
            array_items = flatten_multidimensional_array(value)
            array_items_encoding = [
                encode_data(parsed_type.base, types, array_item)
                for array_item in array_items
            ]
            # ``encode_data`` returns ``bytes``; join with a bytes separator.
            # (A str separator here raises TypeError on bytes items, so the
            # array branch could never succeed.)
            concatenated_array_encodings = b''.join(array_items_encoding)
            hashed_value = keccak(concatenated_array_encodings)
            yield "bytes32", hashed_value
        else:
            # First checking to see if type is valid as per abi
            if not is_valid_abi_type(field["type"]):
                raise TypeError(
                    "Received Invalid type `{0}` in the struct `{1}`".format(
                        field["type"],
                        primary_type,
                    )
                )
            # Next see if the data fits the specified encoding type
            if is_encodable(field["type"], value):
                # field["type"] is a valid type and this value corresponds to that type.
                yield field["type"], value
            else:
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "{4} value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                        field["type"],
                    )
                )
def encode_data(primaryType, types, data):
    # Gather the (abi type, value) pairs for the struct, then ABI-encode
    # them all in a single call.
    abi_types, values = zip(*_encode_data(primaryType, types, data))
    return encode_abi(abi_types, values)
def load_and_validate_structured_message(structured_json_string_data):
    # Parse the EIP-712 JSON payload, then validate its structure before
    # handing it to the hashing helpers.
    structured_data = json.loads(structured_json_string_data)
    validate_structured_data(structured_data)
    return structured_data
def hash_domain(structured_data):
    # hashStruct(domain): keccak256 of the ABI-encoded EIP712Domain struct.
    return keccak(
        encode_data(
            "EIP712Domain",
            structured_data["types"],
            structured_data["domain"]
        )
    )
def hash_message(structured_data):
    # hashStruct(message): keccak256 of the ABI-encoded primary-type struct.
    return keccak(
        encode_data(
            structured_data["primaryType"],
            structured_data["types"],
            structured_data["message"]
        )
    )
|
ethereum/eth-account
|
eth_account/_utils/structured_data/hashing.py
|
is_valid_abi_type
|
python
|
def is_valid_abi_type(type_name):
valid_abi_types = {"address", "bool", "bytes", "int", "string", "uint"}
is_bytesN = type_name.startswith("bytes") and 1 <= int(type_name[5:]) <= 32
is_intN = (
type_name.startswith("int") and
8 <= int(type_name[3:]) <= 256 and
int(type_name[3:]) % 8 == 0
)
is_uintN = (
type_name.startswith("uint") and
8 <= int(type_name[4:]) <= 256 and
int(type_name[4:]) % 8 == 0
)
if type_name in valid_abi_types:
return True
elif is_bytesN:
# bytes1 to bytes32
return True
elif is_intN:
# int8 to int256
return True
elif is_uintN:
# uint8 to uint256
return True
return False
|
This function is used to make sure that the ``type_name`` is a valid ABI Type.
Please note that this is a temporary function and should be replaced by the corresponding
ABI function, once the following issue has been resolved.
https://github.com/ethereum/eth-abi/issues/125
|
train
|
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L94-L127
| null |
from itertools import (
groupby,
)
import json
from operator import (
itemgetter,
)
from eth_abi import (
encode_abi,
is_encodable,
)
from eth_abi.grammar import (
parse,
)
from eth_utils import (
ValidationError,
keccak,
to_tuple,
toolz,
)
from .validation import (
validate_structured_data,
)
def get_dependencies(primary_type, types):
    """
    Perform DFS to get all the dependencies of the primary_type
    """
    found = set()
    to_visit = [primary_type]
    while to_visit:
        current = to_visit.pop()
        found.add(current)
        for member in types[current]:
            member_type = member["type"]
            # Only custom (user defined) struct types need expanding, and
            # only when they have not been visited already.
            if member_type in types and member_type not in found:
                to_visit.append(member_type)
    # A struct is not considered a dependency of itself
    found.discard(primary_type)
    return tuple(found)
def field_identifier(field):
    """
    Given a ``field`` of the format {'name': NAME, 'type': TYPE},
    this function converts it to ``TYPE NAME``
    """
    return "{type} {name}".format(**field)
def encode_struct(struct_name, struct_field_types):
    # Render as StructName(type1 name1,type2 name2,...)
    member_list = ','.join(field_identifier(member) for member in struct_field_types)
    return "{0}({1})".format(struct_name, member_list)
def encode_type(primary_type, types):
    """
    The type of a struct is encoded as name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖
    "," ‖ … ‖ memberₙ ")" where each member is written as type ‖ " " ‖ name.
    """
    # The primary type comes first, followed by its dependencies sorted
    # alphabetically, as required by EIP-712.
    ordered_structs = [primary_type]
    ordered_structs.extend(sorted(get_dependencies(primary_type, types)))
    return ''.join(
        encode_struct(struct_name, types[struct_name])
        for struct_name in ordered_structs
    )
def hash_struct_type(primary_type, types):
    # typeHash as per EIP-712: keccak256 of the struct's type encoding.
    return keccak(text=encode_type(primary_type, types))
def is_array_type(type):
    # Identify if type such as "person[]" or "person[2]" is an array
    # NOTE(review): ``type`` shadows the builtin name; kept for compatibility.
    abi_type = parse(type)
    return abi_type.is_array
@to_tuple
def get_depths_and_dimensions(data, depth):
    """
    Yields 2-length tuples of depth and dimension of each element at that depth
    """
    # Dictionaries and strings are iterable too, so an explicit list/tuple
    # check is used instead of testing for Iterable.
    if isinstance(data, (list, tuple)):
        yield depth, len(data)
        for sub_data in data:
            # recurse into every element, one dimension deeper
            yield from get_depths_and_dimensions(sub_data, depth + 1)
def get_array_dimensions(data):
    """
    Given an array type data item, check that it is an array and
    return the dimensions as a tuple.

    Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
    """
    depths_and_dimensions = get_depths_and_dimensions(data, 0)

    # re-form as a dictionary with `depth` as key, and all of the dimensions
    # found at that depth.  ``groupby`` only groups *adjacent* items, but the
    # (depth, dimension) pairs arrive in depth-first order, so without sorting
    # a depth that reappears later silently overwrites the dimensions already
    # collected for it, defeating the ragged-array validation below.
    grouped_by_depth = {
        depth: tuple(dimension for depth, dimension in group)
        for depth, group in groupby(sorted(depths_and_dimensions), itemgetter(0))
    }

    # validate that there is only one dimension for any given depth.
    invalid_depths_dimensions = tuple(
        (depth, dimensions)
        for depth, dimensions in grouped_by_depth.items()
        if len(set(dimensions)) != 1
    )
    if invalid_depths_dimensions:
        raise ValidationError(
            '\n'.join(
                [
                    "Depth {0} of array data has more than one dimensions: {1}".
                    format(depth, dimensions)
                    for depth, dimensions in invalid_depths_dimensions
                ]
            )
        )

    dimensions = tuple(
        toolz.first(set(dimensions))
        for depth, dimensions in sorted(grouped_by_depth.items())
    )

    return dimensions
@to_tuple
def flatten_multidimensional_array(array):
    """
    Yield the leaf (non list/tuple) items of ``array`` in depth-first order.
    """
    for item in array:
        # The isinstance branches were inverted: the code recursed into
        # non-iterable leaves (TypeError) and yielded nested lists as-is.
        if isinstance(item, (list, tuple)):
            # Dictionaries and strings are iterable too, so an explicit
            # list/tuple check is used instead of testing for Iterable.
            yield from flatten_multidimensional_array(item)
        else:
            yield item
@to_tuple
def _encode_data(primary_type, types, data):
    """
    Yield ``(abi_type, value)`` pairs for ``primary_type``: first the
    typehash, then one pair per field of ``data``, ready for ``encode_abi``.

    Raises TypeError when a value does not match its declared field type.
    """
    # Add typehash
    yield "bytes32", hash_struct_type(primary_type, types)

    # Add field contents
    for field in types[primary_type]:
        value = data[field["name"]]
        if field["type"] == "string":
            if not isinstance(value, str):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "string value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(text=value)
            yield "bytes32", hashed_value
        elif field["type"] == "bytes":
            if not isinstance(value, bytes):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "bytes value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(primitive=value)
            yield "bytes32", hashed_value
        elif field["type"] in types:
            # This means that this type is a user defined type
            hashed_value = keccak(primitive=encode_data(field["type"], types, value))
            yield "bytes32", hashed_value
        elif is_array_type(field["type"]):
            # Get the dimensions from the value
            array_dimensions = get_array_dimensions(value)
            # Get the dimensions from what was declared in the schema
            parsed_type = parse(field["type"])
            for i in range(len(array_dimensions)):
                if len(parsed_type.arrlist[i]) == 0:
                    # Skip empty or dynamically declared dimensions
                    continue
                if array_dimensions[i] != parsed_type.arrlist[i][0]:
                    # Dimensions should match with declared schema
                    raise TypeError(
                        "Array data `{0}` has dimensions `{1}` whereas the "
                        "schema has dimensions `{2}`".format(
                            value,
                            array_dimensions,
                            tuple(map(lambda x: x[0], parsed_type.arrlist)),
                        )
                    )
            array_items = flatten_multidimensional_array(value)
            array_items_encoding = [
                encode_data(parsed_type.base, types, array_item)
                for array_item in array_items
            ]
            # ``encode_data`` returns ``bytes``; join with a bytes separator.
            # (A str separator here raises TypeError on bytes items, so the
            # array branch could never succeed.)
            concatenated_array_encodings = b''.join(array_items_encoding)
            hashed_value = keccak(concatenated_array_encodings)
            yield "bytes32", hashed_value
        else:
            # First checking to see if type is valid as per abi
            if not is_valid_abi_type(field["type"]):
                raise TypeError(
                    "Received Invalid type `{0}` in the struct `{1}`".format(
                        field["type"],
                        primary_type,
                    )
                )
            # Next see if the data fits the specified encoding type
            if is_encodable(field["type"], value):
                # field["type"] is a valid type and this value corresponds to that type.
                yield field["type"], value
            else:
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "{4} value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                        field["type"],
                    )
                )
def encode_data(primaryType, types, data):
    # Gather the (abi type, value) pairs for the struct, then ABI-encode
    # them all in a single call.
    abi_types, values = zip(*_encode_data(primaryType, types, data))
    return encode_abi(abi_types, values)
def load_and_validate_structured_message(structured_json_string_data):
    # Parse the EIP-712 JSON payload, then validate its structure before
    # handing it to the hashing helpers.
    structured_data = json.loads(structured_json_string_data)
    validate_structured_data(structured_data)
    return structured_data
def hash_domain(structured_data):
    # hashStruct(domain): keccak256 of the ABI-encoded EIP712Domain struct.
    return keccak(
        encode_data(
            "EIP712Domain",
            structured_data["types"],
            structured_data["domain"]
        )
    )
def hash_message(structured_data):
    # hashStruct(message): keccak256 of the ABI-encoded primary-type struct.
    return keccak(
        encode_data(
            structured_data["primaryType"],
            structured_data["types"],
            structured_data["message"]
        )
    )
|
ethereum/eth-account
|
eth_account/_utils/structured_data/hashing.py
|
get_depths_and_dimensions
|
python
|
def get_depths_and_dimensions(data, depth):
if not isinstance(data, (list, tuple)):
# Not checking for Iterable instance, because even Dictionaries and strings
# are considered as iterables, but that's not what we want the condition to be.
return ()
yield depth, len(data)
for item in data:
# iterating over all 1 dimension less sub-data items
yield from get_depths_and_dimensions(item, depth + 1)
|
Yields 2-length tuples of depth and dimension of each element at that depth
|
train
|
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L137-L150
| null |
from itertools import (
groupby,
)
import json
from operator import (
itemgetter,
)
from eth_abi import (
encode_abi,
is_encodable,
)
from eth_abi.grammar import (
parse,
)
from eth_utils import (
ValidationError,
keccak,
to_tuple,
toolz,
)
from .validation import (
validate_structured_data,
)
def get_dependencies(primary_type, types):
    """
    Perform DFS to get all the dependencies of the primary_type
    """
    found = set()
    to_visit = [primary_type]
    while to_visit:
        current = to_visit.pop()
        found.add(current)
        for member in types[current]:
            member_type = member["type"]
            # Only custom (user defined) struct types need expanding, and
            # only when they have not been visited already.
            if member_type in types and member_type not in found:
                to_visit.append(member_type)
    # A struct is not considered a dependency of itself
    found.discard(primary_type)
    return tuple(found)
def field_identifier(field):
    """
    Given a ``field`` of the format {'name': NAME, 'type': TYPE},
    this function converts it to ``TYPE NAME``
    """
    return "{type} {name}".format(**field)
def encode_struct(struct_name, struct_field_types):
    # Render as StructName(type1 name1,type2 name2,...)
    member_list = ','.join(field_identifier(member) for member in struct_field_types)
    return "{0}({1})".format(struct_name, member_list)
def encode_type(primary_type, types):
    """
    The type of a struct is encoded as name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖
    "," ‖ … ‖ memberₙ ")" where each member is written as type ‖ " " ‖ name.
    """
    # The primary type comes first, followed by its dependencies sorted
    # alphabetically, as required by EIP-712.
    ordered_structs = [primary_type]
    ordered_structs.extend(sorted(get_dependencies(primary_type, types)))
    return ''.join(
        encode_struct(struct_name, types[struct_name])
        for struct_name in ordered_structs
    )
def hash_struct_type(primary_type, types):
    # typeHash as per EIP-712: keccak256 of the struct's type encoding.
    return keccak(text=encode_type(primary_type, types))
def is_valid_abi_type(type_name):
    """
    This function is used to make sure that the ``type_name`` is a valid ABI Type.

    Please note that this is a temporary function and should be replaced by the
    corresponding ABI function, once the following issue has been resolved.
    https://github.com/ethereum/eth-abi/issues/125
    """
    valid_abi_types = {"address", "bool", "bytes", "int", "string", "uint"}
    # Check the plain base types first: the original eagerly ran
    # ``int(type_name[5:])`` before this membership test, so "bytes", "int"
    # and "uint" (empty numeric suffix) raised ValueError instead of
    # returning True.
    if type_name in valid_abi_types:
        return True
    for prefix, low, high, multiple in (
        ("bytes", 1, 32, 1),    # bytes1 to bytes32
        ("uint", 8, 256, 8),    # uint8 to uint256, multiples of 8
        ("int", 8, 256, 8),     # int8 to int256, multiples of 8
    ):
        if type_name.startswith(prefix):
            suffix = type_name[len(prefix):]
            # A non-numeric suffix (e.g. "bytesFoo") is simply invalid,
            # not an error.
            if not suffix.isdigit():
                return False
            size = int(suffix)
            return low <= size <= high and size % multiple == 0
    return False
def is_array_type(type):
    # Identify if type such as "person[]" or "person[2]" is an array
    # NOTE(review): ``type`` shadows the builtin name; kept for compatibility.
    abi_type = parse(type)
    return abi_type.is_array
@to_tuple
# NOTE(review): this @to_tuple looks stray (the function already returns a
# tuple, so wrapping it is a no-op); kept because removing it would alter the
# module's decoration — confirm against upstream.
def get_array_dimensions(data):
    """
    Given an array type data item, check that it is an array and
    return the dimensions as a tuple.

    Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
    """
    depths_and_dimensions = get_depths_and_dimensions(data, 0)

    # re-form as a dictionary with `depth` as key, and all of the dimensions
    # found at that depth.  ``groupby`` only groups *adjacent* items, but the
    # (depth, dimension) pairs arrive in depth-first order, so without sorting
    # a depth that reappears later silently overwrites the dimensions already
    # collected for it, defeating the ragged-array validation below.
    grouped_by_depth = {
        depth: tuple(dimension for depth, dimension in group)
        for depth, group in groupby(sorted(depths_and_dimensions), itemgetter(0))
    }

    # validate that there is only one dimension for any given depth.
    invalid_depths_dimensions = tuple(
        (depth, dimensions)
        for depth, dimensions in grouped_by_depth.items()
        if len(set(dimensions)) != 1
    )
    if invalid_depths_dimensions:
        raise ValidationError(
            '\n'.join(
                [
                    "Depth {0} of array data has more than one dimensions: {1}".
                    format(depth, dimensions)
                    for depth, dimensions in invalid_depths_dimensions
                ]
            )
        )

    dimensions = tuple(
        toolz.first(set(dimensions))
        for depth, dimensions in sorted(grouped_by_depth.items())
    )

    return dimensions
@to_tuple
def flatten_multidimensional_array(array):
    """
    Yield the leaf (non list/tuple) items of ``array`` in depth-first order.
    """
    for item in array:
        # The isinstance branches were inverted: the code recursed into
        # non-iterable leaves (TypeError) and yielded nested lists as-is.
        if isinstance(item, (list, tuple)):
            # Dictionaries and strings are iterable too, so an explicit
            # list/tuple check is used instead of testing for Iterable.
            yield from flatten_multidimensional_array(item)
        else:
            yield item
@to_tuple
def _encode_data(primary_type, types, data):
    """
    Yield ``(abi_type, value)`` pairs for ``primary_type``: first the
    typehash, then one pair per field of ``data``, ready for ``encode_abi``.

    Raises TypeError when a value does not match its declared field type.
    """
    # Add typehash
    yield "bytes32", hash_struct_type(primary_type, types)

    # Add field contents
    for field in types[primary_type]:
        value = data[field["name"]]
        if field["type"] == "string":
            if not isinstance(value, str):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "string value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(text=value)
            yield "bytes32", hashed_value
        elif field["type"] == "bytes":
            if not isinstance(value, bytes):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "bytes value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(primitive=value)
            yield "bytes32", hashed_value
        elif field["type"] in types:
            # This means that this type is a user defined type
            hashed_value = keccak(primitive=encode_data(field["type"], types, value))
            yield "bytes32", hashed_value
        elif is_array_type(field["type"]):
            # Get the dimensions from the value
            array_dimensions = get_array_dimensions(value)
            # Get the dimensions from what was declared in the schema
            parsed_type = parse(field["type"])
            for i in range(len(array_dimensions)):
                if len(parsed_type.arrlist[i]) == 0:
                    # Skip empty or dynamically declared dimensions
                    continue
                if array_dimensions[i] != parsed_type.arrlist[i][0]:
                    # Dimensions should match with declared schema
                    raise TypeError(
                        "Array data `{0}` has dimensions `{1}` whereas the "
                        "schema has dimensions `{2}`".format(
                            value,
                            array_dimensions,
                            tuple(map(lambda x: x[0], parsed_type.arrlist)),
                        )
                    )
            array_items = flatten_multidimensional_array(value)
            array_items_encoding = [
                encode_data(parsed_type.base, types, array_item)
                for array_item in array_items
            ]
            # ``encode_data`` returns ``bytes``; join with a bytes separator.
            # (A str separator here raises TypeError on bytes items, so the
            # array branch could never succeed.)
            concatenated_array_encodings = b''.join(array_items_encoding)
            hashed_value = keccak(concatenated_array_encodings)
            yield "bytes32", hashed_value
        else:
            # First checking to see if type is valid as per abi
            if not is_valid_abi_type(field["type"]):
                raise TypeError(
                    "Received Invalid type `{0}` in the struct `{1}`".format(
                        field["type"],
                        primary_type,
                    )
                )
            # Next see if the data fits the specified encoding type
            if is_encodable(field["type"], value):
                # field["type"] is a valid type and this value corresponds to that type.
                yield field["type"], value
            else:
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "{4} value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                        field["type"],
                    )
                )
def encode_data(primaryType, types, data):
    # Gather the (abi type, value) pairs for the struct, then ABI-encode
    # them all in a single call.
    abi_types, values = zip(*_encode_data(primaryType, types, data))
    return encode_abi(abi_types, values)
def load_and_validate_structured_message(structured_json_string_data):
    # Parse the EIP-712 JSON payload, then validate its structure before
    # handing it to the hashing helpers.
    structured_data = json.loads(structured_json_string_data)
    validate_structured_data(structured_data)
    return structured_data
def hash_domain(structured_data):
    # hashStruct(domain): keccak256 of the ABI-encoded EIP712Domain struct.
    return keccak(
        encode_data(
            "EIP712Domain",
            structured_data["types"],
            structured_data["domain"]
        )
    )
def hash_message(structured_data):
    # hashStruct(message): keccak256 of the ABI-encoded primary-type struct.
    return keccak(
        encode_data(
            structured_data["primaryType"],
            structured_data["types"],
            structured_data["message"]
        )
    )
|
ethereum/eth-account
|
eth_account/_utils/structured_data/hashing.py
|
get_array_dimensions
|
python
|
def get_array_dimensions(data):
depths_and_dimensions = get_depths_and_dimensions(data, 0)
# re-form as a dictionary with `depth` as key, and all of the dimensions found at that depth.
grouped_by_depth = {
depth: tuple(dimension for depth, dimension in group)
for depth, group in groupby(depths_and_dimensions, itemgetter(0))
}
# validate that there is only one dimension for any given depth.
invalid_depths_dimensions = tuple(
(depth, dimensions)
for depth, dimensions in grouped_by_depth.items()
if len(set(dimensions)) != 1
)
if invalid_depths_dimensions:
raise ValidationError(
'\n'.join(
[
"Depth {0} of array data has more than one dimensions: {1}".
format(depth, dimensions)
for depth, dimensions in invalid_depths_dimensions
]
)
)
dimensions = tuple(
toolz.first(set(dimensions))
for depth, dimensions in sorted(grouped_by_depth.items())
)
return dimensions
|
Given an array type data item, check that it is an array and
return the dimensions as a tuple.
Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
|
train
|
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/structured_data/hashing.py#L153-L188
| null |
from itertools import (
groupby,
)
import json
from operator import (
itemgetter,
)
from eth_abi import (
encode_abi,
is_encodable,
)
from eth_abi.grammar import (
parse,
)
from eth_utils import (
ValidationError,
keccak,
to_tuple,
toolz,
)
from .validation import (
validate_structured_data,
)
def get_dependencies(primary_type, types):
    """
    Perform DFS to get all the dependencies of the primary_type
    """
    found = set()
    to_visit = [primary_type]
    while to_visit:
        current = to_visit.pop()
        found.add(current)
        for member in types[current]:
            member_type = member["type"]
            # Only custom (user defined) struct types need expanding, and
            # only when they have not been visited already.
            if member_type in types and member_type not in found:
                to_visit.append(member_type)
    # A struct is not considered a dependency of itself
    found.discard(primary_type)
    return tuple(found)
def field_identifier(field):
    """
    Given a ``field`` of the format {'name': NAME, 'type': TYPE},
    this function converts it to ``TYPE NAME``
    """
    return "{type} {name}".format(**field)
def encode_struct(struct_name, struct_field_types):
    # Render as StructName(type1 name1,type2 name2,...)
    member_list = ','.join(field_identifier(member) for member in struct_field_types)
    return "{0}({1})".format(struct_name, member_list)
def encode_type(primary_type, types):
    """
    The type of a struct is encoded as name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖
    "," ‖ … ‖ memberₙ ")" where each member is written as type ‖ " " ‖ name.
    """
    # The primary type comes first, followed by its dependencies sorted
    # alphabetically, as required by EIP-712.
    ordered_structs = [primary_type]
    ordered_structs.extend(sorted(get_dependencies(primary_type, types)))
    return ''.join(
        encode_struct(struct_name, types[struct_name])
        for struct_name in ordered_structs
    )
def hash_struct_type(primary_type, types):
    # typeHash as per EIP-712: keccak256 of the struct's type encoding.
    return keccak(text=encode_type(primary_type, types))
def is_valid_abi_type(type_name):
    """
    This function is used to make sure that the ``type_name`` is a valid ABI Type.

    Please note that this is a temporary function and should be replaced by the
    corresponding ABI function, once the following issue has been resolved.
    https://github.com/ethereum/eth-abi/issues/125
    """
    valid_abi_types = {"address", "bool", "bytes", "int", "string", "uint"}
    # Check the plain base types first: the original eagerly ran
    # ``int(type_name[5:])`` before this membership test, so "bytes", "int"
    # and "uint" (empty numeric suffix) raised ValueError instead of
    # returning True.
    if type_name in valid_abi_types:
        return True
    for prefix, low, high, multiple in (
        ("bytes", 1, 32, 1),    # bytes1 to bytes32
        ("uint", 8, 256, 8),    # uint8 to uint256, multiples of 8
        ("int", 8, 256, 8),     # int8 to int256, multiples of 8
    ):
        if type_name.startswith(prefix):
            suffix = type_name[len(prefix):]
            # A non-numeric suffix (e.g. "bytesFoo") is simply invalid,
            # not an error.
            if not suffix.isdigit():
                return False
            size = int(suffix)
            return low <= size <= high and size % multiple == 0
    return False
def is_array_type(type):
    # Identify if type such as "person[]" or "person[2]" is an array
    # NOTE(review): ``type`` shadows the builtin name; kept for compatibility.
    abi_type = parse(type)
    return abi_type.is_array
@to_tuple
def get_depths_and_dimensions(data, depth):
    """
    Yields 2-length tuples of depth and dimension of each element at that depth
    """
    # Dictionaries and strings are iterable too, so an explicit list/tuple
    # check is used instead of testing for Iterable.
    if isinstance(data, (list, tuple)):
        yield depth, len(data)
        for sub_data in data:
            # recurse into every element, one dimension deeper
            yield from get_depths_and_dimensions(sub_data, depth + 1)
@to_tuple
def flatten_multidimensional_array(array):
    """
    Yield the leaf (non list/tuple) items of ``array`` in depth-first order.
    """
    for item in array:
        # The isinstance branches were inverted: the code recursed into
        # non-iterable leaves (TypeError) and yielded nested lists as-is.
        if isinstance(item, (list, tuple)):
            # Dictionaries and strings are iterable too, so an explicit
            # list/tuple check is used instead of testing for Iterable.
            yield from flatten_multidimensional_array(item)
        else:
            yield item
@to_tuple
def _encode_data(primary_type, types, data):
    """
    Yield ``(abi_type, value)`` pairs that ABI-encode ``data`` as the EIP-712
    struct ``primary_type``, starting with the struct's type hash.

    :param primary_type: name of the struct being encoded
    :param types: mapping of struct name -> list of ``{"name", "type"}`` fields
    :param data: mapping of field name -> value for the struct's fields
    :raises TypeError: when a value does not match its declared field type

    BUGFIX: array encodings returned by ``encode_data`` are ``bytes``, so they
    must be joined with ``b''``, not ``''`` (which raised TypeError).
    """
    # Add typehash
    yield "bytes32", hash_struct_type(primary_type, types)

    # Add field contents
    for field in types[primary_type]:
        value = data[field["name"]]
        if field["type"] == "string":
            if not isinstance(value, str):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "string value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(text=value)
            yield "bytes32", hashed_value
        elif field["type"] == "bytes":
            if not isinstance(value, bytes):
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "bytes value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                    )
                )
            # Special case where the values need to be keccak hashed before they are encoded
            hashed_value = keccak(primitive=value)
            yield "bytes32", hashed_value
        elif field["type"] in types:
            # This means that this type is a user defined (custom struct) type:
            # recursively encode it and hash the encoding
            hashed_value = keccak(primitive=encode_data(field["type"], types, value))
            yield "bytes32", hashed_value
        elif is_array_type(field["type"]):
            # Get the dimensions from the value
            array_dimensions = get_array_dimensions(value)
            # Get the dimensions from what was declared in the schema
            parsed_type = parse(field["type"])
            for i in range(len(array_dimensions)):
                if len(parsed_type.arrlist[i]) == 0:
                    # Skip empty or dynamically declared dimensions
                    continue
                if array_dimensions[i] != parsed_type.arrlist[i][0]:
                    # Dimensions should match with declared schema
                    raise TypeError(
                        "Array data `{0}` has dimensions `{1}` whereas the "
                        "schema has dimensions `{2}`".format(
                            value,
                            array_dimensions,
                            tuple(map(lambda x: x[0], parsed_type.arrlist)),
                        )
                    )
            array_items = flatten_multidimensional_array(value)
            array_items_encoding = [
                encode_data(parsed_type.base, types, array_item)
                for array_item in array_items
            ]
            # join the per-item bytes encodings before hashing (see BUGFIX above)
            concatenated_array_encodings = b''.join(array_items_encoding)
            hashed_value = keccak(concatenated_array_encodings)
            yield "bytes32", hashed_value
        else:
            # First checking to see if type is valid as per abi
            if not is_valid_abi_type(field["type"]):
                raise TypeError(
                    "Received Invalid type `{0}` in the struct `{1}`".format(
                        field["type"],
                        primary_type,
                    )
                )
            # Next see if the data fits the specified encoding type
            if is_encodable(field["type"], value):
                # field["type"] is a valid type and this value corresponds to that type.
                yield field["type"], value
            else:
                raise TypeError(
                    "Value of `{0}` ({2}) in the struct `{1}` is of the type `{3}`, but expected "
                    "{4} value".format(
                        field["name"],
                        primary_type,
                        value,
                        type(value),
                        field["type"],
                    )
                )
def encode_data(primaryType, types, data):
    """ABI-encode ``data`` as the EIP-712 struct ``primaryType``."""
    pairs = _encode_data(primaryType, types, data)
    abi_types, values = zip(*pairs)
    return encode_abi(abi_types, values)
def load_and_validate_structured_message(structured_json_string_data):
    """Parse an EIP-712 JSON payload, validate its structure, and return the dict."""
    message = json.loads(structured_json_string_data)
    validate_structured_data(message)
    return message
def hash_domain(structured_data):
    """Return the keccak hash of the encoded EIP712Domain separator of ``structured_data``."""
    encoded_domain = encode_data(
        "EIP712Domain",
        structured_data["types"],
        structured_data["domain"],
    )
    return keccak(encoded_domain)
def hash_message(structured_data):
    """Return the keccak hash of the encoded primary message of ``structured_data``."""
    encoded_message = encode_data(
        structured_data["primaryType"],
        structured_data["types"],
        structured_data["message"],
    )
    return keccak(encoded_message)
|
ethereum/eth-account
|
eth_account/_utils/signing.py
|
hash_of_signed_transaction
|
python
|
def hash_of_signed_transaction(txn_obj):
'''
Regenerate the hash of the signed transaction object.
1. Infer the chain ID from the signature
2. Strip out signature from transaction
3. Annotate the transaction with that ID, if available
4. Take the hash of the serialized, unsigned, chain-aware transaction
Chain ID inference and annotation is according to EIP-155
See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md
:return: the hash of the provided transaction, to be signed
'''
(chain_id, _v) = extract_chain_id(txn_obj.v)
unsigned_parts = strip_signature(txn_obj)
if chain_id is None:
signable_transaction = UnsignedTransaction(*unsigned_parts)
else:
extended_transaction = unsigned_parts + [chain_id, 0, 0]
signable_transaction = ChainAwareUnsignedTransaction(*extended_transaction)
return signable_transaction.hash()
|
Regenerate the hash of the signed transaction object.
1. Infer the chain ID from the signature
2. Strip out signature from transaction
3. Annotate the transaction with that ID, if available
4. Take the hash of the serialized, unsigned, chain-aware transaction
Chain ID inference and annotation is according to EIP-155
See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md
:return: the hash of the provided transaction, to be signed
|
train
|
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/signing.py#L96-L117
|
[
"def strip_signature(txn):\n unsigned_parts = itertools.islice(txn, len(UNSIGNED_TRANSACTION_FIELDS))\n return list(unsigned_parts)\n",
"def extract_chain_id(raw_v):\n '''\n Extracts chain ID, according to EIP-155\n @return (chain_id, v)\n '''\n above_id_offset = raw_v - CHAIN_ID_OFFSET\n if above_id_offset < 0:\n if raw_v in {0, 1}:\n return (None, raw_v + V_OFFSET)\n elif raw_v in {27, 28}:\n return (None, raw_v)\n else:\n raise ValueError(\"v %r is invalid, must be one of: 0, 1, 27, 28, 35+\")\n else:\n (chain_id, v_bit) = divmod(above_id_offset, 2)\n return (chain_id, v_bit + V_OFFSET)\n"
] |
from cytoolz import (
curry,
pipe,
)
from eth_utils import (
to_bytes,
to_int,
to_text,
)
from eth_account._utils.structured_data.hashing import (
hash_domain,
hash_message,
load_and_validate_structured_message,
)
from eth_account._utils.transactions import (
ChainAwareUnsignedTransaction,
UnsignedTransaction,
encode_transaction,
serializable_unsigned_transaction_from_dict,
strip_signature,
)
CHAIN_ID_OFFSET = 35
V_OFFSET = 27
# signature versions
PERSONAL_SIGN_VERSION = b'E' # Hex value 0x45
INTENDED_VALIDATOR_SIGN_VERSION = b'\x00' # Hex value 0x00
STRUCTURED_DATA_SIGN_VERSION = b'\x01' # Hex value 0x01
def sign_transaction_dict(eth_key, transaction_dict):
    """
    Sign ``transaction_dict`` with ``eth_key``.

    :param eth_key: private key object exposing ``sign_msg_hash`` (eth_keys style)
    :param transaction_dict: transaction fields; defaults are filled during serialization
    :returns: ``(v, r, s, rlp_encoded_signed_transaction)``
    """
    # generate RLP-serializable transaction, with defaults filled
    unsigned_transaction = serializable_unsigned_transaction_from_dict(transaction_dict)
    transaction_hash = unsigned_transaction.hash()

    # detect chain
    if isinstance(unsigned_transaction, UnsignedTransaction):
        chain_id = None
    else:
        # NOTE(review): for chain-aware transactions the ``v`` field appears to
        # hold the chain id before signing (EIP-155) — confirm against the
        # transaction serializer
        chain_id = unsigned_transaction.v

    # sign with private key
    (v, r, s) = sign_transaction_hash(eth_key, transaction_hash, chain_id)

    # serialize transaction with rlp
    encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))

    return (v, r, s, encoded_transaction)
# watch here for updates to signature format:
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-191.md
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md
@curry
def signature_wrapper(message, signature_version, version_specific_data):
    """
    Wrap ``message`` with the EIP-191 prefix for the given ``signature_version``.

    :param message: payload to wrap; must be bytes
    :param signature_version: single version byte — 0x45 (personal_sign),
        0x00 (intended validator), or 0x01 (EIP-712 structured data)
    :param version_specific_data: version-dependent extra data
        (e.g. the validator address hex string for version 0x00)
    :returns: the prefixed message bytes, ready for hashing and signing
    :raises TypeError: if message/version are not bytes, or validator address is malformed
    :raises NotImplementedError: for an unsupported signature version
    """
    if not isinstance(message, bytes):
        raise TypeError("Message is of the type {}, expected bytes".format(type(message)))
    if not isinstance(signature_version, bytes):
        raise TypeError("Signature Version is of the type {}, expected bytes".format(
            type(signature_version))
        )

    if signature_version == PERSONAL_SIGN_VERSION:
        # "\x19Ethereum Signed Message:\n" + length + message (personal_sign)
        preamble = b'\x19Ethereum Signed Message:\n'
        size = str(len(message)).encode('utf-8')
        return preamble + size + message
    elif signature_version == INTENDED_VALIDATOR_SIGN_VERSION:
        # 0x19 0x00 <20-byte validator address> <message>
        wallet_address = to_bytes(hexstr=version_specific_data)
        if len(wallet_address) != 20:
            raise TypeError("Invalid Wallet Address: {}".format(version_specific_data))
        wrapped_message = b'\x19' + signature_version + wallet_address + message
        return wrapped_message
    elif signature_version == STRUCTURED_DATA_SIGN_VERSION:
        # 0x19 0x01 <domain separator hash> <message struct hash> (EIP-712)
        message_string = to_text(primitive=message)
        structured_data = load_and_validate_structured_message(message_string)
        domainSeparator = hash_domain(structured_data)
        wrapped_message = (
            b'\x19' + signature_version + domainSeparator + hash_message(structured_data)
        )
        return wrapped_message
    else:
        raise NotImplementedError(
            "Currently supported signature versions are: {0}, {1}, {2}. ".
            format(
                '0x' + INTENDED_VALIDATOR_SIGN_VERSION.hex(),
                '0x' + PERSONAL_SIGN_VERSION.hex(),
                '0x' + STRUCTURED_DATA_SIGN_VERSION.hex(),
            ) +
            "But received signature version {}".format('0x' + signature_version.hex())
        )
def extract_chain_id(raw_v):
    """
    Extract the chain ID from a signature's ``v`` value, according to EIP-155.

    :param raw_v: the raw ``v`` component of a signature
    :returns: ``(chain_id, v)`` — ``chain_id`` is ``None`` for pre-EIP-155
        signatures; ``v`` is normalized to 27/28
    :raises ValueError: when ``raw_v`` is not a recognized value
    """
    above_id_offset = raw_v - CHAIN_ID_OFFSET
    if above_id_offset < 0:
        if raw_v in {0, 1}:
            return (None, raw_v + V_OFFSET)
        elif raw_v in {27, 28}:
            return (None, raw_v)
        else:
            # BUGFIX: the original never interpolated raw_v, so the literal
            # "%r" appeared in the raised error message
            raise ValueError("v %r is invalid, must be one of: 0, 1, 27, 28, 35+" % raw_v)
    else:
        # EIP-155: v = {0,1} + chain_id * 2 + 35
        (chain_id, v_bit) = divmod(above_id_offset, 2)
        return (chain_id, v_bit + V_OFFSET)
def to_standard_signature_bytes(ethereum_signature_bytes):
    """Convert a 65-byte ethereum signature to the standard form with v in {0, 1}."""
    r_and_s = ethereum_signature_bytes[:-1]
    raw_v = to_int(ethereum_signature_bytes[-1])
    return r_and_s + to_bytes(to_standard_v(raw_v))
def to_standard_v(enhanced_v):
    """Reduce a chain-aware (EIP-155) or offset ``v`` value to the standard 0/1 recovery id."""
    _chain_id, chain_naive_v = extract_chain_id(enhanced_v)
    v_standard = chain_naive_v - V_OFFSET
    assert v_standard in {0, 1}
    return v_standard
def to_eth_v(v_raw, chain_id=None):
    """Offset a raw recovery id into an Ethereum ``v``; EIP-155-aware when ``chain_id`` is given."""
    if chain_id is None:
        return v_raw + V_OFFSET
    return v_raw + CHAIN_ID_OFFSET + 2 * chain_id
def sign_transaction_hash(account, transaction_hash, chain_id):
    """Sign ``transaction_hash`` with ``account`` and return chain-aware ``(v, r, s)``."""
    (v_raw, r, s) = account.sign_msg_hash(transaction_hash).vrs
    return (to_eth_v(v_raw, chain_id), r, s)
def _pad_to_eth_word(bytes_val):
    """Left-pad ``bytes_val`` with zero bytes up to a full 32-byte EVM word."""
    padding = b'\0' * (32 - len(bytes_val))
    return padding + bytes_val
def to_bytes32(val):
    """Convert ``val`` to bytes and left-pad it to a 32-byte word."""
    return _pad_to_eth_word(to_bytes(val))
def sign_message_hash(key, msg_hash):
    """Sign ``msg_hash`` and return ``(v, r, s, 65-byte signature)`` with v offset to 27/28."""
    (v_raw, r, s) = key.sign_msg_hash(msg_hash).vrs
    v = to_eth_v(v_raw)
    signature_bytes = to_bytes32(r) + to_bytes32(s) + to_bytes(v)
    return (v, r, s, signature_bytes)
|
ethereum/eth-account
|
eth_account/_utils/signing.py
|
extract_chain_id
|
python
|
def extract_chain_id(raw_v):
'''
Extracts chain ID, according to EIP-155
@return (chain_id, v)
'''
above_id_offset = raw_v - CHAIN_ID_OFFSET
if above_id_offset < 0:
if raw_v in {0, 1}:
return (None, raw_v + V_OFFSET)
elif raw_v in {27, 28}:
return (None, raw_v)
else:
raise ValueError("v %r is invalid, must be one of: 0, 1, 27, 28, 35+")
else:
(chain_id, v_bit) = divmod(above_id_offset, 2)
return (chain_id, v_bit + V_OFFSET)
|
Extracts chain ID, according to EIP-155
@return (chain_id, v)
|
train
|
https://github.com/ethereum/eth-account/blob/335199b815ae34fea87f1523e2f29777fd52946e/eth_account/_utils/signing.py#L120-L135
| null |
from cytoolz import (
curry,
pipe,
)
from eth_utils import (
to_bytes,
to_int,
to_text,
)
from eth_account._utils.structured_data.hashing import (
hash_domain,
hash_message,
load_and_validate_structured_message,
)
from eth_account._utils.transactions import (
ChainAwareUnsignedTransaction,
UnsignedTransaction,
encode_transaction,
serializable_unsigned_transaction_from_dict,
strip_signature,
)
CHAIN_ID_OFFSET = 35
V_OFFSET = 27
# signature versions
PERSONAL_SIGN_VERSION = b'E' # Hex value 0x45
INTENDED_VALIDATOR_SIGN_VERSION = b'\x00' # Hex value 0x00
STRUCTURED_DATA_SIGN_VERSION = b'\x01' # Hex value 0x01
def sign_transaction_dict(eth_key, transaction_dict):
# generate RLP-serializable transaction, with defaults filled
unsigned_transaction = serializable_unsigned_transaction_from_dict(transaction_dict)
transaction_hash = unsigned_transaction.hash()
# detect chain
if isinstance(unsigned_transaction, UnsignedTransaction):
chain_id = None
else:
chain_id = unsigned_transaction.v
# sign with private key
(v, r, s) = sign_transaction_hash(eth_key, transaction_hash, chain_id)
# serialize transaction with rlp
encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))
return (v, r, s, encoded_transaction)
# watch here for updates to signature format:
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-191.md
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md
@curry
def signature_wrapper(message, signature_version, version_specific_data):
if not isinstance(message, bytes):
raise TypeError("Message is of the type {}, expected bytes".format(type(message)))
if not isinstance(signature_version, bytes):
raise TypeError("Signature Version is of the type {}, expected bytes".format(
type(signature_version))
)
if signature_version == PERSONAL_SIGN_VERSION:
preamble = b'\x19Ethereum Signed Message:\n'
size = str(len(message)).encode('utf-8')
return preamble + size + message
elif signature_version == INTENDED_VALIDATOR_SIGN_VERSION:
wallet_address = to_bytes(hexstr=version_specific_data)
if len(wallet_address) != 20:
raise TypeError("Invalid Wallet Address: {}".format(version_specific_data))
wrapped_message = b'\x19' + signature_version + wallet_address + message
return wrapped_message
elif signature_version == STRUCTURED_DATA_SIGN_VERSION:
message_string = to_text(primitive=message)
structured_data = load_and_validate_structured_message(message_string)
domainSeparator = hash_domain(structured_data)
wrapped_message = (
b'\x19' + signature_version + domainSeparator + hash_message(structured_data)
)
return wrapped_message
else:
raise NotImplementedError(
"Currently supported signature versions are: {0}, {1}, {2}. ".
format(
'0x' + INTENDED_VALIDATOR_SIGN_VERSION.hex(),
'0x' + PERSONAL_SIGN_VERSION.hex(),
'0x' + STRUCTURED_DATA_SIGN_VERSION.hex(),
) +
"But received signature version {}".format('0x' + signature_version.hex())
)
def hash_of_signed_transaction(txn_obj):
    '''
    Regenerate the hash of the signed transaction object.

    1. Infer the chain ID from the signature
    2. Strip out signature from transaction
    3. Annotate the transaction with that ID, if available
    4. Take the hash of the serialized, unsigned, chain-aware transaction

    Chain ID inference and annotation is according to EIP-155
    See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md

    :param txn_obj: a signed transaction object carrying ``v`` and signature fields
    :return: the hash of the provided transaction, to be signed
    '''
    (chain_id, _v) = extract_chain_id(txn_obj.v)

    # drop (v, r, s) to recover the unsigned fields
    unsigned_parts = strip_signature(txn_obj)

    if chain_id is None:
        # pre-EIP-155 transaction: hash without chain annotation
        signable_transaction = UnsignedTransaction(*unsigned_parts)
    else:
        # EIP-155: append (chain_id, 0, 0) before hashing
        extended_transaction = unsigned_parts + [chain_id, 0, 0]
        signable_transaction = ChainAwareUnsignedTransaction(*extended_transaction)

    return signable_transaction.hash()
def to_standard_signature_bytes(ethereum_signature_bytes):
rs = ethereum_signature_bytes[:-1]
v = to_int(ethereum_signature_bytes[-1])
standard_v = to_standard_v(v)
return rs + to_bytes(standard_v)
def to_standard_v(enhanced_v):
(_chain, chain_naive_v) = extract_chain_id(enhanced_v)
v_standard = chain_naive_v - V_OFFSET
assert v_standard in {0, 1}
return v_standard
def to_eth_v(v_raw, chain_id=None):
if chain_id is None:
v = v_raw + V_OFFSET
else:
v = v_raw + CHAIN_ID_OFFSET + 2 * chain_id
return v
def sign_transaction_hash(account, transaction_hash, chain_id):
signature = account.sign_msg_hash(transaction_hash)
(v_raw, r, s) = signature.vrs
v = to_eth_v(v_raw, chain_id)
return (v, r, s)
def _pad_to_eth_word(bytes_val):
return bytes_val.rjust(32, b'\0')
def to_bytes32(val):
return pipe(
val,
to_bytes,
_pad_to_eth_word,
)
def sign_message_hash(key, msg_hash):
signature = key.sign_msg_hash(msg_hash)
(v_raw, r, s) = signature.vrs
v = to_eth_v(v_raw)
eth_signature_bytes = to_bytes32(r) + to_bytes32(s) + to_bytes(v)
return (v, r, s, eth_signature_bytes)
|
evansde77/dockerstache
|
src/dockerstache/dockerstache.py
|
run
|
python
|
def run(**options):
    """
    _run_

    Run the dockerstache process to render templates based on the options
    provided.  If ``extend_context`` is passed in options it will be used to
    extend the context with the contents of the dictionary provided via
    ``context.update(extend_context)``.

    :returns: dict of the resolved configuration
    :raises RuntimeError: when no context file is configured or it does not exist
    """
    with Dotfile(options) as conf:
        if conf['context'] is None:
            msg = "No context file has been provided"
            LOGGER.error(msg)
            raise RuntimeError(msg)
        if not os.path.exists(conf['context_path']):
            msg = "Context file {} not found".format(conf['context_path'])
            LOGGER.error(msg)
            raise RuntimeError(msg)
        LOGGER.info(
            (
                "{{dockerstache}}: In: {}\n"
                "{{dockerstache}}: Out: {}\n"
                "{{dockerstache}}: Context: {}\n"
                "{{dockerstache}}: Defaults: {}\n"
            ).format(conf['input'], conf['output'], conf['context'], conf['defaults'])
        )
        context = Context(conf['context'], conf['defaults'])
        context.load()
        if 'extend_context' in options:
            # caller-supplied overrides win over the loaded context file
            LOGGER.info("{{dockerstache}} Extended context provided")
            context.update(options['extend_context'])
        process_templates(
            conf['input'],
            conf['output'],
            context
        )
        if conf['inclusive']:
            # also copy across non-template files, minus the exclude list
            process_copies(
                conf['input'],
                conf['output'],
                conf['exclude']
            )
        return dict(conf)
|
_run_
Run the dockerstache process to render templates
based on the options provided
If extend_context is passed as options it will be used to
extend the context with the contents of the dictionary provided
via context.update(extend_context)
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/dockerstache.py#L18-L64
| null |
#!/usr/bin/env python
"""
_dockerstache_
Main function to invoke dockerstache as a lib call
"""
import os
from .dotfile import Dotfile
from .templates import process_templates, process_copies
from .context import Context
from . import get_logger
LOGGER = get_logger()
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
dir_visitor
|
python
|
def dir_visitor(dirname, visitor):
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
|
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L18-L33
|
[
"def dir_visitor(dirname, visitor):\n \"\"\"\n _dir_visitor_\n\n walk through all files in dirname, find\n directories and call the callable on them.\n\n :param dirname: Name of directory to start visiting,\n all subdirs will be visited\n :param visitor: Callable invoked on each dir visited\n \"\"\"\n visitor(dirname)\n for obj in os.listdir(dirname):\n obj_path = os.path.join(dirname, obj)\n if os.path.isdir(obj_path):\n dir_visitor(obj_path, visitor)\n"
] |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def replicate_directory_tree(input_dir, output_dir):
"""
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
"""
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
def find_copies(input_dir, exclude_list):
"""
find files that are not templates and not
in the exclude_list for copying from template to image
"""
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
def render_template(template_in, file_out, context):
"""
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
"""
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
def copy_file(src, target):
"""
copy_file
copy source to target
"""
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
def process_templates(input_dir, target_dir, context):
"""
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
"""
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
def process_copies(input_dir, target_dir, excludes):
"""
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
"""
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
replicate_directory_tree
|
python
|
def replicate_directory_tree(input_dir, output_dir):
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
|
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L36-L56
|
[
"def dir_visitor(dirname, visitor):\n \"\"\"\n _dir_visitor_\n\n walk through all files in dirname, find\n directories and call the callable on them.\n\n :param dirname: Name of directory to start visiting,\n all subdirs will be visited\n :param visitor: Callable invoked on each dir visited\n \"\"\"\n visitor(dirname)\n for obj in os.listdir(dirname):\n obj_path = os.path.join(dirname, obj)\n if os.path.isdir(obj_path):\n dir_visitor(obj_path, visitor)\n"
] |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
    """
    _dir_visitor_

    Recursively apply ``visitor`` to ``dirname`` and to every subdirectory
    beneath it (pre-order).

    :param dirname: directory at which to start the walk; all subdirs visited
    :param visitor: callable invoked with each directory path
    """
    visitor(dirname)
    for entry in os.listdir(dirname):
        entry_path = os.path.join(dirname, entry)
        if os.path.isdir(entry_path):
            dir_visitor(entry_path, visitor)
def find_templates(input_dir):
    """
    _find_templates_

    Recursively search ``input_dir`` and collect every file whose name
    ends with ``.mustache``.

    :param input_dir: Path to start recursive search for mustache templates
    :returns: List of file paths corresponding to templates
    """
    templates = []

    def template_finder(result, dirname):
        # gather the template files directly inside this directory
        result.extend(
            os.path.join(dirname, name)
            for name in os.listdir(dirname)
            if name.endswith('.mustache')
        )

    dir_visitor(input_dir, functools.partial(template_finder, templates))
    return templates
def find_copies(input_dir, exclude_list):
    """
    Find files under ``input_dir`` that are not mustache templates and are
    not listed in ``exclude_list``, for copying from template dir to image.
    """
    copies = []

    def copy_finder(found, dirname):
        for name in os.listdir(dirname):
            full_path = os.path.join(dirname, name)
            if os.path.isdir(full_path):
                # directories are handled by the tree replication step
                continue
            if name in exclude_list or name.endswith('.mustache'):
                continue
            found.append(full_path)

    dir_visitor(input_dir, functools.partial(copy_finder, copies))
    return copies
def render_template(template_in, file_out, context):
    """
    _render_template_

    Render a single mustache template with ``context`` and write the result
    to ``file_out``, preserving the template's file mode.

    #TODO: verify the template is completely rendered, no missing values
    """
    rendered = pystache.Renderer().render_path(template_in, context)
    with open(file_out, 'w') as handle:
        LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
        handle.write(rendered)
    shutil.copymode(template_in, file_out)
def copy_file(src, target):
    """
    copy_file

    Copy ``src`` to ``target``, preserving the source file's mode bits.

    :param src: path of the file to copy
    :param target: destination path
    """
    LOGGER.info("Copying {} to {}".format(src, target))
    shutil.copyfile(src, target)
    shutil.copymode(src, target)
def process_templates(input_dir, target_dir, context):
    """
    _process_templates_

    Given the input dir containing a set of templates, clone the structure
    under that directory into the target dir, using the context to process
    any mustache templates that are encountered.

    :param input_dir: directory containing ``.mustache`` templates
    :param target_dir: output directory (created if missing)
    :param context: mapping used to render the templates
    """
    if not target_dir.endswith('/'):
        # normalize with a trailing slash so str.replace below maps paths cleanly
        target_dir = "{}/".format(target_dir)
    if not os.path.exists(target_dir):
        LOGGER.info('Creating: {}'.format(target_dir))
        os.makedirs(target_dir)
    replicate_directory_tree(input_dir, target_dir)
    templates = find_templates(input_dir)
    for templ in templates:
        # map the template path into the target tree and drop the .mustache suffix
        output_file = templ.replace(input_dir, target_dir)
        output_file = output_file[:-len('.mustache')]
        render_template(templ, output_file, context)
def process_copies(input_dir, target_dir, excludes):
    """
    _process_copies_

    Copy non-template files across; assumes the directory structure has
    already been replicated into ``target_dir``.
    """
    for src in find_copies(input_dir, excludes):
        copy_file(src, src.replace(input_dir, target_dir))
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
find_templates
|
python
|
def find_templates(input_dir):
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
|
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L59-L81
|
[
"def dir_visitor(dirname, visitor):\n \"\"\"\n _dir_visitor_\n\n walk through all files in dirname, find\n directories and call the callable on them.\n\n :param dirname: Name of directory to start visiting,\n all subdirs will be visited\n :param visitor: Callable invoked on each dir visited\n \"\"\"\n visitor(dirname)\n for obj in os.listdir(dirname):\n obj_path = os.path.join(dirname, obj)\n if os.path.isdir(obj_path):\n dir_visitor(obj_path, visitor)\n"
] |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
"""
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
"""
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
def replicate_directory_tree(input_dir, output_dir):
    """
    _replicate_directory_tree_

    Clone the directory structure under ``input_dir`` into ``output_dir``:
    every subdirectory beneath ``input_dir`` is created under ``output_dir``.

    :param input_dir: path to dir tree to be cloned
    :param output_dir: path to new dir where dir structure will be created
    """
    def transplant_dir(target, dirname):
        new_dir = dirname.replace(input_dir, target)
        if not os.path.exists(new_dir):
            LOGGER.info('Creating: {}'.format(new_dir))
            os.makedirs(new_dir)

    dir_visitor(input_dir, functools.partial(transplant_dir, output_dir))
def find_copies(input_dir, exclude_list):
"""
find files that are not templates and not
in the exclude_list for copying from template to image
"""
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
def render_template(template_in, file_out, context):
"""
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
"""
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
def copy_file(src, target):
"""
copy_file
copy source to target
"""
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
def process_templates(input_dir, target_dir, context):
"""
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
"""
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
def process_copies(input_dir, target_dir, excludes):
"""
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
"""
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
find_copies
|
python
|
def find_copies(input_dir, exclude_list):
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
|
find files that are not templates and not
in the exclude_list for copying from template to image
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L84-L107
|
[
"def dir_visitor(dirname, visitor):\n \"\"\"\n _dir_visitor_\n\n walk through all files in dirname, find\n directories and call the callable on them.\n\n :param dirname: Name of directory to start visiting,\n all subdirs will be visited\n :param visitor: Callable invoked on each dir visited\n \"\"\"\n visitor(dirname)\n for obj in os.listdir(dirname):\n obj_path = os.path.join(dirname, obj)\n if os.path.isdir(obj_path):\n dir_visitor(obj_path, visitor)\n"
] |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
"""
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
"""
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
def replicate_directory_tree(input_dir, output_dir):
"""
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
"""
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
def render_template(template_in, file_out, context):
"""
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
"""
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
def copy_file(src, target):
"""
copy_file
copy source to target
"""
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
def process_templates(input_dir, target_dir, context):
"""
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
"""
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
def process_copies(input_dir, target_dir, excludes):
"""
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
"""
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
render_template
|
python
|
def render_template(template_in, file_out, context):
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
|
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L110-L126
| null |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
"""
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
"""
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
def replicate_directory_tree(input_dir, output_dir):
"""
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
"""
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
def find_copies(input_dir, exclude_list):
"""
find files that are not templates and not
in the exclude_list for copying from template to image
"""
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
def copy_file(src, target):
"""
copy_file
copy source to target
"""
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
def process_templates(input_dir, target_dir, context):
"""
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
"""
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
def process_copies(input_dir, target_dir, excludes):
"""
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
"""
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
copy_file
|
python
|
def copy_file(src, target):
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
|
copy_file
copy source to target
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L129-L138
| null |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
"""
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
"""
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
def replicate_directory_tree(input_dir, output_dir):
"""
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
"""
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
def find_copies(input_dir, exclude_list):
"""
find files that are not templates and not
in the exclude_list for copying from template to image
"""
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
def render_template(template_in, file_out, context):
"""
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
"""
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
def process_templates(input_dir, target_dir, context):
"""
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
"""
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
def process_copies(input_dir, target_dir, excludes):
"""
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
"""
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
process_templates
|
python
|
def process_templates(input_dir, target_dir, context):
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
|
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L141-L161
|
[
"def replicate_directory_tree(input_dir, output_dir):\n \"\"\"\n _replicate_directory_tree_\n\n clone dir structure under input_dir into output dir\n All subdirs beneath input_dir will be created under\n output_dir\n :param input_dir: path to dir tree to be cloned\n :param output_dir: path to new dir where dir structure will\n be created\n \"\"\"\n def transplant_dir(target, dirname):\n x = dirname.replace(input_dir, target)\n if not os.path.exists(x):\n LOGGER.info('Creating: {}'.format(x))\n os.makedirs(x)\n\n dir_visitor(\n input_dir,\n functools.partial(transplant_dir, output_dir)\n )\n",
"def find_templates(input_dir):\n \"\"\"\n _find_templates_\n\n traverse the input_dir structure and return a list\n of template files ending with .mustache\n\n :param input_dir: Path to start recursive search for\n mustache templates\n :returns: List of file paths corresponding to templates\n \"\"\"\n templates = []\n\n def template_finder(result, dirname):\n for obj in os.listdir(dirname):\n if obj.endswith('.mustache'):\n result.append(os.path.join(dirname, obj))\n\n dir_visitor(\n input_dir,\n functools.partial(template_finder, templates)\n )\n return templates\n",
"def render_template(template_in, file_out, context):\n \"\"\"\n _render_template_\n\n Render a single template file, using the context provided\n and write the file out to the location specified\n\n #TODO: verify the template is completely rendered, no\n missing values\n\n \"\"\"\n renderer = pystache.Renderer()\n result = renderer.render_path(template_in, context)\n with open(file_out, 'w') as handle:\n LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))\n handle.write(result)\n shutil.copymode(template_in, file_out)\n"
] |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
"""
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
"""
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
def replicate_directory_tree(input_dir, output_dir):
"""
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
"""
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
def find_copies(input_dir, exclude_list):
"""
find files that are not templates and not
in the exclude_list for copying from template to image
"""
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
def render_template(template_in, file_out, context):
"""
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
"""
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
def copy_file(src, target):
"""
copy_file
copy source to target
"""
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
def process_copies(input_dir, target_dir, excludes):
"""
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
"""
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
evansde77/dockerstache
|
src/dockerstache/templates.py
|
process_copies
|
python
|
def process_copies(input_dir, target_dir, excludes):
copies = find_copies(input_dir, excludes)
for c in copies:
output_file = c.replace(input_dir, target_dir)
copy_file(c, output_file)
|
_process_copies_
Handles files to be copied across, assumes
that dir structure has already been replicated
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/templates.py#L164-L175
|
[
"def copy_file(src, target):\n \"\"\"\n copy_file\n\n copy source to target\n\n \"\"\"\n LOGGER.info(\"Copying {} to {}\".format(src, target))\n shutil.copyfile(src, target)\n shutil.copymode(src, target)\n",
"def find_copies(input_dir, exclude_list):\n \"\"\"\n find files that are not templates and not\n in the exclude_list for copying from template to image\n \"\"\"\n copies = []\n\n def copy_finder(copies, dirname):\n for obj in os.listdir(dirname):\n pathname = os.path.join(dirname, obj)\n if os.path.isdir(pathname):\n continue\n if obj in exclude_list:\n continue\n if obj.endswith('.mustache'):\n continue\n\n copies.append(os.path.join(dirname, obj))\n\n dir_visitor(\n input_dir,\n functools.partial(copy_finder, copies)\n )\n return copies\n"
] |
#!/usr/bin/env python
"""
_templates_
Find templates, render templates etc
"""
import os
import functools
import pystache
import shutil
from . import get_logger
LOGGER = get_logger()
def dir_visitor(dirname, visitor):
"""
_dir_visitor_
walk through all files in dirname, find
directories and call the callable on them.
:param dirname: Name of directory to start visiting,
all subdirs will be visited
:param visitor: Callable invoked on each dir visited
"""
visitor(dirname)
for obj in os.listdir(dirname):
obj_path = os.path.join(dirname, obj)
if os.path.isdir(obj_path):
dir_visitor(obj_path, visitor)
def replicate_directory_tree(input_dir, output_dir):
"""
_replicate_directory_tree_
clone dir structure under input_dir into output dir
All subdirs beneath input_dir will be created under
output_dir
:param input_dir: path to dir tree to be cloned
:param output_dir: path to new dir where dir structure will
be created
"""
def transplant_dir(target, dirname):
x = dirname.replace(input_dir, target)
if not os.path.exists(x):
LOGGER.info('Creating: {}'.format(x))
os.makedirs(x)
dir_visitor(
input_dir,
functools.partial(transplant_dir, output_dir)
)
def find_templates(input_dir):
"""
_find_templates_
traverse the input_dir structure and return a list
of template files ending with .mustache
:param input_dir: Path to start recursive search for
mustache templates
:returns: List of file paths corresponding to templates
"""
templates = []
def template_finder(result, dirname):
for obj in os.listdir(dirname):
if obj.endswith('.mustache'):
result.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(template_finder, templates)
)
return templates
def find_copies(input_dir, exclude_list):
"""
find files that are not templates and not
in the exclude_list for copying from template to image
"""
copies = []
def copy_finder(copies, dirname):
for obj in os.listdir(dirname):
pathname = os.path.join(dirname, obj)
if os.path.isdir(pathname):
continue
if obj in exclude_list:
continue
if obj.endswith('.mustache'):
continue
copies.append(os.path.join(dirname, obj))
dir_visitor(
input_dir,
functools.partial(copy_finder, copies)
)
return copies
def render_template(template_in, file_out, context):
"""
_render_template_
Render a single template file, using the context provided
and write the file out to the location specified
#TODO: verify the template is completely rendered, no
missing values
"""
renderer = pystache.Renderer()
result = renderer.render_path(template_in, context)
with open(file_out, 'w') as handle:
LOGGER.info('Rendering: {} to {}'.format(template_in, file_out))
handle.write(result)
shutil.copymode(template_in, file_out)
def copy_file(src, target):
"""
copy_file
copy source to target
"""
LOGGER.info("Copying {} to {}".format(src, target))
shutil.copyfile(src, target)
shutil.copymode(src, target)
def process_templates(input_dir, target_dir, context):
"""
_process_templates_
Given the input dir containing a set of template,
clone the structure under that directory into the target dir
using the context to process any mustache templates that
are encountered
"""
if not target_dir.endswith('/'):
target_dir = "{}/".format(target_dir)
if not os.path.exists(target_dir):
LOGGER.info('Creating: {}'.format(target_dir))
os.makedirs(target_dir)
replicate_directory_tree(input_dir, target_dir)
templates = find_templates(input_dir)
for templ in templates:
output_file = templ.replace(input_dir, target_dir)
output_file = output_file[:-len('.mustache')]
render_template(templ, output_file, context)
|
evansde77/dockerstache
|
setup.py
|
get_default
|
python
|
def get_default(parser, section, option, default):
try:
result = parser.get(section, option)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
result = default
return result
|
helper to get config settings with a default if not present
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/setup.py#L16-L22
| null |
"""
_setup.py_
Cirrus template setup.py that reads most of its business
from the cirrus.conf file. This lightweight setup.py should be used by
projects managed with cirrus.
"""
import setuptools
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
# Build a parser and fetch setup options
parser = ConfigParser.RawConfigParser()
parser.read('cirrus.conf')
src_dir = get_default(parser, 'package', 'find_packages', '.')
excl_dirs = get_default(parser, 'package', 'exclude_packages', [])
requirements_filename = get_default(
parser,
'build',
'requirements-file',
'requirements.txt'
)
requirements_file = open(requirements_filename)
# Manually parse the requirements file. Pip 1.5.6 to 6.0 has a function
# behavior change for pip.req.parse_requirements. You must use the setuptools
# format when specifying requirements.
# - https://pythonhosted.org/setuptools/setuptools.html#declaring-dependencies
# Furthermore, you can't use line continuations with the following:
requirements = requirements_file.read().strip().split('\n')
setup_args ={
'description': parser.get('package', 'description'),
'include_package_data': True,
'install_requires': requirements,
'name': parser.get('package', 'name'),
'version': parser.get('package', 'version'),
'url': get_default(parser, 'package', 'url', None),
'author': get_default(parser, 'package','author', None),
'author_email': get_default(parser, 'package','author_email', None)
}
if parser.has_section('console_scripts'):
scripts = [
'{0} = {1}'.format(opt, parser.get('console_scripts', opt))
for opt in parser.options('console_scripts')
]
setup_args['entry_points'] = {'console_scripts' : scripts}
if src_dir:
setup_args['packages'] = setuptools.find_packages(src_dir, exclude=excl_dirs)
setup_args['provides'] = setuptools.find_packages(src_dir)
setup_args['package_dir'] = {'':src_dir}
setuptools.setup(**setup_args)
|
evansde77/dockerstache
|
src/dockerstache/__main__.py
|
build_parser
|
python
|
def build_parser():
parser = argparse.ArgumentParser(
description='dockerstache templating util'
)
parser.add_argument(
'--output', '-o',
help='Working directory to render dockerfile and templates',
dest='output',
default=None
)
parser.add_argument(
'--input', '-i',
help='Working directory containing dockerfile and script mustache templates',
dest='input',
default=os.getcwd()
)
parser.add_argument(
'--context', '-c',
help='JSON file containing context dictionary to render templates',
dest='context',
default=None
)
parser.add_argument(
'--defaults', '-d',
help='JSON file containing default context dictionary to render templates',
dest='defaults',
default=None
)
parser.add_argument(
'--inclusive',
help='include non .mustache files from template',
default=False,
action='store_true'
)
parser.add_argument(
'--exclude', '-e',
help='exclude files from template in this list',
default=[],
nargs='+'
)
opts = parser.parse_args()
return vars(opts)
|
_build_parser_
Set up CLI parser options, parse the
CLI options an return the parsed results
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/__main__.py#L17-L65
| null |
#!/usr/bin/env python
"""
_dockerstache_
"""
import os
import sys
import argparse
from . import get_logger
from .dockerstache import run
LOGGER = get_logger()
def main():
"""
_main_
Create a CLI parser and use that to run
the template rendering process
"""
options = build_parser()
try:
run(**options)
except RuntimeError as ex:
msg = (
"An error occurred running dockerstache: {} "
"please see logging info above for details"
).format(ex)
LOGGER.error(msg)
sys.exit(1)
if __name__ == '__main__':
main()
|
evansde77/dockerstache
|
src/dockerstache/__main__.py
|
main
|
python
|
def main():
options = build_parser()
try:
run(**options)
except RuntimeError as ex:
msg = (
"An error occurred running dockerstache: {} "
"please see logging info above for details"
).format(ex)
LOGGER.error(msg)
sys.exit(1)
|
_main_
Create a CLI parser and use that to run
the template rendering process
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/__main__.py#L68-L86
|
[
"def run(**options):\n \"\"\"\n _run_\n\n Run the dockerstache process to render templates\n based on the options provided\n\n If extend_context is passed as options it will be used to\n extend the context with the contents of the dictionary provided\n via context.update(extend_context)\n\n \"\"\"\n with Dotfile(options) as conf:\n if conf['context'] is None:\n msg = \"No context file has been provided\"\n LOGGER.error(msg)\n raise RuntimeError(msg)\n if not os.path.exists(conf['context_path']):\n msg = \"Context file {} not found\".format(conf['context_path'])\n LOGGER.error(msg)\n raise RuntimeError(msg)\n LOGGER.info(\n (\n \"{{dockerstache}}: In: {}\\n\"\n \"{{dockerstache}}: Out: {}\\n\"\n \"{{dockerstache}}: Context: {}\\n\"\n \"{{dockerstache}}: Defaults: {}\\n\"\n ).format(conf['input'], conf['output'], conf['context'], conf['defaults'])\n )\n context = Context(conf['context'], conf['defaults'])\n context.load()\n if 'extend_context' in options:\n LOGGER.info(\"{{dockerstache}} Extended context provided\")\n context.update(options['extend_context'])\n\n process_templates(\n conf['input'],\n conf['output'],\n context\n )\n if conf['inclusive']:\n process_copies(\n conf['input'],\n conf['output'],\n conf['exclude']\n )\n return dict(conf)\n",
"def build_parser():\n \"\"\"\n _build_parser_\n\n Set up CLI parser options, parse the\n CLI options an return the parsed results\n\n \"\"\"\n parser = argparse.ArgumentParser(\n description='dockerstache templating util'\n )\n parser.add_argument(\n '--output', '-o',\n help='Working directory to render dockerfile and templates',\n dest='output',\n default=None\n )\n parser.add_argument(\n '--input', '-i',\n help='Working directory containing dockerfile and script mustache templates',\n dest='input',\n default=os.getcwd()\n )\n parser.add_argument(\n '--context', '-c',\n help='JSON file containing context dictionary to render templates',\n dest='context',\n default=None\n )\n parser.add_argument(\n '--defaults', '-d',\n help='JSON file containing default context dictionary to render templates',\n dest='defaults',\n default=None\n )\n parser.add_argument(\n '--inclusive',\n help='include non .mustache files from template',\n default=False,\n action='store_true'\n )\n parser.add_argument(\n '--exclude', '-e',\n help='exclude files from template in this list',\n default=[],\n nargs='+'\n )\n opts = parser.parse_args()\n return vars(opts)\n"
] |
#!/usr/bin/env python
"""
_dockerstache_
"""
import os
import sys
import argparse
from . import get_logger
from .dockerstache import run
LOGGER = get_logger()
def build_parser():
"""
_build_parser_
Set up CLI parser options, parse the
CLI options an return the parsed results
"""
parser = argparse.ArgumentParser(
description='dockerstache templating util'
)
parser.add_argument(
'--output', '-o',
help='Working directory to render dockerfile and templates',
dest='output',
default=None
)
parser.add_argument(
'--input', '-i',
help='Working directory containing dockerfile and script mustache templates',
dest='input',
default=os.getcwd()
)
parser.add_argument(
'--context', '-c',
help='JSON file containing context dictionary to render templates',
dest='context',
default=None
)
parser.add_argument(
'--defaults', '-d',
help='JSON file containing default context dictionary to render templates',
dest='defaults',
default=None
)
parser.add_argument(
'--inclusive',
help='include non .mustache files from template',
default=False,
action='store_true'
)
parser.add_argument(
'--exclude', '-e',
help='exclude files from template in this list',
default=[],
nargs='+'
)
opts = parser.parse_args()
return vars(opts)
if __name__ == '__main__':
main()
|
evansde77/dockerstache
|
src/dockerstache/__init__.py
|
setup_logger
|
python
|
def setup_logger():
logger = logging.getLogger('dockerstache')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
|
setup basic logger
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/__init__.py#L31-L40
| null |
#!/usr/bin/env python
"""
_dockerstache_
util package for rendering docker files from mustache templates
"""
import sys
import logging
import logging.handlers
__version__ = "0.0.14"
_LOGGER = {
"logger": None
}
def get_logger():
"""
_get_logger_
Get package logger instance
"""
if _LOGGER['logger'] is None:
_LOGGER['logger'] = setup_logger()
return _LOGGER['logger']
|
evansde77/dockerstache
|
src/dockerstache/context.py
|
Context.load
|
python
|
def load(self):
if self._defaults_file is not None:
if not os.path.exists(self._defaults_file):
msg = "Unable to find defaults file: {}".format(self._defaults_file)
LOGGER.error(msg)
raise RuntimeError(msg)
with open(self._defaults_file, 'r') as handle:
self._defaults = json.load(handle)
self.update(self._defaults)
if self._settings_file is None:
msg = "No context file has been provided"
LOGGER.error(msg)
raise RuntimeError(msg)
if not os.path.exists(self._settings_file):
msg = "Unable to find settings file: {}".format(self._settings_file)
LOGGER.error(msg)
raise RuntimeError(msg)
with open(self._settings_file, 'r') as handle:
settings = json.load(handle)
update(self, settings)
return
|
_load_
Load the defaults file if specified
and overlay the json file on top of that
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/context.py#L46-L75
|
[
"def update(d, u):\n \"\"\"recursive dictionary merge helper\"\"\"\n for k, v in six.iteritems(u):\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d\n"
] |
class Context(dict):
"""
_Context_
Util wrapper around a dict to load json files in
precedence and build a dictionary for rendering
templates
"""
def __init__(self, jsonfile=None, defaultfile=None):
super(Context, self).__init__()
self._defaults = {}
self._defaults_file = defaultfile
self._settings_file = jsonfile
|
evansde77/dockerstache
|
src/dockerstache/dotfile.py
|
execute_command
|
python
|
def execute_command(working_dir, cmd, env_dict):
proc_env = os.environ.copy()
proc_env["PATH"] = "{}:{}:.".format(proc_env["PATH"], working_dir)
proc_env.update(env_dict)
proc = subprocess.Popen(
cmd,
cwd=working_dir,
env=proc_env,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
status = proc.wait()
stdout, stderr = proc.communicate()
if status:
msg = (
"Non zero {} exit from command {}\n"
"Stdout: {}\n"
"Stderr: {}\n"
).format(status, cmd, stdout, stderr)
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info(stdout)
|
execute_command: run the command provided in the working dir
specified adding the env_dict settings to the
execution environment
:param working_dir: path to directory to execute command
also gets added to the PATH
:param cmd: Shell command to execute
:param env_dict: dictionary of additional env vars to
be passed to the subprocess environment
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/dotfile.py#L19-L53
| null |
#!/usr/bin/env python
"""
dotfile
Utils for reading the .dockerstache file in a template dir and looking
for config/actions found inside it
"""
import os
import six
import json
import subprocess
from . import get_logger
LOGGER = get_logger()
def absolute_path(p):
result = p
if not os.path.isabs(p):
result = os.path.join(os.getcwd(), p)
return os.path.normpath(result)
class Dotfile(dict):
"""
object to encapsulate access to the .dockerstache
file in a template
"""
def __init__(self, opts):
super(Dotfile, self).__init__()
self.options = opts
self.template_dir = opts['input']
self.dot_file = os.path.join(self.template_dir, '.dockerstache')
self.setdefault('pre_script', None)
self.setdefault('post_script', None)
self.setdefault('context', None)
self.setdefault('defaults', None)
self.setdefault('output', None)
self.setdefault('output_path', None)
self.setdefault('context_path', None)
self.setdefault('defaults_path', None)
self.setdefault('input', self.template_dir)
self.setdefault('input_path', None)
self.setdefault('inclusive', False)
self.setdefault('exclude', [])
def exists(self):
"""check dotfile exists"""
return os.path.exists(self.dot_file)
def load(self):
"""
read dotfile and populate self
opts will override the dotfile settings,
make sure everything is synced in both
opts and this object
"""
if self.exists():
with open(self.dot_file, 'r') as handle:
self.update(json.load(handle))
if self.options['context'] is not None:
self['context'] = self.options['context']
else:
self.options['context'] = self['context']
if self.options['defaults'] is not None:
self['defaults'] = self.options['defaults']
else:
self.options['defaults'] = self['defaults']
if self.options['output'] is not None:
self['output'] = self.options['output']
if self.options.get('inclusive', False):
self['inclusive'] = True
if self.options.get('exclude', []):
self['exclude'].extend(self.options['exclude'])
if self['output'] is None:
self['output'] = os.path.join(os.getcwd(), 'dockerstache-output')
self['output_path'] = self.abs_output_dir()
self['input_path'] = self.abs_input_dir()
if self['context'] is not None:
self['context_path'] = absolute_path(self['context'])
if self['defaults'] is not None:
self['defaults_path'] = absolute_path(self['defaults'])
def __enter__(self):
self.load()
self.pre_script()
return self
def __exit__(self, *args):
if args:
if args[0] is None:
self.post_script()
else:
msg = "Error running dockerstache command: {}".format(args[0])
LOGGER.error(msg)
raise
def abs_input_dir(self):
"""
compute the abs path to the input dir
"""
return absolute_path(self.template_dir)
def abs_output_dir(self):
"""compute absolute output path"""
return absolute_path(self['output'])
def env_dictionary(self):
"""
convert the options to this script into an
env var dictionary for pre and post scripts
"""
none_to_str = lambda x: str(x) if x else ""
return {"DOCKERSTACHE_{}".format(k.upper()): none_to_str(v) for k, v in six.iteritems(self)}
def pre_script(self):
"""
execute the pre script if it is defined
"""
if self['pre_script'] is None:
return
LOGGER.info("Executing pre script: {}".format(self['pre_script']))
cmd = self['pre_script']
execute_command(self.abs_input_dir(), cmd, self.env_dictionary())
LOGGER.info("Pre Script completed")
def post_script(self):
if self['post_script'] is None:
return
LOGGER.info("Executing post script: {}".format(self['post_script']))
cmd = self['post_script']
execute_command(self.template_dir, cmd, self.env_dictionary())
LOGGER.info("Post Script completed")
|
evansde77/dockerstache
|
src/dockerstache/dotfile.py
|
Dotfile.load
|
python
|
def load(self):
if self.exists():
with open(self.dot_file, 'r') as handle:
self.update(json.load(handle))
if self.options['context'] is not None:
self['context'] = self.options['context']
else:
self.options['context'] = self['context']
if self.options['defaults'] is not None:
self['defaults'] = self.options['defaults']
else:
self.options['defaults'] = self['defaults']
if self.options['output'] is not None:
self['output'] = self.options['output']
if self.options.get('inclusive', False):
self['inclusive'] = True
if self.options.get('exclude', []):
self['exclude'].extend(self.options['exclude'])
if self['output'] is None:
self['output'] = os.path.join(os.getcwd(), 'dockerstache-output')
self['output_path'] = self.abs_output_dir()
self['input_path'] = self.abs_input_dir()
if self['context'] is not None:
self['context_path'] = absolute_path(self['context'])
if self['defaults'] is not None:
self['defaults_path'] = absolute_path(self['defaults'])
|
read dotfile and populate self
opts will override the dotfile settings,
make sure everything is synced in both
opts and this object
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/dotfile.py#L91-L126
|
[
"def exists(self):\n \"\"\"check dotfile exists\"\"\"\n return os.path.exists(self.dot_file)\n"
] |
class Dotfile(dict):
"""
object to encapsulate access to the .dockerstache
file in a template
"""
def __init__(self, opts):
super(Dotfile, self).__init__()
self.options = opts
self.template_dir = opts['input']
self.dot_file = os.path.join(self.template_dir, '.dockerstache')
self.setdefault('pre_script', None)
self.setdefault('post_script', None)
self.setdefault('context', None)
self.setdefault('defaults', None)
self.setdefault('output', None)
self.setdefault('output_path', None)
self.setdefault('context_path', None)
self.setdefault('defaults_path', None)
self.setdefault('input', self.template_dir)
self.setdefault('input_path', None)
self.setdefault('inclusive', False)
self.setdefault('exclude', [])
def exists(self):
"""check dotfile exists"""
return os.path.exists(self.dot_file)
def __enter__(self):
self.load()
self.pre_script()
return self
def __exit__(self, *args):
if args:
if args[0] is None:
self.post_script()
else:
msg = "Error running dockerstache command: {}".format(args[0])
LOGGER.error(msg)
raise
def abs_input_dir(self):
"""
compute the abs path to the input dir
"""
return absolute_path(self.template_dir)
def abs_output_dir(self):
"""compute absolute output path"""
return absolute_path(self['output'])
def env_dictionary(self):
"""
convert the options to this script into an
env var dictionary for pre and post scripts
"""
none_to_str = lambda x: str(x) if x else ""
return {"DOCKERSTACHE_{}".format(k.upper()): none_to_str(v) for k, v in six.iteritems(self)}
def pre_script(self):
"""
execute the pre script if it is defined
"""
if self['pre_script'] is None:
return
LOGGER.info("Executing pre script: {}".format(self['pre_script']))
cmd = self['pre_script']
execute_command(self.abs_input_dir(), cmd, self.env_dictionary())
LOGGER.info("Pre Script completed")
def post_script(self):
if self['post_script'] is None:
return
LOGGER.info("Executing post script: {}".format(self['post_script']))
cmd = self['post_script']
execute_command(self.template_dir, cmd, self.env_dictionary())
LOGGER.info("Post Script completed")
|
evansde77/dockerstache
|
src/dockerstache/dotfile.py
|
Dotfile.env_dictionary
|
python
|
def env_dictionary(self):
none_to_str = lambda x: str(x) if x else ""
return {"DOCKERSTACHE_{}".format(k.upper()): none_to_str(v) for k, v in six.iteritems(self)}
|
convert the options to this script into an
env var dictionary for pre and post scripts
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/dotfile.py#L152-L158
| null |
class Dotfile(dict):
"""
object to encapsulate access to the .dockerstache
file in a template
"""
def __init__(self, opts):
super(Dotfile, self).__init__()
self.options = opts
self.template_dir = opts['input']
self.dot_file = os.path.join(self.template_dir, '.dockerstache')
self.setdefault('pre_script', None)
self.setdefault('post_script', None)
self.setdefault('context', None)
self.setdefault('defaults', None)
self.setdefault('output', None)
self.setdefault('output_path', None)
self.setdefault('context_path', None)
self.setdefault('defaults_path', None)
self.setdefault('input', self.template_dir)
self.setdefault('input_path', None)
self.setdefault('inclusive', False)
self.setdefault('exclude', [])
def exists(self):
"""check dotfile exists"""
return os.path.exists(self.dot_file)
def load(self):
"""
read dotfile and populate self
opts will override the dotfile settings,
make sure everything is synced in both
opts and this object
"""
if self.exists():
with open(self.dot_file, 'r') as handle:
self.update(json.load(handle))
if self.options['context'] is not None:
self['context'] = self.options['context']
else:
self.options['context'] = self['context']
if self.options['defaults'] is not None:
self['defaults'] = self.options['defaults']
else:
self.options['defaults'] = self['defaults']
if self.options['output'] is not None:
self['output'] = self.options['output']
if self.options.get('inclusive', False):
self['inclusive'] = True
if self.options.get('exclude', []):
self['exclude'].extend(self.options['exclude'])
if self['output'] is None:
self['output'] = os.path.join(os.getcwd(), 'dockerstache-output')
self['output_path'] = self.abs_output_dir()
self['input_path'] = self.abs_input_dir()
if self['context'] is not None:
self['context_path'] = absolute_path(self['context'])
if self['defaults'] is not None:
self['defaults_path'] = absolute_path(self['defaults'])
def __enter__(self):
self.load()
self.pre_script()
return self
def __exit__(self, *args):
if args:
if args[0] is None:
self.post_script()
else:
msg = "Error running dockerstache command: {}".format(args[0])
LOGGER.error(msg)
raise
def abs_input_dir(self):
"""
compute the abs path to the input dir
"""
return absolute_path(self.template_dir)
def abs_output_dir(self):
"""compute absolute output path"""
return absolute_path(self['output'])
def pre_script(self):
"""
execute the pre script if it is defined
"""
if self['pre_script'] is None:
return
LOGGER.info("Executing pre script: {}".format(self['pre_script']))
cmd = self['pre_script']
execute_command(self.abs_input_dir(), cmd, self.env_dictionary())
LOGGER.info("Pre Script completed")
def post_script(self):
if self['post_script'] is None:
return
LOGGER.info("Executing post script: {}".format(self['post_script']))
cmd = self['post_script']
execute_command(self.template_dir, cmd, self.env_dictionary())
LOGGER.info("Post Script completed")
|
evansde77/dockerstache
|
src/dockerstache/dotfile.py
|
Dotfile.pre_script
|
python
|
def pre_script(self):
if self['pre_script'] is None:
return
LOGGER.info("Executing pre script: {}".format(self['pre_script']))
cmd = self['pre_script']
execute_command(self.abs_input_dir(), cmd, self.env_dictionary())
LOGGER.info("Pre Script completed")
|
execute the pre script if it is defined
|
train
|
https://github.com/evansde77/dockerstache/blob/929c102e9fffde322dbf17f8e69533a00976aacb/src/dockerstache/dotfile.py#L160-L169
|
[
"def execute_command(working_dir, cmd, env_dict):\n \"\"\"\n execute_command: run the command provided in the working dir\n specified adding the env_dict settings to the\n execution environment\n\n :param working_dir: path to directory to execute command\n also gets added to the PATH\n :param cmd: Shell command to execute\n :param env_dict: dictionary of additional env vars to\n be passed to the subprocess environment\n\n \"\"\"\n proc_env = os.environ.copy()\n proc_env[\"PATH\"] = \"{}:{}:.\".format(proc_env[\"PATH\"], working_dir)\n proc_env.update(env_dict)\n proc = subprocess.Popen(\n cmd,\n cwd=working_dir,\n env=proc_env,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n status = proc.wait()\n stdout, stderr = proc.communicate()\n if status:\n msg = (\n \"Non zero {} exit from command {}\\n\"\n \"Stdout: {}\\n\"\n \"Stderr: {}\\n\"\n ).format(status, cmd, stdout, stderr)\n LOGGER.error(msg)\n raise RuntimeError(msg)\n LOGGER.info(stdout)\n",
"def abs_input_dir(self):\n \"\"\"\n compute the abs path to the input dir\n \"\"\"\n return absolute_path(self.template_dir)\n",
"def env_dictionary(self):\n \"\"\"\n convert the options to this script into an\n env var dictionary for pre and post scripts\n \"\"\"\n none_to_str = lambda x: str(x) if x else \"\"\n return {\"DOCKERSTACHE_{}\".format(k.upper()): none_to_str(v) for k, v in six.iteritems(self)}\n"
] |
class Dotfile(dict):
"""
object to encapsulate access to the .dockerstache
file in a template
"""
def __init__(self, opts):
super(Dotfile, self).__init__()
self.options = opts
self.template_dir = opts['input']
self.dot_file = os.path.join(self.template_dir, '.dockerstache')
self.setdefault('pre_script', None)
self.setdefault('post_script', None)
self.setdefault('context', None)
self.setdefault('defaults', None)
self.setdefault('output', None)
self.setdefault('output_path', None)
self.setdefault('context_path', None)
self.setdefault('defaults_path', None)
self.setdefault('input', self.template_dir)
self.setdefault('input_path', None)
self.setdefault('inclusive', False)
self.setdefault('exclude', [])
def exists(self):
"""check dotfile exists"""
return os.path.exists(self.dot_file)
def load(self):
"""
read dotfile and populate self
opts will override the dotfile settings,
make sure everything is synced in both
opts and this object
"""
if self.exists():
with open(self.dot_file, 'r') as handle:
self.update(json.load(handle))
if self.options['context'] is not None:
self['context'] = self.options['context']
else:
self.options['context'] = self['context']
if self.options['defaults'] is not None:
self['defaults'] = self.options['defaults']
else:
self.options['defaults'] = self['defaults']
if self.options['output'] is not None:
self['output'] = self.options['output']
if self.options.get('inclusive', False):
self['inclusive'] = True
if self.options.get('exclude', []):
self['exclude'].extend(self.options['exclude'])
if self['output'] is None:
self['output'] = os.path.join(os.getcwd(), 'dockerstache-output')
self['output_path'] = self.abs_output_dir()
self['input_path'] = self.abs_input_dir()
if self['context'] is not None:
self['context_path'] = absolute_path(self['context'])
if self['defaults'] is not None:
self['defaults_path'] = absolute_path(self['defaults'])
def __enter__(self):
self.load()
self.pre_script()
return self
def __exit__(self, *args):
if args:
if args[0] is None:
self.post_script()
else:
msg = "Error running dockerstache command: {}".format(args[0])
LOGGER.error(msg)
raise
def abs_input_dir(self):
"""
compute the abs path to the input dir
"""
return absolute_path(self.template_dir)
def abs_output_dir(self):
"""compute absolute output path"""
return absolute_path(self['output'])
def env_dictionary(self):
"""
convert the options to this script into an
env var dictionary for pre and post scripts
"""
none_to_str = lambda x: str(x) if x else ""
return {"DOCKERSTACHE_{}".format(k.upper()): none_to_str(v) for k, v in six.iteritems(self)}
def post_script(self):
if self['post_script'] is None:
return
LOGGER.info("Executing post script: {}".format(self['post_script']))
cmd = self['post_script']
execute_command(self.template_dir, cmd, self.env_dictionary())
LOGGER.info("Post Script completed")
|
ScottDuckworth/python-anyvcs
|
anyvcs/git.py
|
GitRepo.clone
|
python
|
def clone(cls, srcpath, destpath, encoding='utf-8'):
cmd = [GIT, 'clone', '--quiet', '--bare', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath, encoding)
|
Clone an existing repository to a new bare repository.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/git.py#L59-L63
| null |
class GitRepo(VCSRepo):
"""A git repository
Valid revisions are anything that git considers as a revision.
"""
@classmethod
@classmethod
def create(cls, path, encoding='utf-8'):
"""Create a new bare repository"""
cmd = [GIT, 'init', '--quiet', '--bare', path]
subprocess.check_call(cmd)
return cls(path, encoding)
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
return path
def canonical_rev(self, rev):
rev = str(rev)
if rev_rx.match(rev):
return rev
else:
cmd = [GIT, 'rev-parse', rev]
return self._command(cmd).decode().rstrip()
def compose_rev(self, branch, rev):
return self.canonical_rev(rev)
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev = self.canonical_rev(rev)
path = type(self).cleanPath(path)
forcedir = False
if path.endswith('/'):
forcedir = True
path = path.rstrip('/')
ltrim = len(path)
# make sure the path exists
if path == '':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = rev
return [entry]
else:
epath = path.rstrip('/').encode(self.encoding)
cmd = [GIT, 'ls-tree', '-z', rev, '--', epath]
output = self._command(cmd)
if not output:
raise PathDoesNotExist(rev, path)
meta, ename = output.split(b'\t', 1)
meta = meta.decode().split()
if meta[1] == 'tree':
if not (directory or path.endswith('/')):
path = path + '/'
elif forcedir:
raise PathDoesNotExist(rev, path)
cmd = [GIT, 'ls-tree', '-z']
if recursive:
cmd.append('-r')
if recursive_dirs:
cmd.append('-t')
if 'size' in report:
cmd.append('-l')
epath = path.encode(self.encoding)
cmd.extend([rev, '--', epath])
output = self._command(cmd).rstrip(b'\0')
if not output:
return []
results = []
files = {}
for line in output.split(b'\0'):
meta, ename = line.split(b'\t', 1)
meta = meta.decode().split()
mode = int(meta[0], 8)
name = ename.decode(self.encoding, 'replace')
if recursive_dirs and path == name + '/':
continue
assert name.startswith(path), 'unexpected output: ' + str(line)
entry = attrdict(path=name)
entry_name = name[ltrim:].lstrip('/')
if entry_name:
entry.name = entry_name
if stat.S_ISDIR(mode):
entry.type = 'd'
elif stat.S_ISREG(mode):
entry.type = 'f'
if 'executable' in report:
entry.executable = bool(mode & stat.S_IXUSR)
if 'size' in report:
entry.size = int(meta[3])
elif stat.S_ISLNK(mode):
entry.type = 'l'
if 'target' in report:
entry.target = self._cat(rev, ename).decode(self.encoding, 'replace')
else:
assert False, 'unexpected output: ' + str(line)
results.append(entry)
files[ename] = entry
if 'commit' in report:
cmd = [GIT, 'log', '--pretty=format:%H', '--name-only', '-m', '--first-parent', '-z', rev]
p = subprocess.Popen(cmd, cwd=self.path, stdout=subprocess.PIPE)
commit = readuntil(p.stdout, b'\n').rstrip().split(b'\0')[-1]
while commit and files:
while True:
f = readuntil(p.stdout, b'\0')
if f == b'':
commit = readuntil(p.stdout, b'\n').split(b'\0')[-1]
break
if not recursive:
d = f[len(path):].find(b'/')
if d != -1:
f = f[:len(path) + d]
if f in files:
files[f].commit = commit.decode()
del files[f]
p.stdout.close()
p.terminate()
p.wait()
return results
def _cat(self, rev, path):
rp = rev.encode('ascii') + b':' + path
cmd = [GIT, 'cat-file', 'blob', rp]
return self._command(cmd)
def cat(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
epath = path.encode(self.encoding, 'strict')
return self._cat(rev, epath)
def readlink(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
epath = path.encode(self.encoding, 'strict')
return self._cat(rev, epath).decode(self.encoding, 'replace')
def branches(self):
cmd = [GIT, 'branch']
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines():
m = branch_rx.match(line)
assert m, 'unexpected output: ' + str(line)
results.append(m.group('name'))
return results
def tags(self):
cmd = [GIT, 'tag']
output = self._command(cmd).decode(self.encoding, 'replace')
return output.splitlines()
def heads(self):
return self.branches() + self.tags()
def empty(self):
cmd = [GIT, 'rev-list', '-n1', '--all']
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return not rev_rx.match(stdout.decode())
def __contains__(self, rev):
cmd = [GIT, 'rev-list', '-n', '1', rev]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [GIT, 'rev-list', '--all']
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return len(stdout.splitlines())
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
cmd = [GIT, 'log', '-z', '--pretty=format:%H%n%P%n%ai%n%an <%ae>%n%B', '--encoding=none']
if limit is not None:
cmd.append('-' + str(limit))
if firstparent:
cmd.append('--first-parent')
if merges is not None:
if merges:
cmd.append('--merges')
else:
cmd.append('--no-merges')
single = False
if revrange is None:
if self.empty():
return []
cmd.append('--all')
elif isinstance(revrange, (tuple, list)):
if revrange[0] is None:
if revrange[1] is None:
if self.empty():
return []
cmd.append('--all')
else:
cmd.append(revrange[1])
else:
if revrange[1] is None:
cmd.append(revrange[0] + '..')
else:
cmd.append(revrange[0] + '..' + revrange[1])
else:
rev = self.canonical_rev(revrange)
entry = self._commit_cache.get(rev)
if entry:
entry._cached = True
return entry
cmd.extend(['-1', rev])
single = True
if path:
if follow:
cmd.append('--follow')
cmd.extend(['--', type(self).cleanPath(path)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for log in output.split('\0'):
rev, parents, date, author, message = log.split('\n', 4)
parents = parents.split()
date = parse_isodate(date)
entry = CommitLogEntry(rev, parents, date, author, message)
if rev not in self._commit_cache:
self._commit_cache[rev] = entry
if single:
return entry
results.append(entry)
return results
def changed(self, rev):
cmd = [GIT, 'diff-tree', '-z', '-C', '-r', '-m', '--no-commit-id', '--first-parent', '--root', rev]
output = self._command(cmd)
results = []
for line in output.rstrip(b'\0').split(b'\0:'):
if not line:
continue
path = line.split(b'\0')
meta = path.pop(0).split()
status = meta[3].decode()[0]
src_path = path[0].decode(self.encoding, 'replace')
if len(path) == 2:
dst_path = path[1].decode(self.encoding, 'replace')
entry = FileChangeInfo(dst_path, str(status), src_path)
else:
entry = FileChangeInfo(src_path, str(status))
results.append(entry)
return results
def pdiff(self, rev):
cmd = [GIT, 'diff-tree', '-p', '-r', '-m', '--no-commit-id', '--first-parent', '--root', rev]
return self._command(cmd).decode(self.encoding)
def diff(self, rev_a, rev_b, path=None):
cmd = [GIT, 'diff', rev_a, rev_b]
if path is not None:
cmd.extend(['--', type(self).cleanPath(path)])
return self._command(cmd).decode(self.encoding)
def ancestor(self, rev1, rev2):
cmd = [GIT, 'merge-base', rev1, rev2]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
if p.returncode == 0:
return stdout.decode().rstrip()
elif p.returncode == 1:
return None
else:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
def blame(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
cmd = [GIT, 'blame', '--root', '--encoding=none', '-p', rev, '--', path]
output = self._command(cmd)
rev = None
revinfo = {}
results = []
for line in output.splitlines():
if line.startswith(b'\t'):
ri = revinfo[rev]
author = ri['author'] + ' ' + ri['author-mail']
ts = int(ri['author-time'])
tz = UTCOffset(str(ri['author-tz']))
date = datetime.datetime.fromtimestamp(ts, tz)
entry = BlameInfo(rev, author, date, line[1:])
results.append(entry)
else:
k, v = line.decode(self.encoding, 'replace').split(None, 1)
if rev_rx.match(k):
rev = k
else:
revinfo.setdefault(rev, {})[k] = v
return results
def tip(self, head):
return self.canonical_rev(head)
|
ScottDuckworth/python-anyvcs
|
anyvcs/git.py
|
GitRepo.create
|
python
|
def create(cls, path, encoding='utf-8'):
cmd = [GIT, 'init', '--quiet', '--bare', path]
subprocess.check_call(cmd)
return cls(path, encoding)
|
Create a new bare repository
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/git.py#L66-L70
| null |
class GitRepo(VCSRepo):
"""A git repository
Valid revisions are anything that git considers as a revision.
"""
@classmethod
def clone(cls, srcpath, destpath, encoding='utf-8'):
"""Clone an existing repository to a new bare repository."""
cmd = [GIT, 'clone', '--quiet', '--bare', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath, encoding)
@classmethod
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
return path
def canonical_rev(self, rev):
rev = str(rev)
if rev_rx.match(rev):
return rev
else:
cmd = [GIT, 'rev-parse', rev]
return self._command(cmd).decode().rstrip()
def compose_rev(self, branch, rev):
return self.canonical_rev(rev)
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev = self.canonical_rev(rev)
path = type(self).cleanPath(path)
forcedir = False
if path.endswith('/'):
forcedir = True
path = path.rstrip('/')
ltrim = len(path)
# make sure the path exists
if path == '':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = rev
return [entry]
else:
epath = path.rstrip('/').encode(self.encoding)
cmd = [GIT, 'ls-tree', '-z', rev, '--', epath]
output = self._command(cmd)
if not output:
raise PathDoesNotExist(rev, path)
meta, ename = output.split(b'\t', 1)
meta = meta.decode().split()
if meta[1] == 'tree':
if not (directory or path.endswith('/')):
path = path + '/'
elif forcedir:
raise PathDoesNotExist(rev, path)
cmd = [GIT, 'ls-tree', '-z']
if recursive:
cmd.append('-r')
if recursive_dirs:
cmd.append('-t')
if 'size' in report:
cmd.append('-l')
epath = path.encode(self.encoding)
cmd.extend([rev, '--', epath])
output = self._command(cmd).rstrip(b'\0')
if not output:
return []
results = []
files = {}
for line in output.split(b'\0'):
meta, ename = line.split(b'\t', 1)
meta = meta.decode().split()
mode = int(meta[0], 8)
name = ename.decode(self.encoding, 'replace')
if recursive_dirs and path == name + '/':
continue
assert name.startswith(path), 'unexpected output: ' + str(line)
entry = attrdict(path=name)
entry_name = name[ltrim:].lstrip('/')
if entry_name:
entry.name = entry_name
if stat.S_ISDIR(mode):
entry.type = 'd'
elif stat.S_ISREG(mode):
entry.type = 'f'
if 'executable' in report:
entry.executable = bool(mode & stat.S_IXUSR)
if 'size' in report:
entry.size = int(meta[3])
elif stat.S_ISLNK(mode):
entry.type = 'l'
if 'target' in report:
entry.target = self._cat(rev, ename).decode(self.encoding, 'replace')
else:
assert False, 'unexpected output: ' + str(line)
results.append(entry)
files[ename] = entry
if 'commit' in report:
cmd = [GIT, 'log', '--pretty=format:%H', '--name-only', '-m', '--first-parent', '-z', rev]
p = subprocess.Popen(cmd, cwd=self.path, stdout=subprocess.PIPE)
commit = readuntil(p.stdout, b'\n').rstrip().split(b'\0')[-1]
while commit and files:
while True:
f = readuntil(p.stdout, b'\0')
if f == b'':
commit = readuntil(p.stdout, b'\n').split(b'\0')[-1]
break
if not recursive:
d = f[len(path):].find(b'/')
if d != -1:
f = f[:len(path) + d]
if f in files:
files[f].commit = commit.decode()
del files[f]
p.stdout.close()
p.terminate()
p.wait()
return results
def _cat(self, rev, path):
rp = rev.encode('ascii') + b':' + path
cmd = [GIT, 'cat-file', 'blob', rp]
return self._command(cmd)
def cat(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
epath = path.encode(self.encoding, 'strict')
return self._cat(rev, epath)
def readlink(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
epath = path.encode(self.encoding, 'strict')
return self._cat(rev, epath).decode(self.encoding, 'replace')
def branches(self):
cmd = [GIT, 'branch']
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines():
m = branch_rx.match(line)
assert m, 'unexpected output: ' + str(line)
results.append(m.group('name'))
return results
def tags(self):
cmd = [GIT, 'tag']
output = self._command(cmd).decode(self.encoding, 'replace')
return output.splitlines()
def heads(self):
return self.branches() + self.tags()
def empty(self):
cmd = [GIT, 'rev-list', '-n1', '--all']
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return not rev_rx.match(stdout.decode())
def __contains__(self, rev):
cmd = [GIT, 'rev-list', '-n', '1', rev]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [GIT, 'rev-list', '--all']
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return len(stdout.splitlines())
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
cmd = [GIT, 'log', '-z', '--pretty=format:%H%n%P%n%ai%n%an <%ae>%n%B', '--encoding=none']
if limit is not None:
cmd.append('-' + str(limit))
if firstparent:
cmd.append('--first-parent')
if merges is not None:
if merges:
cmd.append('--merges')
else:
cmd.append('--no-merges')
single = False
if revrange is None:
if self.empty():
return []
cmd.append('--all')
elif isinstance(revrange, (tuple, list)):
if revrange[0] is None:
if revrange[1] is None:
if self.empty():
return []
cmd.append('--all')
else:
cmd.append(revrange[1])
else:
if revrange[1] is None:
cmd.append(revrange[0] + '..')
else:
cmd.append(revrange[0] + '..' + revrange[1])
else:
rev = self.canonical_rev(revrange)
entry = self._commit_cache.get(rev)
if entry:
entry._cached = True
return entry
cmd.extend(['-1', rev])
single = True
if path:
if follow:
cmd.append('--follow')
cmd.extend(['--', type(self).cleanPath(path)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for log in output.split('\0'):
rev, parents, date, author, message = log.split('\n', 4)
parents = parents.split()
date = parse_isodate(date)
entry = CommitLogEntry(rev, parents, date, author, message)
if rev not in self._commit_cache:
self._commit_cache[rev] = entry
if single:
return entry
results.append(entry)
return results
def changed(self, rev):
    """Return the FileChangeInfo list for files changed by *rev*.

    Merges are diffed against their first parent only; root commits are
    diffed against the empty tree (--root).
    """
    # -z: NUL-separated raw records; -C: detect copies; -r: recurse.
    cmd = [GIT, 'diff-tree', '-z', '-C', '-r', '-m', '--no-commit-id', '--first-parent', '--root', rev]
    output = self._command(cmd)
    results = []
    # Raw -z records look like b':<meta fields>\0<path>[\0<dst path>]'.
    # Splitting on b'\0:' separates the records (stripping each record's
    # leading colon except the first one's).
    for line in output.rstrip(b'\0').split(b'\0:'):
        if not line:
            continue
        path = line.split(b'\0')
        meta = path.pop(0).split()
        # NOTE(review): the raw meta fields are
        # srcmode dstmode srcsha dstsha status — index 3 selects the dst
        # sha rather than the status field; verify whether this should be
        # meta[4], or whether the dump garbled the index.
        status = meta[3].decode()[0]
        src_path = path[0].decode(self.encoding, 'replace')
        if len(path) == 2:
            # Rename/copy record: second path is the destination.
            dst_path = path[1].decode(self.encoding, 'replace')
            entry = FileChangeInfo(dst_path, str(status), src_path)
        else:
            entry = FileChangeInfo(src_path, str(status))
        results.append(entry)
    return results
def pdiff(self, rev):
    """Return the unified diff that *rev* introduces over its first parent."""
    args = [
        GIT, 'diff-tree', '-p', '-r', '-m',
        '--no-commit-id', '--first-parent', '--root', rev,
    ]
    return self._command(args).decode(self.encoding)
def diff(self, rev_a, rev_b, path=None):
    """Return the unified diff between two revisions, optionally limited to *path*."""
    args = [GIT, 'diff', rev_a, rev_b]
    if path is not None:
        args += ['--', type(self).cleanPath(path)]
    return self._command(args).decode(self.encoding)
def ancestor(self, rev1, rev2):
    """Return the merge base of two revisions, or None if they share no ancestor."""
    cmd = [GIT, 'merge-base', rev1, rev2]
    proc = subprocess.Popen(
        cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = proc.communicate()
    # git merge-base exits 0 on success and 1 when no ancestor exists;
    # anything else is a real failure.
    if proc.returncode == 0:
        return out.decode().rstrip()
    if proc.returncode == 1:
        return None
    raise subprocess.CalledProcessError(proc.returncode, cmd, err)
def blame(self, rev, path):
    """Annotate each line of *path* at *rev* with last-change information.

    :returns: list of BlameInfo objects, one per line of the file.
    :raises BadFileType: if the path is not a regular file.
    """
    path = type(self).cleanPath(path)
    # Verify the target exists and is a regular file before blaming.
    ls = self.ls(rev, path, directory=True)
    assert len(ls) == 1
    if ls[0].get('type') != 'f':
        raise BadFileType(rev, path)
    cmd = [GIT, 'blame', '--root', '--encoding=none', '-p', rev, '--', path]
    output = self._command(cmd)
    rev = None
    revinfo = {}
    results = []
    # Porcelain (-p) output: groups of 'key value' header lines describing
    # a commit, each followed by content lines prefixed with a TAB.
    for line in output.splitlines():
        if line.startswith(b'\t'):
            # Content line: build a BlameInfo from the headers collected
            # for the commit currently in effect.
            ri = revinfo[rev]
            author = ri['author'] + ' ' + ri['author-mail']
            ts = int(ri['author-time'])
            tz = UTCOffset(str(ri['author-tz']))
            date = datetime.datetime.fromtimestamp(ts, tz)
            entry = BlameInfo(rev, author, date, line[1:])
            results.append(entry)
        else:
            # NOTE(review): header lines with no value (e.g. 'boundary')
            # would make this 2-way unpack fail — confirm they cannot occur
            # with these options.
            k, v = line.decode(self.encoding, 'replace').split(None, 1)
            if rev_rx.match(k):
                # A 40-hex first token starts a new commit header group.
                rev = k
            else:
                # Accumulate 'author', 'author-mail', 'author-time', etc.
                revinfo.setdefault(rev, {})[k] = v
    return results
def tip(self, head):
    """Return the canonical revision at the tip of the named head."""
    return self.canonical_rev(head)
|
ScottDuckworth/python-anyvcs
|
anyvcs/common.py
|
parse_isodate
|
python
|
def parse_isodate(datestr):
    """Parse a string that loosely fits an ISO 8601 date-time string.

    :param str datestr: the string to parse; the pattern is searched for
        anywhere inside it (isodate_rx.search).
    :returns datetime.datetime: timezone-aware only when the input carries
        a timezone designator; otherwise naive.
    :raises AssertionError: if no date is found in the string.
    """
    m = isodate_rx.search(datestr)
    assert m, 'unrecognized date format: ' + datestr
    year, month, day = m.group('year', 'month', 'day')
    hour, minute, second, fraction = m.group('hour', 'minute', 'second', 'fraction')
    tz, tzhh, tzmm = m.group('tz', 'tzhh', 'tzmm')
    dt = datetime.datetime(int(year), int(month), int(day), int(hour))
    if fraction is None:
        fraction = 0
    else:
        fraction = float('0.' + fraction)
    if minute is None:
        # No minutes given: treat the fraction as a fraction of an hour.
        dt = dt.replace(minute=int(60 * fraction))
    else:
        dt = dt.replace(minute=int(minute))
    if second is None:
        # No seconds given: treat the fraction as a fraction of a minute.
        # NOTE(review): when minute AND second are both absent, the same
        # fraction is applied once to minutes and again to seconds —
        # confirm this double application is intended.
        dt = dt.replace(second=int(60 * fraction))
    else:
        dt = dt.replace(second=int(second), microsecond=int(1000000 * fraction))
    if tz is not None:
        if tz[0] == 'Z':
            offset = 0
        else:
            offset = datetime.timedelta(minutes=int(tzmm or 0), hours=int(tzhh))
            if tz[0] == '-':
                offset = -offset
        dt = dt.replace(tzinfo=UTCOffset(offset))
    return dt
|
Parse a string that loosely fits an ISO 8601 formatted date-time string
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/common.py#L43-L72
| null |
# Copyright (c) 2013-2014, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import json
import os
import re
import subprocess
from abc import ABCMeta, abstractmethod, abstractproperty
from functools import wraps
from .hashdict import HashDict
multislash_rx = re.compile(r'//+')
isodate_rx = re.compile(r'(?P<year>\d{4})-?(?P<month>\d{2})-?(?P<day>\d{2})(?:\s*(?:T\s*)?(?P<hour>\d{2})(?::?(?P<minute>\d{2})(?::?(?P<second>\d{2}))?)?(?:[,.](?P<fraction>\d+))?(?:\s*(?P<tz>(?:Z|[+-](?P<tzhh>\d{2})(?::?(?P<tzmm>\d{2}))?)))?)')
tz_rx = re.compile(r'^(?P<tz>(?:Z|[+-](?P<tzhh>\d{2})(?::?(?P<tzmm>\d{2}))?))$')
def command(cmd, input=None, **kwargs):
    """Run *cmd* and return its captured stdout as bytes.

    :raises subprocess.CalledProcessError: on a non-zero exit status.

    NOTE(review): the *input* argument is accepted but never used on
    either code path — confirm whether any caller relies on it.
    """
    try:
        check_output = subprocess.check_output
    except AttributeError:
        # subprocess.check_output was added in Python 2.7; emulate it.
        kwargs.setdefault('stdout', subprocess.PIPE)
        proc = subprocess.Popen(cmd, **kwargs)
        out, _ = proc.communicate()
        if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd)
        return out
    return check_output(cmd, **kwargs)
class ABCMetaDocStringInheritor(ABCMeta):
    '''A variation on
    http://groups.google.com/group/comp.lang.python/msg/26f7b4fcb4d66c95
    by Paul McGuire

    Metaclass that copies missing docstrings onto a new class (and its
    attributes) from the first base class in MRO order that provides one.
    '''
    def __new__(meta, name, bases, clsdict):
        # Inherit the class docstring if this class does not define one.
        if not('__doc__' in clsdict and clsdict['__doc__']):
            for mro_cls in (mro_cls for base in bases for mro_cls in base.mro()):
                doc = mro_cls.__doc__
                if doc:
                    clsdict['__doc__'] = doc
                    break
        # Likewise for each attribute without a docstring: pull the
        # docstring from the first base exposing the same attribute name.
        # NOTE(review): this assumes every class attribute has a writable
        # __doc__; a plain data attribute with a falsy __doc__ would raise
        # AttributeError on assignment — confirm only callables appear here.
        for attr, attribute in clsdict.items():
            if not attribute.__doc__:
                for mro_cls in (
                    mro_cls for base in bases for mro_cls in base.mro()
                    if hasattr(mro_cls, attr)
                ):
                    doc = getattr(getattr(mro_cls, attr), '__doc__')
                    if doc:
                        attribute.__doc__ = doc
                        break
        return ABCMeta.__new__(meta, name, bases, clsdict)
class UnknownVCSType(Exception):
    """Error for an unrecognized VCS type."""
    pass


class RevisionPathException(Exception):
    """Base class for errors concerning a (revision, path) pair."""

    def __init__(self, rev, path):
        super(RevisionPathException, self).__init__(rev, path)


class PathDoesNotExist(RevisionPathException):
    """Raised when a path does not exist at a revision."""
    pass


class BadFileType(RevisionPathException):
    """Raised when a path exists at a revision but has the wrong file type."""
    pass
class attrdict(dict):
    """Dictionary whose items are also reachable as attributes.

    Attribute names beginning with an underscore are stored as ordinary
    instance attributes instead of dict items.

    NOTE(review): a missing attribute raises KeyError rather than
    AttributeError, which confuses hasattr()/copy(); callers may depend
    on the KeyError, so confirm before changing it.
    """

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        if name.startswith('_'):
            dict.__setattr__(self, name, value)
        else:
            self[name] = value

    def __delattr__(self, name):
        del self[name]
class CommitLogEntry(object):
    """One entry of the commit log.

    :ivar rev: Revision name
    :ivar parents: Parents of the revision
    :ivar datetime date: Timestamp of the revision
    :ivar str author: Author of the revision
    :ivar str message: Message from committer
    """

    def __init__(self, rev, parents, date, author, message):
        self.rev = rev
        self.parents = parents
        self.date = date
        self.author = author
        self.message = message

    def __str__(self):
        return str(self.rev)

    def __repr__(self):
        cls = type(self)
        return str('<%s.%s %s>' % (cls.__module__, cls.__name__, self.rev))

    @property
    def subject(self):
        """First line of the commit message."""
        first, _, _ = self.message.partition('\n')
        return first

    def to_json(self):
        """Serialize this entry to a JSON string (schema version 1)."""
        payload = {
            'v': 1,
            'r': self.rev,
            'p': self.parents,
            'd': self.date.isoformat(),
            'a': self.author,
            'm': self.message,
        }
        return json.dumps(payload)

    @classmethod
    def from_json(cls, s):
        """Rebuild an entry from to_json() output; None on unknown schema."""
        o = json.loads(s)
        if o.get('v') != 1:
            return None
        return cls(
            rev=o['r'],
            parents=o['p'],
            date=parse_isodate(o['d']),
            author=o['a'],
            message=o['m'],
        )
class CommitLogCache(HashDict):
    """HashDict that stores CommitLogEntry objects as JSON-encoded bytes."""

    def __getitem__(self, key):
        raw = HashDict.__getitem__(self, key)
        entry = CommitLogEntry.from_json(raw.decode())
        # An unrecognized schema version deserializes to None; treat it
        # as a cache miss.
        if entry:
            return entry
        raise KeyError(key)

    def __setitem__(self, key, value):
        HashDict.__setitem__(self, key, value.to_json().encode())
class FileChangeInfo(object):
    """A change to a single path.

    :ivar str path: The path that was changed.
    :ivar str status: VCS-specific code for the change type.
    :ivar copy: The source path copied from, if any.
    """

    def __init__(self, path, status, copy=None):
        self.path = path
        self.status = status
        self.copy = copy

    def __repr__(self):
        # Added for debuggability, matching CommitLogEntry's repr style.
        return str('<%s.%s %s %s>' % (
            type(self).__module__, type(self).__name__,
            self.status, self.path))
class BlameInfo(object):
    """An annotated line in a file for a blame view.

    :ivar rev: Revision at which the line was last changed
    :ivar str author: Author of the change
    :ivar datetime date: Timestamp of the change
    :ivar str line: Line data from the file.
    """

    def __init__(self, rev, author, date, line):
        self.rev = rev
        self.author = author
        self.date = date
        self.line = line

    def __repr__(self):
        # Added for debuggability, matching CommitLogEntry's repr style.
        return str('<%s.%s %s>' % (
            type(self).__module__, type(self).__name__, self.rev))
class UTCOffset(datetime.tzinfo):
    """Fixed-offset timezone.

    :param offset: a datetime.timedelta, a number of minutes, or a string
        of the form 'Z', '+HH', '+HHMM', '+HH:MM' (or '-' variants).
    :param name: overrides the auto-generated '+HHMM'/'-HHMM' tzname.
    """

    ZERO = datetime.timedelta()

    def __init__(self, offset, name=None):
        if isinstance(offset, datetime.timedelta):
            self.offset = offset
        elif isinstance(offset, str):
            m = tz_rx.match(offset)
            assert m, 'unrecognized timezone: ' + offset
            tz, tzhh, tzmm = m.group('tz', 'tzhh', 'tzmm')
            # BUG FIX: 'Z' leaves tzhh as None, so int(tzhh) raised
            # TypeError; handle the zero-offset designator explicitly.
            if tz[0] == 'Z':
                offset = type(self).ZERO
            else:
                offset = datetime.timedelta(
                    minutes=int(tzmm or 0), hours=int(tzhh))
                if tz[0] == '-':
                    offset = -offset
            self.offset = offset
        else:
            self.offset = datetime.timedelta(minutes=offset)
        if name is not None:
            self.name = name
        elif self.offset < type(self).ZERO:
            # Use floor division: under Python 3, '/' yields a float.
            self.name = '-%02d%02d' % divmod((-self.offset).seconds // 60, 60)
        else:
            self.name = '+%02d%02d' % divmod(self.offset.seconds // 60, 60)

    def utcoffset(self, dt):
        """Return the fixed offset from UTC."""
        return self.offset

    def dst(self, dt):
        """No daylight saving: always a zero timedelta."""
        return type(self).ZERO

    def tzname(self, dt):
        """Return the timezone name ('+HHMM'/'-HHMM' unless overridden)."""
        return self.name
class VCSRepo(object):
    """Abstract base class for a version-control repository backend."""

    # NOTE(review): `__metaclass__` is Python-2 syntax; under Python 3 this
    # assignment has no effect and the metaclass is not applied — confirm
    # the supported Python versions before modernizing.
    __metaclass__ = ABCMetaDocStringInheritor

    def __init__(self, path, encoding='utf-8'):
        """Open an existing repository"""
        self.path = path
        self.encoding = encoding

    @abstractproperty
    def private_path(self):
        """Get the path to a directory which can be used to store arbitrary data

        This directory should not conflict with any of the repository internals.
        The directory should be created if it does not already exist.
        """
        raise NotImplementedError

    @property
    def _commit_cache(self):
        # Lazily create the on-disk commit cache on first access.
        try:
            return self._commit_cache_v
        except AttributeError:
            commit_cache_path = os.path.join(self.private_path, 'commit-cache')
            self._commit_cache_v = CommitLogCache(commit_cache_path)
            return self._commit_cache_v

    def _command(self, cmd, input=None, **kwargs):
        # Run a command with the repository root as the working directory.
        kwargs.setdefault('cwd', self.path)
        return command(cmd, **kwargs)

    @classmethod
    def cleanPath(cls, path):
        # Normalize a user path: strip leading slashes, collapse runs of '/'.
        path = path.lstrip('/')
        path = multislash_rx.sub('/', path)
        return path

    @abstractmethod
    def canonical_rev(self, rev):
        """Get the canonical revision identifier

        :param rev: The revision to canonicalize.
        :returns: The canonicalized revision

        The canonical revision is the revision which is natively supported by
        the underlying VCS type. In some cases, anyvcs may annotate a revision
        identifier to also encode branch information which is not safe to use
        directly with the VCS itself (e.g. as created by :meth:`compose_rev`).
        This method is a means of converting back to canonical form.
        """
        raise NotImplementedError

    @abstractmethod
    def compose_rev(self, branch, rev):
        """Compose a revision identifier which encodes branch and revision.

        :param str branch: A branch name
        :param rev: A revision (can be canonical or as constructed by
                    :meth:`compose_rev()` or :meth:`tip()`)

        The revision identifier encodes branch and revision information
        according to the particular VCS type. This is a means to unify the
        various branching models under a common interface.
        """
        raise NotImplementedError

    @abstractmethod
    def ls(
        self, rev, path, recursive=False, recursive_dirs=False,
        directory=False, report=()
    ):
        """List directory or file

        :param rev: The revision to use.
        :param path: The path to list. May start with a '/' or not.
                     Directories may end with a '/' or not.
        :param recursive: Recursively list files in subdirectories.
        :param recursive_dirs: Used when recursive=True, also list directories.
        :param directory: If path is a directory, list path itself instead of
                          its contents.
        :param report: A list or tuple of extra attributes to return that may
                       require extra processing. Recognized values are 'size',
                       'target', 'executable', and 'commit'.

        Returns a list of dictionaries with the following keys:

        **type**
          The type of the file: 'f' for file, 'd' for directory, 'l' for
          symlink.
        **name**
          The name of the file. Not present if directory=True.
        **size**
          The size of the file. Only present for files when 'size' is in
          report.
        **target**
          The target of the symlink. Only present for symlinks when
          'target' is in report.
        **executable**
          True if the file is executable, False otherwise. Only present
          for files when 'executable' is in report.

        Raises PathDoesNotExist if the path does not exist.
        """
        raise NotImplementedError

    @abstractmethod
    def cat(self, rev, path):
        """Get file contents

        :param rev: The revision to use.
        :param str path: The path to the file. Must be a file.
        :returns: The contents of the file.
        :rtype: str or bytes
        :raises PathDoesNotExist: If the path does not exist.
        :raises BadFileType: If the path is not a file.
        """
        raise NotImplementedError

    @abstractmethod
    def readlink(self, rev, path):
        """Get symbolic link target

        :param rev: The revision to use.
        :param str path: The path to the file. Must be a symbolic link.
        :returns str: The target of the symbolic link.
        :raises PathDoesNotExist: if the path does not exist.
        :raises BadFileType: if the path is not a symbolic link.
        """
        raise NotImplementedError

    @abstractmethod
    def branches(self):
        """Get list of branches

        :returns: The branches in the repository
        :rtype: list of str
        """
        raise NotImplementedError

    @abstractmethod
    def tags(self):
        """Get list of tags

        :returns: The tags in the repository
        :rtype: list of str
        """
        raise NotImplementedError

    @abstractmethod
    def heads(self):
        """Get list of heads

        :returns: The heads in the repository
        :rtype: list of str
        """
        raise NotImplementedError

    @abstractmethod
    def empty(self):
        """Test if the repository contains any commits

        :returns bool: True if the repository contains no commits.

        Commits that exist by default (e.g. a zero commit) are not counted.
        """
        # BUG FIX: previously `return NotImplementedError` handed back the
        # exception class itself instead of raising it.
        raise NotImplementedError

    @abstractmethod
    def __contains__(self, rev):
        """Test if the repository contains the specified revision
        """
        # BUG FIX: raise instead of returning the exception class.
        raise NotImplementedError

    @abstractmethod
    def __len__(self):
        """Returns the number of commits in the repository

        Commits that exist by default (e.g. a zero commit) are not counted.
        """
        # BUG FIX: raise instead of returning the exception class.
        raise NotImplementedError

    @abstractmethod
    def log(
        self, revrange=None, limit=None, firstparent=False, merges=None,
        path=None, follow=False
    ):
        """Get commit logs

        :param revrange: Either a single revision or a range of revisions as a
                         2-element list or tuple.
        :param int limit: Limit the number of log entries.
        :param bool firstparent: Only follow the first parent of merges.
        :param bool merges: True means only merges, False means no merges,
                            None means both merges and non-merges.
        :param str path: Only match commits containing changes on this path.
        :param bool follow: Follow file history across renames.
        :returns: log information
        :rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry`

        If revrange is None, return a list of all log entries in reverse
        chronological order.

        If revrange is a single revision, return a single log entry.

        If revrange is a 2 element list [A,B] or tuple (A,B), return a list of
        log entries starting at B and following that branch back to A or one of
        its ancestors (not inclusive). If A is None, follow branch B back to
        the beginning of history. If B is None, list all descendants in reverse
        chronological order.
        """
        raise NotImplementedError

    @abstractmethod
    def changed(self, rev):
        """Files that changed from the rev's parent(s)

        :param rev: The revision to get the files that changed.
        :type rev: list of :class:`FileChangeInfo`.
        """
        raise NotImplementedError

    @abstractmethod
    def pdiff(self, rev):
        """Diff from the rev's parent(s)

        :param rev: The rev to compute the diff from its parent.
        :returns str: The diff.

        The returned string is a unified diff that the rev introduces with a
        prefix of one (suitable for input to patch -p1).
        """
        raise NotImplementedError

    @abstractmethod
    def diff(self, rev_a, rev_b, path=None):
        """Diff of two revisions

        :param rev_a: The start revision.
        :param rev_b: The end revision.
        :param path: If not None, return diff for only that file.
        :type path: None or str
        :returns str: The diff.

        The returned string contains the unified diff from rev_a to rev_b with
        a prefix of one (suitable for input to patch -p1).
        """
        raise NotImplementedError

    @abstractmethod
    def ancestor(self, rev1, rev2):
        """Find most recent common ancestor of two revisions

        :param rev1: First revision.
        :param rev2: Second revision.
        :returns: The common ancestor revision between the two.
        """
        raise NotImplementedError

    @abstractmethod
    def blame(self, rev, path):
        """Blame (a.k.a. annotate, praise) a file

        :param rev: The revision to blame.
        :param str path: The path to blame.
        :returns: list of annotated lines of the given path
        :rtype: list of :class:`BlameInfo` objects
        :raises PathDoesNotExist: if the path does not exist.
        :raises BadFileType: if the path is not a file.
        """
        raise NotImplementedError

    @abstractmethod
    def tip(self, head):
        """Find the tip of a named head

        :param str head: name of head to look up
        :returns: revision identifier of head

        The returned identifier should be a valid input for :meth:`VCSRepo.ls`
        and respect the branch name in the returned identifier if applicable.
        """
        raise NotImplementedError
# vi:set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
|
ScottDuckworth/python-anyvcs
|
anyvcs/common.py
|
VCSRepo.ls
|
python
|
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
raise NotImplementedError
|
List directory or file
:param rev: The revision to use.
:param path: The path to list. May start with a '/' or not. Directories
may end with a '/' or not.
:param recursive: Recursively list files in subdirectories.
:param recursive_dirs: Used when recursive=True, also list directories.
:param directory: If path is a directory, list path itself instead of
its contents.
:param report: A list or tuple of extra attributes to return that may
require extra processing. Recognized values are 'size',
'target', 'executable', and 'commit'.
Returns a list of dictionaries with the following keys:
**type**
The type of the file: 'f' for file, 'd' for directory, 'l' for
symlink.
**name**
The name of the file. Not present if directory=True.
**size**
The size of the file. Only present for files when 'size' is in
report.
**target**
The target of the symlink. Only present for symlinks when
'target' is in report.
**executable**
True if the file is executable, False otherwise. Only present
for files when 'executable' is in report.
Raises PathDoesNotExist if the path does not exist.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/common.py#L338-L375
| null |
class VCSRepo(object):
__metaclass__ = ABCMetaDocStringInheritor
def __init__(self, path, encoding='utf-8'):
"""Open an existing repository"""
self.path = path
self.encoding = encoding
@abstractproperty
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
raise NotImplementedError
@property
def _commit_cache(self):
try:
return self._commit_cache_v
except AttributeError:
commit_cache_path = os.path.join(self.private_path, 'commit-cache')
self._commit_cache_v = CommitLogCache(commit_cache_path)
return self._commit_cache_v
def _command(self, cmd, input=None, **kwargs):
kwargs.setdefault('cwd', self.path)
return command(cmd, **kwargs)
@classmethod
def cleanPath(cls, path):
path = path.lstrip('/')
path = multislash_rx.sub('/', path)
return path
@abstractmethod
def canonical_rev(self, rev):
""" Get the canonical revision identifier
:param rev: The revision to canonicalize.
:returns: The canonicalized revision
The canonical revision is the revision which is natively supported by
the underlying VCS type. In some cases, anyvcs may annotate a revision
identifier to also encode branch information which is not safe to use
directly with the VCS itself (e.g. as created by :meth:`compose_rev`).
This method is a means of converting back to canonical form.
"""
raise NotImplementedError
@abstractmethod
def compose_rev(self, branch, rev):
""" Compose a revision identifier which encodes branch and revision.
:param str branch: A branch name
:param rev: A revision (can be canonical or as constructed by
:meth:`compose_rev()` or :meth:`tip()`)
The revision identifier encodes branch and revision information
according to the particular VCS type. This is a means to unify the
various branching models under a common interface.
"""
raise NotImplementedError
@abstractmethod
@abstractmethod
def cat(self, rev, path):
"""Get file contents
:param rev: The revision to use.
:param str path: The path to the file. Must be a file.
:returns: The contents of the file.
:rtype: str or bytes
:raises PathDoesNotExist: If the path does not exist.
:raises BadFileType: If the path is not a file.
"""
raise NotImplementedError
@abstractmethod
def readlink(self, rev, path):
"""Get symbolic link target
:param rev: The revision to use.
:param str path: The path to the file. Must be a symbolic link.
:returns str: The target of the symbolic link.
:raises PathDoesNotExist: if the path does not exist.
:raises BadFileType: if the path is not a symbolic link.
"""
raise NotImplementedError
@abstractmethod
def branches(self):
"""Get list of branches
:returns: The branches in the repository
:rtype: list of str
"""
raise NotImplementedError
@abstractmethod
def tags(self):
"""Get list of tags
:returns: The tags in the repository
:rtype: list of str
"""
raise NotImplementedError
@abstractmethod
def heads(self):
"""Get list of heads
:returns: The heads in the repository
:rtype: list of str
"""
raise NotImplementedError
@abstractmethod
def empty(self):
"""Test if the repository contains any commits
:returns bool: True if the repository contains no commits.
Commits that exist by default (e.g. a zero commit) are not counted.
"""
return NotImplementedError
@abstractmethod
def __contains__(self, rev):
"""Test if the repository contains the specified revision
"""
return NotImplementedError
@abstractmethod
def __len__(self):
"""Returns the number of commits in the repository
Commits that exist by default (e.g. a zero commit) are not counted.
"""
return NotImplementedError
@abstractmethod
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
"""Get commit logs
:param revrange: Either a single revision or a range of revisions as a
2-element list or tuple.
:param int limit: Limit the number of log entries.
:param bool firstparent: Only follow the first parent of merges.
:param bool merges: True means only merges, False means no merges,
None means both merges and non-merges.
:param str path: Only match commits containing changes on this path.
:param bool follow: Follow file history across renames.
:returns: log information
:rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry`
If revrange is None, return a list of all log entries in reverse
chronological order.
If revrange is a single revision, return a single log entry.
If revrange is a 2 element list [A,B] or tuple (A,B), return a list of log
entries starting at B and following that branch back to A or one of its
ancestors (not inclusive. If A is None, follow branch B back to the
beginning of history. If B is None, list all descendants in reverse
chronological order.
"""
raise NotImplementedError
@abstractmethod
def changed(self, rev):
"""Files that changed from the rev's parent(s)
:param rev: The revision to get the files that changed.
:type rev: list of :class:`FileChangeInfo`.
"""
raise NotImplementedError
@abstractmethod
def pdiff(self, rev):
"""Diff from the rev's parent(s)
:param rev: The rev to compute the diff from its parent.
:returns str: The diff.
The returned string is a unified diff that the rev introduces with a
prefix of one (suitable for input to patch -p1).
"""
raise NotImplementedError
@abstractmethod
def diff(self, rev_a, rev_b, path=None):
"""Diff of two revisions
:param rev_a: The start revision.
:param rev_b: The end revision.
:param path: If not None, return diff for only that file.
:type path: None or str
:returns str: The diff.
The returned string contains the unified diff from rev_a to rev_b with
a prefix of one (suitable for input to patch -p1).
"""
raise NotImplementedError
@abstractmethod
def ancestor(self, rev1, rev2):
"""Find most recent common ancestor of two revisions
:param rev1: First revision.
:param rev2: Second revision.
:returns: The common ancestor revision between the two.
"""
raise NotImplementedError
@abstractmethod
def blame(self, rev, path):
"""Blame (a.k.a. annotate, praise) a file
:param rev: The revision to blame.
:param str path: The path to blame.
:returns: list of annotated lines of the given path
:rtype: list of :class:`BlameInfo` objects
:raises PathDoesNotExist: if the path does not exist.
:raises BadFileType: if the path is not a file.
"""
raise NotImplementedError
@abstractmethod
def tip(self, head):
"""Find the tip of a named head
:param str head: name of head to look up
:returns: revision identifier of head
The returned identifier should be a valid input for :meth:`VCSRepo.ls`.
and respect the branch name in the returned identifier if applicable.
"""
raise NotImplementedError
|
ScottDuckworth/python-anyvcs
|
anyvcs/common.py
|
VCSRepo.log
|
python
|
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
raise NotImplementedError
|
Get commit logs
:param revrange: Either a single revision or a range of revisions as a
2-element list or tuple.
:param int limit: Limit the number of log entries.
:param bool firstparent: Only follow the first parent of merges.
:param bool merges: True means only merges, False means no merges,
None means both merges and non-merges.
:param str path: Only match commits containing changes on this path.
:param bool follow: Follow file history across renames.
:returns: log information
:rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry`
If revrange is None, return a list of all log entries in reverse
chronological order.
If revrange is a single revision, return a single log entry.
If revrange is a 2 element list [A,B] or tuple (A,B), return a list of log
entries starting at B and following that branch back to A or one of its
ancestors (not inclusive. If A is None, follow branch B back to the
beginning of history. If B is None, list all descendants in reverse
chronological order.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/common.py#L460-L489
| null |
class VCSRepo(object):
__metaclass__ = ABCMetaDocStringInheritor
def __init__(self, path, encoding='utf-8'):
"""Open an existing repository"""
self.path = path
self.encoding = encoding
@abstractproperty
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
raise NotImplementedError
@property
def _commit_cache(self):
try:
return self._commit_cache_v
except AttributeError:
commit_cache_path = os.path.join(self.private_path, 'commit-cache')
self._commit_cache_v = CommitLogCache(commit_cache_path)
return self._commit_cache_v
def _command(self, cmd, input=None, **kwargs):
kwargs.setdefault('cwd', self.path)
return command(cmd, **kwargs)
@classmethod
def cleanPath(cls, path):
path = path.lstrip('/')
path = multislash_rx.sub('/', path)
return path
@abstractmethod
def canonical_rev(self, rev):
""" Get the canonical revision identifier
:param rev: The revision to canonicalize.
:returns: The canonicalized revision
The canonical revision is the revision which is natively supported by
the underlying VCS type. In some cases, anyvcs may annotate a revision
identifier to also encode branch information which is not safe to use
directly with the VCS itself (e.g. as created by :meth:`compose_rev`).
This method is a means of converting back to canonical form.
"""
raise NotImplementedError
@abstractmethod
def compose_rev(self, branch, rev):
""" Compose a revision identifier which encodes branch and revision.
:param str branch: A branch name
:param rev: A revision (can be canonical or as constructed by
:meth:`compose_rev()` or :meth:`tip()`)
The revision identifier encodes branch and revision information
according to the particular VCS type. This is a means to unify the
various branching models under a common interface.
"""
raise NotImplementedError
@abstractmethod
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
"""List directory or file
:param rev: The revision to use.
:param path: The path to list. May start with a '/' or not. Directories
may end with a '/' or not.
:param recursive: Recursively list files in subdirectories.
:param recursive_dirs: Used when recursive=True, also list directories.
:param directory: If path is a directory, list path itself instead of
its contents.
:param report: A list or tuple of extra attributes to return that may
require extra processing. Recognized values are 'size',
'target', 'executable', and 'commit'.
Returns a list of dictionaries with the following keys:
**type**
The type of the file: 'f' for file, 'd' for directory, 'l' for
symlink.
**name**
The name of the file. Not present if directory=True.
**size**
The size of the file. Only present for files when 'size' is in
report.
**target**
The target of the symlink. Only present for symlinks when
'target' is in report.
**executable**
True if the file is executable, False otherwise. Only present
for files when 'executable' is in report.
Raises PathDoesNotExist if the path does not exist.
"""
raise NotImplementedError
@abstractmethod
def cat(self, rev, path):
"""Get file contents
:param rev: The revision to use.
:param str path: The path to the file. Must be a file.
:returns: The contents of the file.
:rtype: str or bytes
:raises PathDoesNotExist: If the path does not exist.
:raises BadFileType: If the path is not a file.
"""
raise NotImplementedError
@abstractmethod
def readlink(self, rev, path):
"""Get symbolic link target
:param rev: The revision to use.
:param str path: The path to the file. Must be a symbolic link.
:returns str: The target of the symbolic link.
:raises PathDoesNotExist: if the path does not exist.
:raises BadFileType: if the path is not a symbolic link.
"""
raise NotImplementedError
@abstractmethod
def branches(self):
"""Get list of branches
:returns: The branches in the repository
:rtype: list of str
"""
raise NotImplementedError
@abstractmethod
def tags(self):
"""Get list of tags
:returns: The tags in the repository
:rtype: list of str
"""
raise NotImplementedError
@abstractmethod
def heads(self):
"""Get list of heads
:returns: The heads in the repository
:rtype: list of str
"""
raise NotImplementedError
@abstractmethod
def empty(self):
"""Test if the repository contains any commits
:returns bool: True if the repository contains no commits.
Commits that exist by default (e.g. a zero commit) are not counted.
"""
return NotImplementedError
@abstractmethod
def __contains__(self, rev):
"""Test if the repository contains the specified revision
"""
return NotImplementedError
@abstractmethod
def __len__(self):
"""Returns the number of commits in the repository
Commits that exist by default (e.g. a zero commit) are not counted.
"""
return NotImplementedError
@abstractmethod
def changed(self, rev):
    """Files that changed from the rev's parent(s)

    :param rev: The revision to get the files that changed.
    :returns: list of :class:`FileChangeInfo`.
    """
    # Fixes: the ``@abstractmethod`` decorator was applied twice (once is
    # sufficient), and the docstring mislabeled the return value as the
    # type of ``rev``.
    raise NotImplementedError
# NOTE(review): more abstract hooks; each concrete backend overrides these.
@abstractmethod
def pdiff(self, rev):
    """Diff from the rev's parent(s)

    :param rev: The rev to compute the diff from its parent.
    :returns str: The diff.

    The returned string is a unified diff that the rev introduces with a
    prefix of one (suitable for input to patch -p1).
    """
    raise NotImplementedError

@abstractmethod
def diff(self, rev_a, rev_b, path=None):
    """Diff of two revisions

    :param rev_a: The start revision.
    :param rev_b: The end revision.
    :param path: If not None, return diff for only that file.
    :type path: None or str
    :returns str: The diff.

    The returned string contains the unified diff from rev_a to rev_b with
    a prefix of one (suitable for input to patch -p1).
    """
    raise NotImplementedError

@abstractmethod
def ancestor(self, rev1, rev2):
    """Find most recent common ancestor of two revisions

    :param rev1: First revision.
    :param rev2: Second revision.
    :returns: The common ancestor revision between the two.
    """
    raise NotImplementedError

@abstractmethod
def blame(self, rev, path):
    """Blame (a.k.a. annotate, praise) a file

    :param rev: The revision to blame.
    :param str path: The path to blame.
    :returns: list of annotated lines of the given path
    :rtype: list of :class:`BlameInfo` objects
    :raises PathDoesNotExist: if the path does not exist.
    :raises BadFileType: if the path is not a file.
    """
    raise NotImplementedError

@abstractmethod
def tip(self, head):
    """Find the tip of a named head

    :param str head: name of head to look up
    :returns: revision identifier of head

    The returned identifier should be a valid input for :meth:`VCSRepo.ls`.
    and respect the branch name in the returned identifier if applicable.
    """
    raise NotImplementedError
|
ScottDuckworth/python-anyvcs
|
anyvcs/__init__.py
|
clone
|
python
|
def clone(srcpath, destpath, vcs=None):
    """Clone the repository at *srcpath* into *destpath*.

    If *vcs* is falsy, the backend is detected from *srcpath* via probe().
    """
    vcs = vcs or probe(srcpath)
    cls = _get_repo_class(vcs)
    return cls.clone(srcpath, destpath)
|
Clone an existing repository.
:param str srcpath: Path to an existing repository
:param str destpath: Desired path of new repository
:param str vcs: Either ``git``, ``hg``, or ``svn``
:returns VCSRepo: The newly cloned repository
If ``vcs`` is not given, then the repository type is discovered from
``srcpath`` via :func:`probe`.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/__init__.py#L32-L46
|
[
"def probe(path):\n \"\"\"Probe a repository for its type.\n\n :param str path: The path of the repository\n :raises UnknownVCSType: if the repository type couldn't be inferred\n :returns str: either ``git``, ``hg``, or ``svn``\n\n This function employs some heuristics to guess the type of the repository.\n\n \"\"\"\n import os\n from .common import UnknownVCSType\n if os.path.isdir(os.path.join(path, '.git')):\n return 'git'\n elif os.path.isdir(os.path.join(path, '.hg')):\n return 'hg'\n elif (\n os.path.isfile(os.path.join(path, 'config')) and\n os.path.isdir(os.path.join(path, 'objects')) and\n os.path.isdir(os.path.join(path, 'refs')) and\n os.path.isdir(os.path.join(path, 'branches'))\n ):\n return 'git'\n elif (\n os.path.isfile(os.path.join(path, 'format')) and\n os.path.isdir(os.path.join(path, 'conf')) and\n os.path.isdir(os.path.join(path, 'db')) and\n os.path.isdir(os.path.join(path, 'locks'))\n ):\n return 'svn'\n else:\n raise UnknownVCSType(path)\n",
"def _get_repo_class(vcs):\n from .common import UnknownVCSType\n if vcs == 'git':\n from .git import GitRepo\n return GitRepo\n elif vcs == 'hg':\n from .hg import HgRepo\n return HgRepo\n elif vcs == 'svn':\n from .svn import SvnRepo\n return SvnRepo\n else:\n raise UnknownVCSType(vcs)\n"
] |
# Copyright (c) 2013-2014, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .version import __version__
def create(path, vcs):
    """Create a new repository

    :param str path: The path where to create the repository.
    :param str vcs: Either ``git``, ``hg``, or ``svn``

    """
    # Resolve the backend class for the requested VCS and delegate.
    repo_cls = _get_repo_class(vcs)
    return repo_cls.create(path)
def probe(path):
    """Probe a repository for its type.

    :param str path: The path of the repository
    :raises UnknownVCSType: if the repository type couldn't be inferred
    :returns str: either ``git``, ``hg``, or ``svn``

    This function employs some heuristics to guess the type of the repository.

    """
    import os
    from .common import UnknownVCSType
    join = os.path.join
    isdir = os.path.isdir
    isfile = os.path.isfile
    # Working copies carry a .git / .hg control directory.
    if isdir(join(path, '.git')):
        return 'git'
    if isdir(join(path, '.hg')):
        return 'hg'
    # Bare git: a 'config' file alongside objects/refs/branches directories.
    if (
        isfile(join(path, 'config')) and
        isdir(join(path, 'objects')) and
        isdir(join(path, 'refs')) and
        isdir(join(path, 'branches'))
    ):
        return 'git'
    # Subversion repository: 'format' file plus conf/db/locks directories.
    if (
        isfile(join(path, 'format')) and
        isdir(join(path, 'conf')) and
        isdir(join(path, 'db')) and
        isdir(join(path, 'locks'))
    ):
        return 'svn'
    raise UnknownVCSType(path)
def open(path, vcs=None):
    """Open an existing repository

    :param str path: The path of the repository
    :param vcs: If specified, assume the given repository type to avoid
        auto-detection. Either ``git``, ``hg``, or ``svn``.
    :raises UnknownVCSType: if the repository type couldn't be inferred

    If ``vcs`` is not specified, it is inferred via :func:`probe`.

    """
    import os
    assert os.path.isdir(path), path + ' is not a directory'
    # A falsy vcs argument (None or '') falls back to auto-detection.
    repo_type = vcs if vcs else probe(path)
    repo_cls = _get_repo_class(repo_type)
    return repo_cls(path)
def _get_repo_class(vcs):
    """Map a VCS name to its backend class, importing it lazily."""
    from .common import UnknownVCSType
    # Each backend is imported only when requested so that a missing
    # tool for one VCS does not break the others.
    if vcs == 'git':
        from .git import GitRepo
        return GitRepo
    if vcs == 'hg':
        from .hg import HgRepo
        return HgRepo
    if vcs == 'svn':
        from .svn import SvnRepo
        return SvnRepo
    raise UnknownVCSType(vcs)
# vi:set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
|
ScottDuckworth/python-anyvcs
|
anyvcs/__init__.py
|
probe
|
python
|
def probe(path):
    """Guess the repository type at *path*: 'git', 'hg', or 'svn'.

    Raises UnknownVCSType when no heuristic matches.
    """
    import os
    from .common import UnknownVCSType
    # Working copies are identified by their control directory.
    if os.path.isdir(os.path.join(path, '.git')):
        return 'git'
    elif os.path.isdir(os.path.join(path, '.hg')):
        return 'hg'
    # Bare git layout: config file + objects/refs/branches directories.
    elif (
        os.path.isfile(os.path.join(path, 'config')) and
        os.path.isdir(os.path.join(path, 'objects')) and
        os.path.isdir(os.path.join(path, 'refs')) and
        os.path.isdir(os.path.join(path, 'branches'))
    ):
        return 'git'
    # Subversion layout: format file + conf/db/locks directories.
    elif (
        os.path.isfile(os.path.join(path, 'format')) and
        os.path.isdir(os.path.join(path, 'conf')) and
        os.path.isdir(os.path.join(path, 'db')) and
        os.path.isdir(os.path.join(path, 'locks'))
    ):
        return 'svn'
    else:
        raise UnknownVCSType(path)
|
Probe a repository for its type.
:param str path: The path of the repository
:raises UnknownVCSType: if the repository type couldn't be inferred
:returns str: either ``git``, ``hg``, or ``svn``
This function employs some heuristics to guess the type of the repository.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/__init__.py#L60-L91
| null |
# Copyright (c) 2013-2014, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .version import __version__
def clone(srcpath, destpath, vcs=None):
    """Clone an existing repository.

    :param str srcpath: Path to an existing repository
    :param str destpath: Desired path of new repository
    :param str vcs: Either ``git``, ``hg``, or ``svn``
    :returns VCSRepo: The newly cloned repository

    If ``vcs`` is not given, then the repository type is discovered from
    ``srcpath`` via :func:`probe`.

    """
    # A falsy vcs argument falls back to auto-detection on the source.
    repo_type = vcs if vcs else probe(srcpath)
    repo_cls = _get_repo_class(repo_type)
    return repo_cls.clone(srcpath, destpath)
def create(path, vcs):
    """Create a new repository

    :param str path: The path where to create the repository.
    :param str vcs: Either ``git``, ``hg``, or ``svn``

    """
    # Resolve the backend class for the requested VCS and delegate.
    repo_cls = _get_repo_class(vcs)
    return repo_cls.create(path)
def open(path, vcs=None):
    """Open an existing repository

    :param str path: The path of the repository
    :param vcs: If specified, assume the given repository type to avoid
        auto-detection. Either ``git``, ``hg``, or ``svn``.
    :raises UnknownVCSType: if the repository type couldn't be inferred

    If ``vcs`` is not specified, it is inferred via :func:`probe`.

    """
    import os
    assert os.path.isdir(path), path + ' is not a directory'
    # A falsy vcs argument (None or '') falls back to auto-detection.
    repo_type = vcs if vcs else probe(path)
    repo_cls = _get_repo_class(repo_type)
    return repo_cls(path)
def _get_repo_class(vcs):
    """Map a VCS name to its backend class, importing it lazily."""
    from .common import UnknownVCSType
    # Each backend is imported only when requested so that a missing
    # tool for one VCS does not break the others.
    if vcs == 'git':
        from .git import GitRepo
        return GitRepo
    if vcs == 'hg':
        from .hg import HgRepo
        return HgRepo
    if vcs == 'svn':
        from .svn import SvnRepo
        return SvnRepo
    raise UnknownVCSType(vcs)
# vi:set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
|
ScottDuckworth/python-anyvcs
|
anyvcs/__init__.py
|
open
|
python
|
def open(path, vcs=None):
    """Open the repository at *path*, auto-detecting its type if *vcs* is falsy."""
    import os
    assert os.path.isdir(path), path + ' is not a directory'
    vcs = vcs or probe(path)
    cls = _get_repo_class(vcs)
    return cls(path)
|
Open an existing repository
:param str path: The path of the repository
:param vcs: If specified, assume the given repository type to avoid
auto-detection. Either ``git``, ``hg``, or ``svn``.
:raises UnknownVCSType: if the repository type couldn't be inferred
If ``vcs`` is not specified, it is inferred via :func:`probe`.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/__init__.py#L94-L109
|
[
"def probe(path):\n \"\"\"Probe a repository for its type.\n\n :param str path: The path of the repository\n :raises UnknownVCSType: if the repository type couldn't be inferred\n :returns str: either ``git``, ``hg``, or ``svn``\n\n This function employs some heuristics to guess the type of the repository.\n\n \"\"\"\n import os\n from .common import UnknownVCSType\n if os.path.isdir(os.path.join(path, '.git')):\n return 'git'\n elif os.path.isdir(os.path.join(path, '.hg')):\n return 'hg'\n elif (\n os.path.isfile(os.path.join(path, 'config')) and\n os.path.isdir(os.path.join(path, 'objects')) and\n os.path.isdir(os.path.join(path, 'refs')) and\n os.path.isdir(os.path.join(path, 'branches'))\n ):\n return 'git'\n elif (\n os.path.isfile(os.path.join(path, 'format')) and\n os.path.isdir(os.path.join(path, 'conf')) and\n os.path.isdir(os.path.join(path, 'db')) and\n os.path.isdir(os.path.join(path, 'locks'))\n ):\n return 'svn'\n else:\n raise UnknownVCSType(path)\n",
"def _get_repo_class(vcs):\n from .common import UnknownVCSType\n if vcs == 'git':\n from .git import GitRepo\n return GitRepo\n elif vcs == 'hg':\n from .hg import HgRepo\n return HgRepo\n elif vcs == 'svn':\n from .svn import SvnRepo\n return SvnRepo\n else:\n raise UnknownVCSType(vcs)\n"
] |
# Copyright (c) 2013-2014, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .version import __version__
def clone(srcpath, destpath, vcs=None):
    """Clone an existing repository.

    :param str srcpath: Path to an existing repository
    :param str destpath: Desired path of new repository
    :param str vcs: Either ``git``, ``hg``, or ``svn``
    :returns VCSRepo: The newly cloned repository

    If ``vcs`` is not given, then the repository type is discovered from
    ``srcpath`` via :func:`probe`.

    """
    # A falsy vcs argument falls back to auto-detection on the source.
    repo_type = vcs if vcs else probe(srcpath)
    repo_cls = _get_repo_class(repo_type)
    return repo_cls.clone(srcpath, destpath)
def create(path, vcs):
    """Create a new repository

    :param str path: The path where to create the repository.
    :param str vcs: Either ``git``, ``hg``, or ``svn``

    """
    # Resolve the backend class for the requested VCS and delegate.
    repo_cls = _get_repo_class(vcs)
    return repo_cls.create(path)
def probe(path):
    """Probe a repository for its type.

    :param str path: The path of the repository
    :raises UnknownVCSType: if the repository type couldn't be inferred
    :returns str: either ``git``, ``hg``, or ``svn``

    This function employs some heuristics to guess the type of the repository.

    """
    import os
    from .common import UnknownVCSType
    join = os.path.join
    isdir = os.path.isdir
    isfile = os.path.isfile
    # Working copies carry a .git / .hg control directory.
    if isdir(join(path, '.git')):
        return 'git'
    if isdir(join(path, '.hg')):
        return 'hg'
    # Bare git: a 'config' file alongside objects/refs/branches directories.
    if (
        isfile(join(path, 'config')) and
        isdir(join(path, 'objects')) and
        isdir(join(path, 'refs')) and
        isdir(join(path, 'branches'))
    ):
        return 'git'
    # Subversion repository: 'format' file plus conf/db/locks directories.
    if (
        isfile(join(path, 'format')) and
        isdir(join(path, 'conf')) and
        isdir(join(path, 'db')) and
        isdir(join(path, 'locks'))
    ):
        return 'svn'
    raise UnknownVCSType(path)
def _get_repo_class(vcs):
    """Map a VCS name to its backend class, importing it lazily."""
    from .common import UnknownVCSType
    # Each backend is imported only when requested so that a missing
    # tool for one VCS does not break the others.
    if vcs == 'git':
        from .git import GitRepo
        return GitRepo
    if vcs == 'hg':
        from .hg import HgRepo
        return HgRepo
    if vcs == 'svn':
        from .svn import SvnRepo
        return SvnRepo
    raise UnknownVCSType(vcs)
# vi:set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
|
ScottDuckworth/python-anyvcs
|
anyvcs/svn.py
|
SvnRepo.clone
|
python
|
def clone(cls, srcpath, destpath):
    """Copy a Subversion repository to a new location via dump/load."""
    try:
        os.makedirs(destpath)
    except OSError as e:
        # Tolerate a pre-existing destination directory.
        if not e.errno == errno.EEXIST:
            raise
    # Stream a full dump of the source repository...
    cmd = [SVNADMIN, 'dump', '--quiet', '.']
    dump = subprocess.Popen(
        cmd, cwd=srcpath, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # ...into a freshly created repository at the destination.
    repo = cls.create(destpath)
    repo.load(dump.stdout)
    stderr = dump.stderr.read()
    dump.stdout.close()
    dump.stderr.close()
    dump.wait()
    if dump.returncode != 0:
        raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
    return repo
|
Copy a main repository to a new location.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L118-L138
| null |
class SvnRepo(VCSRepo):
"""A Subversion repository
Unless otherwise specified, valid revisions are:
- an integer (ex: 194)
- an integer as a string (ex: "194")
- a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
- a branch or tag name at a specific revision (ex: "trunk:194")
Revisions have the following meanings:
- HEAD always maps to the root of the repository (/)
- Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
path in the repository
- The youngest revision is assumed unless a revision is specified
For example, the following code will list the contents of the directory
branches/branch1/src from revision 194:
>>> repo = SvnRepo(path)
>>> repo.ls('branches/branch1:194', 'src')
Branches and tags are detected in branches() and tags() by looking at the
paths specified in repo.branch_glob and repo.tag_glob. The default values
for these variables will detect the following repository layout:
- /trunk - the main development branch
- /branches/* - branches
- /tags/* - tags
If a repository does not fit this layout, everything other than branch and
tag detection will work as expected.
"""
@classmethod
@classmethod
def create(cls, path):
    """Create a new repository"""
    try:
        os.makedirs(path)
    except OSError as e:
        # Tolerate a pre-existing directory; svnadmin populates it.
        if not e.errno == errno.EEXIST:
            raise
    cmd = [SVNADMIN, 'create', path]
    subprocess.check_call(cmd)
    return cls(path)

@classmethod
def cleanPath(cls, path):
    """Collapse repeated slashes and force a single leading '/'."""
    path = multislash_rx.sub('/', path)
    if not path.startswith('/'):
        path = '/' + path
    return path

def __init__(self, path):
    """Open the Subversion repository at *path*.

    branch_glob / tag_glob define where branches and tags are looked up;
    the defaults match the conventional trunk/branches/tags layout.
    """
    super(SvnRepo, self).__init__(path)
    self.branch_glob = ['/trunk/', '/branches/*/']
    self.tag_glob = ['/tags/*/']
@property
def private_path(self):
    """Get the path to a directory which can be used to store arbitrary data

    This directory should not conflict with any of the repository internals.
    The directory should be created if it does not already exist.
    """
    import os
    path = os.path.join(self.path, '.private')
    try:
        os.mkdir(path)
    except OSError as e:
        # Already existing is fine; anything else is a real error.
        if e.errno != errno.EEXIST:
            raise
    return path

def _proplist(self, rev, path):
    """Return property names at *rev*; revision props when *path* is falsy."""
    cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
    output = self._command(cmd).decode(self.encoding)
    props = [x.strip() for x in output.splitlines()]
    #
    # Subversion 1.8 adds extra user output when given a path argument.
    #
    if not path is None and SVN_VERSION >= ('1', '8'):
        return props[1:]
    else:
        return props

def proplist(self, rev, path=None):
    """List Subversion properties of the path"""
    rev, prefix = self._maprev(rev)
    if path is None:
        return self._proplist(str(rev), None)
    else:
        path = type(self).cleanPath(_join(prefix, path))
        return self._proplist(str(rev), path)

def _propget(self, prop, rev, path):
    """Return the value of *prop*; a revision prop when *path* is falsy."""
    cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
    return self._command(cmd).decode()

def propget(self, prop, rev, path=None):
    """Get Subversion property value of the path"""
    rev, prefix = self._maprev(rev)
    if path is None:
        return self._propget(prop, str(rev), None)
    else:
        path = type(self).cleanPath(_join(prefix, path))
        return self._propget(prop, str(rev), path)
def _mergeinfo(self, rev, path):
    """Parse svn:mergeinfo at (*rev*, *path*) into (head, minrev, maxrev) tuples."""
    revstr = str(rev)
    if 'svn:mergeinfo' not in self._proplist(revstr, path):
        return []
    results = []
    mergeinfo = self._propget('svn:mergeinfo', revstr, path)
    for line in mergeinfo.splitlines():
        m = mergeinfo_rx.match(line)
        assert m
        head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
        minrev = int(minrev)
        # A single-revision range has no maxrev component.
        maxrev = int(maxrev or minrev)
        results.append((head, minrev, maxrev))
    return results

def _maprev(self, rev):
    """Resolve a user-supplied revision into (int rev, '/prefix') form."""
    if isinstance(rev, int):
        return (rev, '/')
    m = head_rev_rx.match(rev)
    assert m, 'invalid rev'
    head, rev = m.group('head', 'rev')
    if rev:
        rev = int(rev)
    else:
        # No explicit revision: assume the youngest.
        rev = self.youngest()
    if head is None:
        return (rev, '/')
    elif head == 'HEAD':
        # HEAD always maps to the repository root.
        return (rev, '/')
    else:
        return (rev, '/' + head)

def canonical_rev(self, rev):
    """Reduce any accepted revision form to its integer revision number."""
    try:
        # Python 2 compatibility: accept both str and unicode.
        types = (str, unicode)
    except NameError:
        types = str
    if isinstance(rev, int):
        return rev
    elif isinstance(rev, types) and rev.isdigit():
        return int(rev)
    else:
        rev, prefix = self._maprev(rev)
        return rev

def compose_rev(self, branch, rev):
    """Build a 'branch:rev' revision string from a branch and any rev form."""
    return '%s:%d' % (branch, self.canonical_rev(rev))
def ls(
    self, rev, path, recursive=False, recursive_dirs=False,
    directory=False, report=()
):
    """List *path* at *rev* via ``svnlook tree``.

    Returns a list of attrdict entries with at least ``path`` and ``type``
    ('f', 'd' or 'l'); extra fields (target, executable, size, commit) are
    filled in only when requested through *report*.
    """
    rev, prefix = self._maprev(rev)
    revstr = str(rev)
    path = type(self).cleanPath(_join(prefix, path))
    forcedir = False
    if path.endswith('/'):
        # A trailing slash demands that the path be a directory.
        forcedir = True
        if path != '/':
            path = path.rstrip('/')
    if path == '/':
        if directory:
            entry = attrdict(path='/', type='d')
            if 'commit' in report:
                entry.commit = self._history(revstr, '/', 1)[0].rev
            return [entry]
        ltrim = 1
        prefix = '/'
    else:
        # ltrim strips the parent prefix from each returned name.
        ltrim = len(path) + 1
        prefix = path + '/'
    cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
    if not recursive:
        cmd.append('--non-recursive')
    cmd.extend(['.', path])
    p = subprocess.Popen(
        cmd, cwd=self.path, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    output, stderr = p.communicate()
    if p.returncode != 0:
        stderr = stderr.decode()
        if p.returncode == 1 and 'File not found' in stderr:
            raise PathDoesNotExist(rev, path)
        raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
    results = []
    lines = output.decode(self.encoding, 'replace').splitlines()
    if forcedir and not lines[0].endswith('/'):
        raise PathDoesNotExist(rev, path)
    if lines[0].endswith('/'):
        # First line is the directory itself: keep it alone for
        # directory=True, otherwise skip it and list the children.
        if directory:
            lines = lines[:1]
        else:
            lines = lines[1:]
    for name in lines:
        entry_name = name[ltrim:]
        entry = attrdict(path=name.strip('/'))
        if name.endswith('/'):
            if recursive and not recursive_dirs:
                continue
            entry.type = 'd'
            entry_name = entry_name.rstrip('/')
        else:
            proplist = self._proplist(revstr, name)
            # svn:special marks symlinks; contents start with "link ".
            if 'svn:special' in proplist:
                link = self._cat(revstr, name).decode(self.encoding, 'replace')
                link = link.split(None, 1)
                if len(link) == 2 and link[0] == 'link':
                    entry.type = 'l'
                    if 'target' in report:
                        entry.target = link[1]
            if 'type' not in entry:
                entry.type = 'f'
                if 'executable' in report:
                    entry.executable = 'svn:executable' in proplist
                if 'size' in report:
                    entry.size = len(self._cat(revstr, name))
        if entry_name:
            entry.name = entry_name
        if 'commit' in report:
            entry.commit = self._history(revstr, name, 1)[0].rev
        results.append(entry)
    return results
def _cat(self, rev, path):
    """Raw ``svnlook cat`` of *path* at *rev*; returns bytes."""
    cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
    return self._command(cmd)

def cat(self, rev, path):
    """Return the contents of the regular file *path* at *rev*."""
    rev, prefix = self._maprev(rev)
    path = type(self).cleanPath(_join(prefix, path))
    ls = self.ls(rev, path, directory=True)
    assert len(ls) == 1
    if ls[0].get('type') != 'f':
        raise BadFileType(rev, path)
    return self._cat(str(rev), path)

def _readlink(self, rev, path):
    """Decode a symlink's "link TARGET" payload and return TARGET."""
    output = self._cat(rev, path)
    link = output.decode(self.encoding, 'replace').split(None, 1)
    assert len(link) == 2 and link[0] == 'link'
    return link[1]

def readlink(self, rev, path):
    """Return the target of the symbolic link *path* at *rev*."""
    rev, prefix = self._maprev(rev)
    path = type(self).cleanPath(_join(prefix, path))
    ls = self.ls(rev, path, directory=True)
    assert len(ls) == 1
    if ls[0].get('type') != 'l':
        raise BadFileType(rev, path)
    return self._readlink(str(rev), path)

def youngest(self):
    """Return the repository's youngest revision number as an int."""
    cmd = [SVNLOOK, 'youngest', '.']
    return int(self._command(cmd))
def _heads(self, globs):
    """Expand the given path globs against the youngest revision's tree.

    The globs are compiled into a trie of path components, then matched
    level by level against directory listings.
    """
    root = {}
    for glob in globs:
        n = root
        for p in glob.strip('/').split('/'):
            n = n.setdefault(p, {})
    youngest = self.youngest()
    results = []
    def match(n, path):
        for d in self.ls(youngest, path):
            if d.get('type') == 'd':
                for k, v in n.items():
                    if fnmatch.fnmatchcase(d.name, k):
                        if path:
                            p = path + '/' + d.name
                        else:
                            p = d.name
                        # Descend while trie nodes remain; a leaf is a hit.
                        if v:
                            match(v, p)
                        else:
                            results.append(p)
    match(root, '')
    return results

def branches(self):
    """Branch names per branch_glob; 'HEAD' (the root) is always first."""
    return ['HEAD'] + self._heads(self.branch_glob)

def tags(self):
    """Tag names per tag_glob."""
    return self._heads(self.tag_glob)

def heads(self):
    """All branches and tags, with 'HEAD' first."""
    return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)

def empty(self):
    """True when only the zero revision exists (history shorter than 2 entries)."""
    cmd = [SVNLOOK, 'history', '.', '-l2']
    output = self._command(cmd)
    # Output has a 2-line header; fewer than 4 lines means <2 revisions.
    return len(output.splitlines()) < 4

def __contains__(self, rev):
    """True when svnlook can resolve *rev* for its prefix path."""
    rev, prefix = self._maprev(rev)
    cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
    p = subprocess.Popen(
        cmd, cwd=self.path, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    stdout, stderr = p.communicate()
    return p.returncode == 0

def __len__(self):
    """Number of commits, excluding the header lines and revision zero."""
    cmd = [SVNLOOK, 'history', '.']
    output = self._command(cmd)
    return len(output.splitlines()) - 3
def log(
    self, revrange=None, limit=None, firstparent=False, merges=None,
    path=None, follow=False
):
    """Commit log.

    *revrange* may be a single revision (returns one CommitLogEntry) or a
    (start, end) pair (returns entries reachable from end but not start).
    *merges* filters to only-merges (True) or only-non-merges (False).
    """
    if not (revrange is None or isinstance(revrange, (tuple, list))):
        # a single revision was given
        rev, prefix = self._maprev(revrange)
        h = self._history(rev, prefix, 1)
        rev = h[0].rev
        return self._logentry(rev, prefix)
    if revrange is None:
        results = self._history(self.youngest(), path or '/', limit)
    else:
        if revrange[1] is None:
            # Open upper bound: gather history from every named head.
            include = set()
            rev1 = self.youngest()
            for head in self.heads():
                if head == 'HEAD':
                    continue
                if path:
                    p = head + '/' + path.lstrip('/')
                else:
                    p = type(self).cleanPath(head)
                include.update(self._mergehistory(rev1, p, limit))
        else:
            rev1, prefix1 = self._maprev(revrange[1])
            if path:
                p = type(self).cleanPath(prefix1 + '/' + path)
            else:
                p = prefix1
            # firstparent restricts to the direct line of history.
            if firstparent:
                include = self._history(rev1, p)
            else:
                include = self._mergehistory(rev1, p, limit)
        if revrange[0] is None:
            results = include
        else:
            rev0, prefix0 = self._maprev(revrange[0])
            exclude = self._mergehistory(rev0, prefix0)
            results = include - exclude
    results = sorted(results, key=lambda x: x.rev, reverse=True)
    results = map(lambda x: self._logentry(x.rev, x.path), results)
    if merges is not None:
        if merges:
            results = filter(lambda x: len(x.parents) > 1, results)
        else:
            results = filter(lambda x: len(x.parents) <= 1, results)
    return list(results)
def _logentry(self, rev, path, history=None):
    """Build a CommitLogEntry for (*rev*, *path*), using the commit cache."""
    import hashlib
    revstr = str(rev)
    cmd = [SVNLOOK, 'info', '.', '-r', revstr]
    # Cache key is a digest of the revision string.
    cachekey = hashlib.sha1(revstr.encode()).hexdigest()
    entry = self._commit_cache.get(cachekey)
    if entry:
        entry._cached = True
        return entry
    output = self._command(cmd).decode(self.encoding, 'replace')
    # svnlook info prints author, date, log size, then the message.
    author, date, logsize, message = output.split('\n', 3)
    date = parse_isodate(date)
    if history is None:
        history = self._history(rev, path, 2)
    parents = []
    if len(history) > 1:
        # Natural (first) parent is the previous revision on this path.
        prev = history[1].rev
        if path == '/':
            parents.append(prev)
        else:
            parents.append('%s:%d' % (path, prev))
        # Additional parents come from svn:mergeinfo ranges newer than prev.
        for head, minrev, maxrev in self._mergeinfo(rev, path):
            if prev < maxrev:
                h = self._history(maxrev, head, 1)
                if head == '/':
                    parents.append(h[0].rev)
                else:
                    parents.append('%s:%d' % (head, h[0].rev))
    entry = CommitLogEntry(rev, parents, date, author, message)
    if cachekey not in self._commit_cache:
        self._commit_cache[cachekey] = entry
    return entry
def pdiff(self, rev):
    """Unified diff that *rev* introduces over its parent (patch -p1 style)."""
    rev, prefix = self._maprev(rev)
    if rev == 0:
        # Revision zero has no parent and no content.
        return ''
    cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
    output = self._command(cmd)
    return _add_diff_prefix(output.decode(self.encoding))
def _compose_url(self, rev=None, path=None, proto='file'):
    """Build a '<proto>://<repo>/<prefix>/<path>@<rev>' URL for this repository.

    The branch/tag prefix is derived from *rev* via _maprev.
    NOTE(review): the ``rev=None`` default would reach _maprev, which only
    handles int/str revisions — callers in this file always pass a rev;
    confirm before relying on the default.
    """
    url = '%s://%s' % (proto, self.path)
    rev, prefix = self._maprev(rev)
    path = path or ''
    path = path.lstrip('/')
    prefix = prefix.lstrip('/')
    if prefix:
        url = '%s/%s' % (url, prefix)
    if path:
        url = '%s/%s' % (url, path)
    # Idiom fix: was ``if not rev is None`` (PEP 8 E714).
    if rev is not None:
        url = '%s@%d' % (url, rev)
    return url
def _exists(self, rev, path):
    """Return the ls() entry for *path* at *rev*, or False if it is absent."""
    try:
        return self.ls(rev, path, directory=True)[0]
    except PathDoesNotExist:
        return False
def _diff_read(self, rev, path):
    """Read *path* at *rev* for diffing.

    Returns (text, hash): decoded file text and its SHA-1 hex digest for a
    regular file (text is None when the contents do not decode, i.e. binary),
    a synthetic "link ..." / "directory" line for symlinks and directories,
    and ('', None) when the path does not exist.
    """
    try:
        entry = self.ls(rev, path, directory=True)[0]
        if entry.type == 'f':
            contents = self.cat(rev, path)
            # Bug fix: previously ``.hexdigest`` (no call) assigned the bound
            # method, so digests of identical binary files never compared
            # equal and every binary pair was reported as differing.
            h = hashlib.sha1(contents).hexdigest()
            # Catch the common base class of encoding errors which is
            # unfortunately ValueError.
            try:
                return contents.decode(self.encoding), h
            except ValueError:
                return None, h
        elif entry.type == 'l':
            return 'link %s\n' % self.readlink(rev, path), None
        else:
            assert entry.type == 'd'
            return 'directory\n', None
    except PathDoesNotExist:
        return '', None
def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
    """Recursive diff helper; diff_a/diff_b are the patch-level-1 prefixes."""
    entry_a = not path or self._exists(rev_a, path)
    entry_b = not path or self._exists(rev_b, path)
    if not entry_a and not entry_b:
        return ''
    elif not entry_a or not entry_b:
        # Path exists on only one side: added or removed.
        if (
            entry_a and entry_a.type != 'd' or
            entry_b and entry_b.type != 'd'
        ):
            # Non-directory: produce a unified diff against /dev/null.
            _, prefix_a = self._maprev(rev_a)
            _, prefix_b = self._maprev(rev_b)
            prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
            fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
                if entry_a else os.devnull
            tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
                if entry_b else os.devnull
            a, hasha = self._diff_read(rev_a, path)
            b, hashb = self._diff_read(rev_b, path)
            if a is None or b is None:
                # Binary on at least one side: compare by hash only.
                if hasha == hashb:
                    return ''
                else:
                    return BINARY_DIFF.format(fromfile=fromfile,
                                              tofile=tofile)
            a, b = a.splitlines(True), b.splitlines(True)
            diff = difflib.unified_diff(a, b,
                                        fromfile=fromfile,
                                        tofile=tofile)
            return ''.join(diff)
        elif entry_a:
            contents = self.ls(rev_a, path)
        else:  # entry_b
            assert entry_b
            contents = self.ls(rev_b, path)
        # Directory on one side: recurse into its children.
        return ''.join(
            self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
            for entry in contents
        )
    else:
        # Present on both sides: let svn produce the diff directly.
        url_a = self._compose_url(rev=rev_a, path=path)
        url_b = self._compose_url(rev=rev_b, path=path)
        cmd = [SVN, 'diff', url_a, url_b]
        output = self._command(cmd).decode(self.encoding)
        return _add_diff_prefix(output)

def diff(self, rev_a, rev_b, path=None):
    """Unified diff from rev_a to rev_b, optionally restricted to *path*."""
    return self._diff(rev_a, rev_b, path)
def changed(self, rev):
    """FileChangeInfo entries for everything *rev* changed (svnlook changed)."""
    rev, prefix = self._maprev(rev)
    if rev == 0:
        return []
    cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
    output = self._command(cmd).decode(self.encoding, 'replace')
    lines = output.splitlines()
    # Pop from the end for O(1); reverse first to keep original order.
    lines.reverse()
    results = []
    while lines:
        line = lines.pop()
        # Fixed-width svnlook format: 3 status chars, a space, then the path.
        status = line[:3]
        path = line[4:].lstrip('/')
        copy = None
        if status.endswith('+'):
            # '+' marks copy-with-history; the source is on the next line.
            line = lines.pop()
            m = changed_copy_info_rx.match(line)
            assert m
            copy = m.group('src')
        entry = FileChangeInfo(path, str(status), copy)
        results.append(entry)
    return results

def _history(self, rev, path, limit=None):
    """HistoryEntry list for *path* up to *rev* (svnlook history)."""
    cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
    if limit is not None:
        cmd.extend(['-l', str(limit)])
    output = self._command(cmd).decode(self.encoding, 'replace')
    results = []
    # Skip the two-line header.
    for line in output.splitlines()[2:]:
        r, p = line.split(None, 1)
        results.append(HistoryEntry(int(r), p))
    return results

def _mergehistory(self, rev, path, limit=None):
    """History of *path* plus everything reachable through svn:mergeinfo."""
    results = set(self._history(rev, path, limit))
    for head, minrev, maxrev in self._mergeinfo(rev, path):
        # Cap the per-head lookup at the size of the merged range.
        l = maxrev - minrev + 1
        if limit is not None:
            l = min(l, limit)
        h = self._history(maxrev, head, l)
        for r, p in h:
            if r < minrev:
                break
            results.add(HistoryEntry(r, p))
    return results
def ancestor(self, rev1, rev2):
rev1, prefix1 = self._maprev(rev1)
rev2, prefix2 = self._maprev(rev2)
prefix1 = type(self).cleanPath(prefix1)
if prefix1 != '/':
prefix1 = prefix1.rstrip('/')
prefix2 = type(self).cleanPath(prefix2)
if prefix2 != '/':
prefix2 = prefix2.rstrip('/')
self.ls(rev1, prefix1, directory=True)
self.ls(rev2, prefix2, directory=True)
minrev = min(rev1, rev2)
if prefix1 == prefix2:
return '%s:%d' % (prefix1.lstrip('/'), minrev)
history1 = self._history(minrev, prefix1)
history2 = self._history(minrev, prefix2)
youngest = HistoryEntry(0, '/')
for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
for h in history2:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
for h in history1:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
if youngest.rev > 0:
return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
i1 = 0
i2 = 0
len1 = len(history1)
len2 = len(history2)
while i1 < len1 and i2 < len2:
if history1[i1].rev < history2[i2].rev:
i2 += 1
elif history1[i1].rev > history2[i2].rev:
i1 += 1
else:
if history1[i1].path == history2[i2].path:
return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
else:
i1 += 1
i2 += 1
return None
def _blame(self, rev, path):
import os
import xml.etree.ElementTree as ET
url = 'file://' + os.path.abspath(self.path) + path
cmd = [SVN, 'blame', '--xml', '-r', rev, url]
output = self._command(cmd)
tree = ET.fromstring(output)
results = []
cat = self._cat(rev, path)
target = tree.find('target')
try:
iter = target.iter('entry')
except AttributeError: # added in python 2.7
iter = target.getiterator('entry')
for entry, text in zip(iter, cat.splitlines()):
commit = entry.find('commit')
rev = int(commit.attrib.get('revision'))
author = commit.find('author').text
date = commit.find('date').text
date = parse_isodate(date)
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
"""Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
def tip(self, head):
if head == 'HEAD':
return self.youngest()
rev = self.log(limit=1, path=head)[0].rev
return '{head}:{rev}'.format(head=head, rev=rev)
|
ScottDuckworth/python-anyvcs
|
anyvcs/svn.py
|
SvnRepo.create
|
python
|
def create(cls, path):
try:
os.makedirs(path)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'create', path]
subprocess.check_call(cmd)
return cls(path)
|
Create a new repository
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L141-L150
| null |
class SvnRepo(VCSRepo):
"""A Subversion repository
Unless otherwise specified, valid revisions are:
- an integer (ex: 194)
- an integer as a string (ex: "194")
- a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
- a branch or tag name at a specific revision (ex: "trunk:194")
Revisions have the following meanings:
- HEAD always maps to the root of the repository (/)
- Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
path in the repository
- The youngest revision is assumed unless a revision is specified
For example, the following code will list the contents of the directory
branches/branch1/src from revision 194:
>>> repo = SvnRepo(path)
>>> repo.ls('branches/branch1:194', 'src')
Branches and tags are detected in branches() and tags() by looking at the
paths specified in repo.branch_glob and repo.tag_glob. The default values
for these variables will detect the following repository layout:
- /trunk - the main development branch
- /branches/* - branches
- /tags/* - tags
If a repository does not fit this layout, everything other than branch and
tag detection will work as expected.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Copy a main repository to a new location."""
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'dump', '--quiet', '.']
dump = subprocess.Popen(
cmd, cwd=srcpath, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo = cls.create(destpath)
repo.load(dump.stdout)
stderr = dump.stderr.read()
dump.stdout.close()
dump.stderr.close()
dump.wait()
if dump.returncode != 0:
raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
return repo
@classmethod
@classmethod
def cleanPath(cls, path):
path = multislash_rx.sub('/', path)
if not path.startswith('/'):
path = '/' + path
return path
def __init__(self, path):
super(SvnRepo, self).__init__(path)
self.branch_glob = ['/trunk/', '/branches/*/']
self.tag_glob = ['/tags/*/']
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
import os
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
def _proplist(self, rev, path):
cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
output = self._command(cmd).decode(self.encoding)
props = [x.strip() for x in output.splitlines()]
#
# Subversion 1.8 adds extra user output when given a path argument.
#
if not path is None and SVN_VERSION >= ('1', '8'):
return props[1:]
else:
return props
def proplist(self, rev, path=None):
"""List Subversion properties of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._proplist(str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._proplist(str(rev), path)
def _propget(self, prop, rev, path):
cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
return self._command(cmd).decode()
def propget(self, prop, rev, path=None):
"""Get Subversion property value of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._propget(prop, str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._propget(prop, str(rev), path)
def _mergeinfo(self, rev, path):
revstr = str(rev)
if 'svn:mergeinfo' not in self._proplist(revstr, path):
return []
results = []
mergeinfo = self._propget('svn:mergeinfo', revstr, path)
for line in mergeinfo.splitlines():
m = mergeinfo_rx.match(line)
assert m
head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
minrev = int(minrev)
maxrev = int(maxrev or minrev)
results.append((head, minrev, maxrev))
return results
def _maprev(self, rev):
if isinstance(rev, int):
return (rev, '/')
m = head_rev_rx.match(rev)
assert m, 'invalid rev'
head, rev = m.group('head', 'rev')
if rev:
rev = int(rev)
else:
rev = self.youngest()
if head is None:
return (rev, '/')
elif head == 'HEAD':
return (rev, '/')
else:
return (rev, '/' + head)
def canonical_rev(self, rev):
try:
types = (str, unicode)
except NameError:
types = str
if isinstance(rev, int):
return rev
elif isinstance(rev, types) and rev.isdigit():
return int(rev)
else:
rev, prefix = self._maprev(rev)
return rev
def compose_rev(self, branch, rev):
return '%s:%d' % (branch, self.canonical_rev(rev))
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev, prefix = self._maprev(rev)
revstr = str(rev)
path = type(self).cleanPath(_join(prefix, path))
forcedir = False
if path.endswith('/'):
forcedir = True
if path != '/':
path = path.rstrip('/')
if path == '/':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self._history(revstr, '/', 1)[0].rev
return [entry]
ltrim = 1
prefix = '/'
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
if not recursive:
cmd.append('--non-recursive')
cmd.extend(['.', path])
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode()
if p.returncode == 1 and 'File not found' in stderr:
raise PathDoesNotExist(rev, path)
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
results = []
lines = output.decode(self.encoding, 'replace').splitlines()
if forcedir and not lines[0].endswith('/'):
raise PathDoesNotExist(rev, path)
if lines[0].endswith('/'):
if directory:
lines = lines[:1]
else:
lines = lines[1:]
for name in lines:
entry_name = name[ltrim:]
entry = attrdict(path=name.strip('/'))
if name.endswith('/'):
if recursive and not recursive_dirs:
continue
entry.type = 'd'
entry_name = entry_name.rstrip('/')
else:
proplist = self._proplist(revstr, name)
if 'svn:special' in proplist:
link = self._cat(revstr, name).decode(self.encoding, 'replace')
link = link.split(None, 1)
if len(link) == 2 and link[0] == 'link':
entry.type = 'l'
if 'target' in report:
entry.target = link[1]
if 'type' not in entry:
entry.type = 'f'
if 'executable' in report:
entry.executable = 'svn:executable' in proplist
if 'size' in report:
entry.size = len(self._cat(revstr, name))
if entry_name:
entry.name = entry_name
if 'commit' in report:
entry.commit = self._history(revstr, name, 1)[0].rev
results.append(entry)
return results
def _cat(self, rev, path):
cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def _readlink(self, rev, path):
output = self._cat(rev, path)
link = output.decode(self.encoding, 'replace').split(None, 1)
assert len(link) == 2 and link[0] == 'link'
return link[1]
def readlink(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._readlink(str(rev), path)
def youngest(self):
cmd = [SVNLOOK, 'youngest', '.']
return int(self._command(cmd))
def _heads(self, globs):
root = {}
for glob in globs:
n = root
for p in glob.strip('/').split('/'):
n = n.setdefault(p, {})
youngest = self.youngest()
results = []
def match(n, path):
for d in self.ls(youngest, path):
if d.get('type') == 'd':
for k, v in n.items():
if fnmatch.fnmatchcase(d.name, k):
if path:
p = path + '/' + d.name
else:
p = d.name
if v:
match(v, p)
else:
results.append(p)
match(root, '')
return results
def branches(self):
return ['HEAD'] + self._heads(self.branch_glob)
def tags(self):
return self._heads(self.tag_glob)
def heads(self):
return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)
def empty(self):
cmd = [SVNLOOK, 'history', '.', '-l2']
output = self._command(cmd)
return len(output.splitlines()) < 4
def __contains__(self, rev):
rev, prefix = self._maprev(rev)
cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [SVNLOOK, 'history', '.']
output = self._command(cmd)
return len(output.splitlines()) - 3
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
if not (revrange is None or isinstance(revrange, (tuple, list))):
# a single revision was given
rev, prefix = self._maprev(revrange)
h = self._history(rev, prefix, 1)
rev = h[0].rev
return self._logentry(rev, prefix)
if revrange is None:
results = self._history(self.youngest(), path or '/', limit)
else:
if revrange[1] is None:
include = set()
rev1 = self.youngest()
for head in self.heads():
if head == 'HEAD':
continue
if path:
p = head + '/' + path.lstrip('/')
else:
p = type(self).cleanPath(head)
include.update(self._mergehistory(rev1, p, limit))
else:
rev1, prefix1 = self._maprev(revrange[1])
if path:
p = type(self).cleanPath(prefix1 + '/' + path)
else:
p = prefix1
if firstparent:
include = self._history(rev1, p)
else:
include = self._mergehistory(rev1, p, limit)
if revrange[0] is None:
results = include
else:
rev0, prefix0 = self._maprev(revrange[0])
exclude = self._mergehistory(rev0, prefix0)
results = include - exclude
results = sorted(results, key=lambda x: x.rev, reverse=True)
results = map(lambda x: self._logentry(x.rev, x.path), results)
if merges is not None:
if merges:
results = filter(lambda x: len(x.parents) > 1, results)
else:
results = filter(lambda x: len(x.parents) <= 1, results)
return list(results)
def _logentry(self, rev, path, history=None):
import hashlib
revstr = str(rev)
cmd = [SVNLOOK, 'info', '.', '-r', revstr]
cachekey = hashlib.sha1(revstr.encode()).hexdigest()
entry = self._commit_cache.get(cachekey)
if entry:
entry._cached = True
return entry
output = self._command(cmd).decode(self.encoding, 'replace')
author, date, logsize, message = output.split('\n', 3)
date = parse_isodate(date)
if history is None:
history = self._history(rev, path, 2)
parents = []
if len(history) > 1:
prev = history[1].rev
if path == '/':
parents.append(prev)
else:
parents.append('%s:%d' % (path, prev))
for head, minrev, maxrev in self._mergeinfo(rev, path):
if prev < maxrev:
h = self._history(maxrev, head, 1)
if head == '/':
parents.append(h[0].rev)
else:
parents.append('%s:%d' % (head, h[0].rev))
entry = CommitLogEntry(rev, parents, date, author, message)
if cachekey not in self._commit_cache:
self._commit_cache[cachekey] = entry
return entry
def pdiff(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return ''
cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
output = self._command(cmd)
return _add_diff_prefix(output.decode(self.encoding))
def _compose_url(self, rev=None, path=None, proto='file'):
url = '%s://%s' % (proto, self.path)
rev, prefix = self._maprev(rev)
path = path or ''
path = path.lstrip('/')
prefix = prefix.lstrip('/')
if prefix:
url = '%s/%s' % (url, prefix)
if path:
url = '%s/%s' % (url, path)
if not rev is None:
url = '%s@%d' % (url, rev)
return url
def _exists(self, rev, path):
try:
return self.ls(rev, path, directory=True)[0]
except PathDoesNotExist:
return False
def _diff_read(self, rev, path):
try:
entry = self.ls(rev, path, directory=True)[0]
if entry.type == 'f':
contents = self.cat(rev, path)
h = hashlib.sha1(contents).hexdigest
# Catch the common base class of encoding errors which is
# unfortunately ValueError.
try:
return contents.decode(self.encoding), h
except ValueError:
return None, h
elif entry.type == 'l':
return 'link %s\n' % self.readlink(rev, path), None
else:
assert entry.type == 'd'
return 'directory\n', None
except PathDoesNotExist:
return '', None
def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
entry_a = not path or self._exists(rev_a, path)
entry_b = not path or self._exists(rev_b, path)
if not entry_a and not entry_b:
return ''
elif not entry_a or not entry_b:
if (
entry_a and entry_a.type != 'd' or
entry_b and entry_b.type != 'd'
):
_, prefix_a = self._maprev(rev_a)
_, prefix_b = self._maprev(rev_b)
prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
if entry_a else os.devnull
tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
if entry_b else os.devnull
a, hasha = self._diff_read(rev_a, path)
b, hashb = self._diff_read(rev_b, path)
if a is None or b is None:
if hasha == hashb:
return ''
else:
return BINARY_DIFF.format(fromfile=fromfile,
tofile=tofile)
a, b = a.splitlines(True), b.splitlines(True)
diff = difflib.unified_diff(a, b,
fromfile=fromfile,
tofile=tofile)
return ''.join(diff)
elif entry_a:
contents = self.ls(rev_a, path)
else: # entry_b
assert entry_b
contents = self.ls(rev_b, path)
return ''.join(
self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
for entry in contents
)
else:
url_a = self._compose_url(rev=rev_a, path=path)
url_b = self._compose_url(rev=rev_b, path=path)
cmd = [SVN, 'diff', url_a, url_b]
output = self._command(cmd).decode(self.encoding)
return _add_diff_prefix(output)
def diff(self, rev_a, rev_b, path=None):
return self._diff(rev_a, rev_b, path)
def changed(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return []
cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
output = self._command(cmd).decode(self.encoding, 'replace')
lines = output.splitlines()
lines.reverse()
results = []
while lines:
line = lines.pop()
status = line[:3]
path = line[4:].lstrip('/')
copy = None
if status.endswith('+'):
line = lines.pop()
m = changed_copy_info_rx.match(line)
assert m
copy = m.group('src')
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
return results
def _history(self, rev, path, limit=None):
cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
if limit is not None:
cmd.extend(['-l', str(limit)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines()[2:]:
r, p = line.split(None, 1)
results.append(HistoryEntry(int(r), p))
return results
def _mergehistory(self, rev, path, limit=None):
results = set(self._history(rev, path, limit))
for head, minrev, maxrev in self._mergeinfo(rev, path):
l = maxrev - minrev + 1
if limit is not None:
l = min(l, limit)
h = self._history(maxrev, head, l)
for r, p in h:
if r < minrev:
break
results.add(HistoryEntry(r, p))
return results
def ancestor(self, rev1, rev2):
rev1, prefix1 = self._maprev(rev1)
rev2, prefix2 = self._maprev(rev2)
prefix1 = type(self).cleanPath(prefix1)
if prefix1 != '/':
prefix1 = prefix1.rstrip('/')
prefix2 = type(self).cleanPath(prefix2)
if prefix2 != '/':
prefix2 = prefix2.rstrip('/')
self.ls(rev1, prefix1, directory=True)
self.ls(rev2, prefix2, directory=True)
minrev = min(rev1, rev2)
if prefix1 == prefix2:
return '%s:%d' % (prefix1.lstrip('/'), minrev)
history1 = self._history(minrev, prefix1)
history2 = self._history(minrev, prefix2)
youngest = HistoryEntry(0, '/')
for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
for h in history2:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
for h in history1:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
if youngest.rev > 0:
return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
i1 = 0
i2 = 0
len1 = len(history1)
len2 = len(history2)
while i1 < len1 and i2 < len2:
if history1[i1].rev < history2[i2].rev:
i2 += 1
elif history1[i1].rev > history2[i2].rev:
i1 += 1
else:
if history1[i1].path == history2[i2].path:
return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
else:
i1 += 1
i2 += 1
return None
def _blame(self, rev, path):
import os
import xml.etree.ElementTree as ET
url = 'file://' + os.path.abspath(self.path) + path
cmd = [SVN, 'blame', '--xml', '-r', rev, url]
output = self._command(cmd)
tree = ET.fromstring(output)
results = []
cat = self._cat(rev, path)
target = tree.find('target')
try:
iter = target.iter('entry')
except AttributeError: # added in python 2.7
iter = target.getiterator('entry')
for entry, text in zip(iter, cat.splitlines()):
commit = entry.find('commit')
rev = int(commit.attrib.get('revision'))
author = commit.find('author').text
date = commit.find('date').text
date = parse_isodate(date)
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
"""Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
def tip(self, head):
if head == 'HEAD':
return self.youngest()
rev = self.log(limit=1, path=head)[0].rev
return '{head}:{rev}'.format(head=head, rev=rev)
|
ScottDuckworth/python-anyvcs
|
anyvcs/svn.py
|
SvnRepo.proplist
|
python
|
def proplist(self, rev, path=None):
rev, prefix = self._maprev(rev)
if path is None:
return self._proplist(str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._proplist(str(rev), path)
|
List Subversion properties of the path
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L193-L200
|
[
"def _join(*args):\n return '/'.join(arg for arg in args if arg)\n",
"def cleanPath(cls, path):\n path = multislash_rx.sub('/', path)\n if not path.startswith('/'):\n path = '/' + path\n return path\n",
"def _proplist(self, rev, path):\n cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']\n output = self._command(cmd).decode(self.encoding)\n props = [x.strip() for x in output.splitlines()]\n #\n # Subversion 1.8 adds extra user output when given a path argument.\n #\n if not path is None and SVN_VERSION >= ('1', '8'):\n return props[1:]\n else:\n return props\n",
"def _maprev(self, rev):\n if isinstance(rev, int):\n return (rev, '/')\n m = head_rev_rx.match(rev)\n assert m, 'invalid rev'\n head, rev = m.group('head', 'rev')\n if rev:\n rev = int(rev)\n else:\n rev = self.youngest()\n if head is None:\n return (rev, '/')\n elif head == 'HEAD':\n return (rev, '/')\n else:\n return (rev, '/' + head)\n"
] |
class SvnRepo(VCSRepo):
"""A Subversion repository
Unless otherwise specified, valid revisions are:
- an integer (ex: 194)
- an integer as a string (ex: "194")
- a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
- a branch or tag name at a specific revision (ex: "trunk:194")
Revisions have the following meanings:
- HEAD always maps to the root of the repository (/)
- Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
path in the repository
- The youngest revision is assumed unless a revision is specified
For example, the following code will list the contents of the directory
branches/branch1/src from revision 194:
>>> repo = SvnRepo(path)
>>> repo.ls('branches/branch1:194', 'src')
Branches and tags are detected in branches() and tags() by looking at the
paths specified in repo.branch_glob and repo.tag_glob. The default values
for these variables will detect the following repository layout:
- /trunk - the main development branch
- /branches/* - branches
- /tags/* - tags
If a repository does not fit this layout, everything other than branch and
tag detection will work as expected.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Copy a main repository to a new location."""
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'dump', '--quiet', '.']
dump = subprocess.Popen(
cmd, cwd=srcpath, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo = cls.create(destpath)
repo.load(dump.stdout)
stderr = dump.stderr.read()
dump.stdout.close()
dump.stderr.close()
dump.wait()
if dump.returncode != 0:
raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
return repo
@classmethod
def create(cls, path):
"""Create a new repository"""
try:
os.makedirs(path)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'create', path]
subprocess.check_call(cmd)
return cls(path)
@classmethod
def cleanPath(cls, path):
path = multislash_rx.sub('/', path)
if not path.startswith('/'):
path = '/' + path
return path
def __init__(self, path):
super(SvnRepo, self).__init__(path)
self.branch_glob = ['/trunk/', '/branches/*/']
self.tag_glob = ['/tags/*/']
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
import os
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
def _proplist(self, rev, path):
cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
output = self._command(cmd).decode(self.encoding)
props = [x.strip() for x in output.splitlines()]
#
# Subversion 1.8 adds extra user output when given a path argument.
#
if not path is None and SVN_VERSION >= ('1', '8'):
return props[1:]
else:
return props
def _propget(self, prop, rev, path):
cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
return self._command(cmd).decode()
def propget(self, prop, rev, path=None):
"""Get Subversion property value of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._propget(prop, str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._propget(prop, str(rev), path)
def _mergeinfo(self, rev, path):
revstr = str(rev)
if 'svn:mergeinfo' not in self._proplist(revstr, path):
return []
results = []
mergeinfo = self._propget('svn:mergeinfo', revstr, path)
for line in mergeinfo.splitlines():
m = mergeinfo_rx.match(line)
assert m
head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
minrev = int(minrev)
maxrev = int(maxrev or minrev)
results.append((head, minrev, maxrev))
return results
def _maprev(self, rev):
if isinstance(rev, int):
return (rev, '/')
m = head_rev_rx.match(rev)
assert m, 'invalid rev'
head, rev = m.group('head', 'rev')
if rev:
rev = int(rev)
else:
rev = self.youngest()
if head is None:
return (rev, '/')
elif head == 'HEAD':
return (rev, '/')
else:
return (rev, '/' + head)
def canonical_rev(self, rev):
try:
types = (str, unicode)
except NameError:
types = str
if isinstance(rev, int):
return rev
elif isinstance(rev, types) and rev.isdigit():
return int(rev)
else:
rev, prefix = self._maprev(rev)
return rev
def compose_rev(self, branch, rev):
return '%s:%d' % (branch, self.canonical_rev(rev))
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev, prefix = self._maprev(rev)
revstr = str(rev)
path = type(self).cleanPath(_join(prefix, path))
forcedir = False
if path.endswith('/'):
forcedir = True
if path != '/':
path = path.rstrip('/')
if path == '/':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self._history(revstr, '/', 1)[0].rev
return [entry]
ltrim = 1
prefix = '/'
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
if not recursive:
cmd.append('--non-recursive')
cmd.extend(['.', path])
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode()
if p.returncode == 1 and 'File not found' in stderr:
raise PathDoesNotExist(rev, path)
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
results = []
lines = output.decode(self.encoding, 'replace').splitlines()
if forcedir and not lines[0].endswith('/'):
raise PathDoesNotExist(rev, path)
if lines[0].endswith('/'):
if directory:
lines = lines[:1]
else:
lines = lines[1:]
for name in lines:
entry_name = name[ltrim:]
entry = attrdict(path=name.strip('/'))
if name.endswith('/'):
if recursive and not recursive_dirs:
continue
entry.type = 'd'
entry_name = entry_name.rstrip('/')
else:
proplist = self._proplist(revstr, name)
if 'svn:special' in proplist:
link = self._cat(revstr, name).decode(self.encoding, 'replace')
link = link.split(None, 1)
if len(link) == 2 and link[0] == 'link':
entry.type = 'l'
if 'target' in report:
entry.target = link[1]
if 'type' not in entry:
entry.type = 'f'
if 'executable' in report:
entry.executable = 'svn:executable' in proplist
if 'size' in report:
entry.size = len(self._cat(revstr, name))
if entry_name:
entry.name = entry_name
if 'commit' in report:
entry.commit = self._history(revstr, name, 1)[0].rev
results.append(entry)
return results
def _cat(self, rev, path):
cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def _readlink(self, rev, path):
output = self._cat(rev, path)
link = output.decode(self.encoding, 'replace').split(None, 1)
assert len(link) == 2 and link[0] == 'link'
return link[1]
def readlink(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._readlink(str(rev), path)
def youngest(self):
cmd = [SVNLOOK, 'youngest', '.']
return int(self._command(cmd))
def _heads(self, globs):
root = {}
for glob in globs:
n = root
for p in glob.strip('/').split('/'):
n = n.setdefault(p, {})
youngest = self.youngest()
results = []
def match(n, path):
for d in self.ls(youngest, path):
if d.get('type') == 'd':
for k, v in n.items():
if fnmatch.fnmatchcase(d.name, k):
if path:
p = path + '/' + d.name
else:
p = d.name
if v:
match(v, p)
else:
results.append(p)
match(root, '')
return results
def branches(self):
return ['HEAD'] + self._heads(self.branch_glob)
def tags(self):
return self._heads(self.tag_glob)
def heads(self):
return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)
def empty(self):
cmd = [SVNLOOK, 'history', '.', '-l2']
output = self._command(cmd)
return len(output.splitlines()) < 4
def __contains__(self, rev):
rev, prefix = self._maprev(rev)
cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [SVNLOOK, 'history', '.']
output = self._command(cmd)
return len(output.splitlines()) - 3
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
if not (revrange is None or isinstance(revrange, (tuple, list))):
# a single revision was given
rev, prefix = self._maprev(revrange)
h = self._history(rev, prefix, 1)
rev = h[0].rev
return self._logentry(rev, prefix)
if revrange is None:
results = self._history(self.youngest(), path or '/', limit)
else:
if revrange[1] is None:
include = set()
rev1 = self.youngest()
for head in self.heads():
if head == 'HEAD':
continue
if path:
p = head + '/' + path.lstrip('/')
else:
p = type(self).cleanPath(head)
include.update(self._mergehistory(rev1, p, limit))
else:
rev1, prefix1 = self._maprev(revrange[1])
if path:
p = type(self).cleanPath(prefix1 + '/' + path)
else:
p = prefix1
if firstparent:
include = self._history(rev1, p)
else:
include = self._mergehistory(rev1, p, limit)
if revrange[0] is None:
results = include
else:
rev0, prefix0 = self._maprev(revrange[0])
exclude = self._mergehistory(rev0, prefix0)
results = include - exclude
results = sorted(results, key=lambda x: x.rev, reverse=True)
results = map(lambda x: self._logentry(x.rev, x.path), results)
if merges is not None:
if merges:
results = filter(lambda x: len(x.parents) > 1, results)
else:
results = filter(lambda x: len(x.parents) <= 1, results)
return list(results)
def _logentry(self, rev, path, history=None):
import hashlib
revstr = str(rev)
cmd = [SVNLOOK, 'info', '.', '-r', revstr]
cachekey = hashlib.sha1(revstr.encode()).hexdigest()
entry = self._commit_cache.get(cachekey)
if entry:
entry._cached = True
return entry
output = self._command(cmd).decode(self.encoding, 'replace')
author, date, logsize, message = output.split('\n', 3)
date = parse_isodate(date)
if history is None:
history = self._history(rev, path, 2)
parents = []
if len(history) > 1:
prev = history[1].rev
if path == '/':
parents.append(prev)
else:
parents.append('%s:%d' % (path, prev))
for head, minrev, maxrev in self._mergeinfo(rev, path):
if prev < maxrev:
h = self._history(maxrev, head, 1)
if head == '/':
parents.append(h[0].rev)
else:
parents.append('%s:%d' % (head, h[0].rev))
entry = CommitLogEntry(rev, parents, date, author, message)
if cachekey not in self._commit_cache:
self._commit_cache[cachekey] = entry
return entry
def pdiff(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return ''
cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
output = self._command(cmd)
return _add_diff_prefix(output.decode(self.encoding))
def _compose_url(self, rev=None, path=None, proto='file'):
url = '%s://%s' % (proto, self.path)
rev, prefix = self._maprev(rev)
path = path or ''
path = path.lstrip('/')
prefix = prefix.lstrip('/')
if prefix:
url = '%s/%s' % (url, prefix)
if path:
url = '%s/%s' % (url, path)
if not rev is None:
url = '%s@%d' % (url, rev)
return url
def _exists(self, rev, path):
try:
return self.ls(rev, path, directory=True)[0]
except PathDoesNotExist:
return False
def _diff_read(self, rev, path):
try:
entry = self.ls(rev, path, directory=True)[0]
if entry.type == 'f':
contents = self.cat(rev, path)
h = hashlib.sha1(contents).hexdigest
# Catch the common base class of encoding errors which is
# unfortunately ValueError.
try:
return contents.decode(self.encoding), h
except ValueError:
return None, h
elif entry.type == 'l':
return 'link %s\n' % self.readlink(rev, path), None
else:
assert entry.type == 'd'
return 'directory\n', None
except PathDoesNotExist:
return '', None
def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
entry_a = not path or self._exists(rev_a, path)
entry_b = not path or self._exists(rev_b, path)
if not entry_a and not entry_b:
return ''
elif not entry_a or not entry_b:
if (
entry_a and entry_a.type != 'd' or
entry_b and entry_b.type != 'd'
):
_, prefix_a = self._maprev(rev_a)
_, prefix_b = self._maprev(rev_b)
prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
if entry_a else os.devnull
tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
if entry_b else os.devnull
a, hasha = self._diff_read(rev_a, path)
b, hashb = self._diff_read(rev_b, path)
if a is None or b is None:
if hasha == hashb:
return ''
else:
return BINARY_DIFF.format(fromfile=fromfile,
tofile=tofile)
a, b = a.splitlines(True), b.splitlines(True)
diff = difflib.unified_diff(a, b,
fromfile=fromfile,
tofile=tofile)
return ''.join(diff)
elif entry_a:
contents = self.ls(rev_a, path)
else: # entry_b
assert entry_b
contents = self.ls(rev_b, path)
return ''.join(
self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
for entry in contents
)
else:
url_a = self._compose_url(rev=rev_a, path=path)
url_b = self._compose_url(rev=rev_b, path=path)
cmd = [SVN, 'diff', url_a, url_b]
output = self._command(cmd).decode(self.encoding)
return _add_diff_prefix(output)
def diff(self, rev_a, rev_b, path=None):
return self._diff(rev_a, rev_b, path)
def changed(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return []
cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
output = self._command(cmd).decode(self.encoding, 'replace')
lines = output.splitlines()
lines.reverse()
results = []
while lines:
line = lines.pop()
status = line[:3]
path = line[4:].lstrip('/')
copy = None
if status.endswith('+'):
line = lines.pop()
m = changed_copy_info_rx.match(line)
assert m
copy = m.group('src')
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
return results
def _history(self, rev, path, limit=None):
cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
if limit is not None:
cmd.extend(['-l', str(limit)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines()[2:]:
r, p = line.split(None, 1)
results.append(HistoryEntry(int(r), p))
return results
def _mergehistory(self, rev, path, limit=None):
results = set(self._history(rev, path, limit))
for head, minrev, maxrev in self._mergeinfo(rev, path):
l = maxrev - minrev + 1
if limit is not None:
l = min(l, limit)
h = self._history(maxrev, head, l)
for r, p in h:
if r < minrev:
break
results.add(HistoryEntry(r, p))
return results
def ancestor(self, rev1, rev2):
rev1, prefix1 = self._maprev(rev1)
rev2, prefix2 = self._maprev(rev2)
prefix1 = type(self).cleanPath(prefix1)
if prefix1 != '/':
prefix1 = prefix1.rstrip('/')
prefix2 = type(self).cleanPath(prefix2)
if prefix2 != '/':
prefix2 = prefix2.rstrip('/')
self.ls(rev1, prefix1, directory=True)
self.ls(rev2, prefix2, directory=True)
minrev = min(rev1, rev2)
if prefix1 == prefix2:
return '%s:%d' % (prefix1.lstrip('/'), minrev)
history1 = self._history(minrev, prefix1)
history2 = self._history(minrev, prefix2)
youngest = HistoryEntry(0, '/')
for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
for h in history2:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
for h in history1:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
if youngest.rev > 0:
return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
i1 = 0
i2 = 0
len1 = len(history1)
len2 = len(history2)
while i1 < len1 and i2 < len2:
if history1[i1].rev < history2[i2].rev:
i2 += 1
elif history1[i1].rev > history2[i2].rev:
i1 += 1
else:
if history1[i1].path == history2[i2].path:
return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
else:
i1 += 1
i2 += 1
return None
def _blame(self, rev, path):
import os
import xml.etree.ElementTree as ET
url = 'file://' + os.path.abspath(self.path) + path
cmd = [SVN, 'blame', '--xml', '-r', rev, url]
output = self._command(cmd)
tree = ET.fromstring(output)
results = []
cat = self._cat(rev, path)
target = tree.find('target')
try:
iter = target.iter('entry')
except AttributeError: # added in python 2.7
iter = target.getiterator('entry')
for entry, text in zip(iter, cat.splitlines()):
commit = entry.find('commit')
rev = int(commit.attrib.get('revision'))
author = commit.find('author').text
date = commit.find('date').text
date = parse_isodate(date)
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
"""Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
def tip(self, head):
if head == 'HEAD':
return self.youngest()
rev = self.log(limit=1, path=head)[0].rev
return '{head}:{rev}'.format(head=head, rev=rev)
|
ScottDuckworth/python-anyvcs
|
anyvcs/svn.py
|
SvnRepo.propget
|
python
|
def propget(self, prop, rev, path=None):
rev, prefix = self._maprev(rev)
if path is None:
return self._propget(prop, str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._propget(prop, str(rev), path)
|
Get Subversion property value of the path
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L206-L213
|
[
"def _join(*args):\n return '/'.join(arg for arg in args if arg)\n",
"def cleanPath(cls, path):\n path = multislash_rx.sub('/', path)\n if not path.startswith('/'):\n path = '/' + path\n return path\n",
"def _propget(self, prop, rev, path):\n cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']\n return self._command(cmd).decode()\n",
"def _maprev(self, rev):\n if isinstance(rev, int):\n return (rev, '/')\n m = head_rev_rx.match(rev)\n assert m, 'invalid rev'\n head, rev = m.group('head', 'rev')\n if rev:\n rev = int(rev)\n else:\n rev = self.youngest()\n if head is None:\n return (rev, '/')\n elif head == 'HEAD':\n return (rev, '/')\n else:\n return (rev, '/' + head)\n"
] |
class SvnRepo(VCSRepo):
"""A Subversion repository
Unless otherwise specified, valid revisions are:
- an integer (ex: 194)
- an integer as a string (ex: "194")
- a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
- a branch or tag name at a specific revision (ex: "trunk:194")
Revisions have the following meanings:
- HEAD always maps to the root of the repository (/)
- Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
path in the repository
- The youngest revision is assumed unless a revision is specified
For example, the following code will list the contents of the directory
branches/branch1/src from revision 194:
>>> repo = SvnRepo(path)
>>> repo.ls('branches/branch1:194', 'src')
Branches and tags are detected in branches() and tags() by looking at the
paths specified in repo.branch_glob and repo.tag_glob. The default values
for these variables will detect the following repository layout:
- /trunk - the main development branch
- /branches/* - branches
- /tags/* - tags
If a repository does not fit this layout, everything other than branch and
tag detection will work as expected.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Copy a main repository to a new location."""
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'dump', '--quiet', '.']
dump = subprocess.Popen(
cmd, cwd=srcpath, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo = cls.create(destpath)
repo.load(dump.stdout)
stderr = dump.stderr.read()
dump.stdout.close()
dump.stderr.close()
dump.wait()
if dump.returncode != 0:
raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
return repo
@classmethod
def create(cls, path):
"""Create a new repository"""
try:
os.makedirs(path)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'create', path]
subprocess.check_call(cmd)
return cls(path)
@classmethod
def cleanPath(cls, path):
path = multislash_rx.sub('/', path)
if not path.startswith('/'):
path = '/' + path
return path
def __init__(self, path):
super(SvnRepo, self).__init__(path)
self.branch_glob = ['/trunk/', '/branches/*/']
self.tag_glob = ['/tags/*/']
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
import os
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
def _proplist(self, rev, path):
cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
output = self._command(cmd).decode(self.encoding)
props = [x.strip() for x in output.splitlines()]
#
# Subversion 1.8 adds extra user output when given a path argument.
#
if not path is None and SVN_VERSION >= ('1', '8'):
return props[1:]
else:
return props
def proplist(self, rev, path=None):
"""List Subversion properties of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._proplist(str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._proplist(str(rev), path)
def _propget(self, prop, rev, path):
cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
return self._command(cmd).decode()
def _mergeinfo(self, rev, path):
revstr = str(rev)
if 'svn:mergeinfo' not in self._proplist(revstr, path):
return []
results = []
mergeinfo = self._propget('svn:mergeinfo', revstr, path)
for line in mergeinfo.splitlines():
m = mergeinfo_rx.match(line)
assert m
head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
minrev = int(minrev)
maxrev = int(maxrev or minrev)
results.append((head, minrev, maxrev))
return results
def _maprev(self, rev):
if isinstance(rev, int):
return (rev, '/')
m = head_rev_rx.match(rev)
assert m, 'invalid rev'
head, rev = m.group('head', 'rev')
if rev:
rev = int(rev)
else:
rev = self.youngest()
if head is None:
return (rev, '/')
elif head == 'HEAD':
return (rev, '/')
else:
return (rev, '/' + head)
def canonical_rev(self, rev):
try:
types = (str, unicode)
except NameError:
types = str
if isinstance(rev, int):
return rev
elif isinstance(rev, types) and rev.isdigit():
return int(rev)
else:
rev, prefix = self._maprev(rev)
return rev
def compose_rev(self, branch, rev):
return '%s:%d' % (branch, self.canonical_rev(rev))
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev, prefix = self._maprev(rev)
revstr = str(rev)
path = type(self).cleanPath(_join(prefix, path))
forcedir = False
if path.endswith('/'):
forcedir = True
if path != '/':
path = path.rstrip('/')
if path == '/':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self._history(revstr, '/', 1)[0].rev
return [entry]
ltrim = 1
prefix = '/'
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
if not recursive:
cmd.append('--non-recursive')
cmd.extend(['.', path])
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode()
if p.returncode == 1 and 'File not found' in stderr:
raise PathDoesNotExist(rev, path)
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
results = []
lines = output.decode(self.encoding, 'replace').splitlines()
if forcedir and not lines[0].endswith('/'):
raise PathDoesNotExist(rev, path)
if lines[0].endswith('/'):
if directory:
lines = lines[:1]
else:
lines = lines[1:]
for name in lines:
entry_name = name[ltrim:]
entry = attrdict(path=name.strip('/'))
if name.endswith('/'):
if recursive and not recursive_dirs:
continue
entry.type = 'd'
entry_name = entry_name.rstrip('/')
else:
proplist = self._proplist(revstr, name)
if 'svn:special' in proplist:
link = self._cat(revstr, name).decode(self.encoding, 'replace')
link = link.split(None, 1)
if len(link) == 2 and link[0] == 'link':
entry.type = 'l'
if 'target' in report:
entry.target = link[1]
if 'type' not in entry:
entry.type = 'f'
if 'executable' in report:
entry.executable = 'svn:executable' in proplist
if 'size' in report:
entry.size = len(self._cat(revstr, name))
if entry_name:
entry.name = entry_name
if 'commit' in report:
entry.commit = self._history(revstr, name, 1)[0].rev
results.append(entry)
return results
def _cat(self, rev, path):
cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def _readlink(self, rev, path):
output = self._cat(rev, path)
link = output.decode(self.encoding, 'replace').split(None, 1)
assert len(link) == 2 and link[0] == 'link'
return link[1]
def readlink(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._readlink(str(rev), path)
def youngest(self):
cmd = [SVNLOOK, 'youngest', '.']
return int(self._command(cmd))
def _heads(self, globs):
root = {}
for glob in globs:
n = root
for p in glob.strip('/').split('/'):
n = n.setdefault(p, {})
youngest = self.youngest()
results = []
def match(n, path):
for d in self.ls(youngest, path):
if d.get('type') == 'd':
for k, v in n.items():
if fnmatch.fnmatchcase(d.name, k):
if path:
p = path + '/' + d.name
else:
p = d.name
if v:
match(v, p)
else:
results.append(p)
match(root, '')
return results
def branches(self):
return ['HEAD'] + self._heads(self.branch_glob)
def tags(self):
return self._heads(self.tag_glob)
def heads(self):
return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)
def empty(self):
cmd = [SVNLOOK, 'history', '.', '-l2']
output = self._command(cmd)
return len(output.splitlines()) < 4
def __contains__(self, rev):
rev, prefix = self._maprev(rev)
cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [SVNLOOK, 'history', '.']
output = self._command(cmd)
return len(output.splitlines()) - 3
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
if not (revrange is None or isinstance(revrange, (tuple, list))):
# a single revision was given
rev, prefix = self._maprev(revrange)
h = self._history(rev, prefix, 1)
rev = h[0].rev
return self._logentry(rev, prefix)
if revrange is None:
results = self._history(self.youngest(), path or '/', limit)
else:
if revrange[1] is None:
include = set()
rev1 = self.youngest()
for head in self.heads():
if head == 'HEAD':
continue
if path:
p = head + '/' + path.lstrip('/')
else:
p = type(self).cleanPath(head)
include.update(self._mergehistory(rev1, p, limit))
else:
rev1, prefix1 = self._maprev(revrange[1])
if path:
p = type(self).cleanPath(prefix1 + '/' + path)
else:
p = prefix1
if firstparent:
include = self._history(rev1, p)
else:
include = self._mergehistory(rev1, p, limit)
if revrange[0] is None:
results = include
else:
rev0, prefix0 = self._maprev(revrange[0])
exclude = self._mergehistory(rev0, prefix0)
results = include - exclude
results = sorted(results, key=lambda x: x.rev, reverse=True)
results = map(lambda x: self._logentry(x.rev, x.path), results)
if merges is not None:
if merges:
results = filter(lambda x: len(x.parents) > 1, results)
else:
results = filter(lambda x: len(x.parents) <= 1, results)
return list(results)
def _logentry(self, rev, path, history=None):
import hashlib
revstr = str(rev)
cmd = [SVNLOOK, 'info', '.', '-r', revstr]
cachekey = hashlib.sha1(revstr.encode()).hexdigest()
entry = self._commit_cache.get(cachekey)
if entry:
entry._cached = True
return entry
output = self._command(cmd).decode(self.encoding, 'replace')
author, date, logsize, message = output.split('\n', 3)
date = parse_isodate(date)
if history is None:
history = self._history(rev, path, 2)
parents = []
if len(history) > 1:
prev = history[1].rev
if path == '/':
parents.append(prev)
else:
parents.append('%s:%d' % (path, prev))
for head, minrev, maxrev in self._mergeinfo(rev, path):
if prev < maxrev:
h = self._history(maxrev, head, 1)
if head == '/':
parents.append(h[0].rev)
else:
parents.append('%s:%d' % (head, h[0].rev))
entry = CommitLogEntry(rev, parents, date, author, message)
if cachekey not in self._commit_cache:
self._commit_cache[cachekey] = entry
return entry
def pdiff(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return ''
cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
output = self._command(cmd)
return _add_diff_prefix(output.decode(self.encoding))
def _compose_url(self, rev=None, path=None, proto='file'):
url = '%s://%s' % (proto, self.path)
rev, prefix = self._maprev(rev)
path = path or ''
path = path.lstrip('/')
prefix = prefix.lstrip('/')
if prefix:
url = '%s/%s' % (url, prefix)
if path:
url = '%s/%s' % (url, path)
if not rev is None:
url = '%s@%d' % (url, rev)
return url
def _exists(self, rev, path):
try:
return self.ls(rev, path, directory=True)[0]
except PathDoesNotExist:
return False
def _diff_read(self, rev, path):
try:
entry = self.ls(rev, path, directory=True)[0]
if entry.type == 'f':
contents = self.cat(rev, path)
h = hashlib.sha1(contents).hexdigest
# Catch the common base class of encoding errors which is
# unfortunately ValueError.
try:
return contents.decode(self.encoding), h
except ValueError:
return None, h
elif entry.type == 'l':
return 'link %s\n' % self.readlink(rev, path), None
else:
assert entry.type == 'd'
return 'directory\n', None
except PathDoesNotExist:
return '', None
def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
entry_a = not path or self._exists(rev_a, path)
entry_b = not path or self._exists(rev_b, path)
if not entry_a and not entry_b:
return ''
elif not entry_a or not entry_b:
if (
entry_a and entry_a.type != 'd' or
entry_b and entry_b.type != 'd'
):
_, prefix_a = self._maprev(rev_a)
_, prefix_b = self._maprev(rev_b)
prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
if entry_a else os.devnull
tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
if entry_b else os.devnull
a, hasha = self._diff_read(rev_a, path)
b, hashb = self._diff_read(rev_b, path)
if a is None or b is None:
if hasha == hashb:
return ''
else:
return BINARY_DIFF.format(fromfile=fromfile,
tofile=tofile)
a, b = a.splitlines(True), b.splitlines(True)
diff = difflib.unified_diff(a, b,
fromfile=fromfile,
tofile=tofile)
return ''.join(diff)
elif entry_a:
contents = self.ls(rev_a, path)
else: # entry_b
assert entry_b
contents = self.ls(rev_b, path)
return ''.join(
self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
for entry in contents
)
else:
url_a = self._compose_url(rev=rev_a, path=path)
url_b = self._compose_url(rev=rev_b, path=path)
cmd = [SVN, 'diff', url_a, url_b]
output = self._command(cmd).decode(self.encoding)
return _add_diff_prefix(output)
def diff(self, rev_a, rev_b, path=None):
return self._diff(rev_a, rev_b, path)
def changed(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return []
cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
output = self._command(cmd).decode(self.encoding, 'replace')
lines = output.splitlines()
lines.reverse()
results = []
while lines:
line = lines.pop()
status = line[:3]
path = line[4:].lstrip('/')
copy = None
if status.endswith('+'):
line = lines.pop()
m = changed_copy_info_rx.match(line)
assert m
copy = m.group('src')
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
return results
def _history(self, rev, path, limit=None):
cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
if limit is not None:
cmd.extend(['-l', str(limit)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines()[2:]:
r, p = line.split(None, 1)
results.append(HistoryEntry(int(r), p))
return results
def _mergehistory(self, rev, path, limit=None):
results = set(self._history(rev, path, limit))
for head, minrev, maxrev in self._mergeinfo(rev, path):
l = maxrev - minrev + 1
if limit is not None:
l = min(l, limit)
h = self._history(maxrev, head, l)
for r, p in h:
if r < minrev:
break
results.add(HistoryEntry(r, p))
return results
def ancestor(self, rev1, rev2):
rev1, prefix1 = self._maprev(rev1)
rev2, prefix2 = self._maprev(rev2)
prefix1 = type(self).cleanPath(prefix1)
if prefix1 != '/':
prefix1 = prefix1.rstrip('/')
prefix2 = type(self).cleanPath(prefix2)
if prefix2 != '/':
prefix2 = prefix2.rstrip('/')
self.ls(rev1, prefix1, directory=True)
self.ls(rev2, prefix2, directory=True)
minrev = min(rev1, rev2)
if prefix1 == prefix2:
return '%s:%d' % (prefix1.lstrip('/'), minrev)
history1 = self._history(minrev, prefix1)
history2 = self._history(minrev, prefix2)
youngest = HistoryEntry(0, '/')
for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
for h in history2:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
for h in history1:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
if youngest.rev > 0:
return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
i1 = 0
i2 = 0
len1 = len(history1)
len2 = len(history2)
while i1 < len1 and i2 < len2:
if history1[i1].rev < history2[i2].rev:
i2 += 1
elif history1[i1].rev > history2[i2].rev:
i1 += 1
else:
if history1[i1].path == history2[i2].path:
return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
else:
i1 += 1
i2 += 1
return None
def _blame(self, rev, path):
import os
import xml.etree.ElementTree as ET
url = 'file://' + os.path.abspath(self.path) + path
cmd = [SVN, 'blame', '--xml', '-r', rev, url]
output = self._command(cmd)
tree = ET.fromstring(output)
results = []
cat = self._cat(rev, path)
target = tree.find('target')
try:
iter = target.iter('entry')
except AttributeError: # added in python 2.7
iter = target.getiterator('entry')
for entry, text in zip(iter, cat.splitlines()):
commit = entry.find('commit')
rev = int(commit.attrib.get('revision'))
author = commit.find('author').text
date = commit.find('date').text
date = parse_isodate(date)
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
"""Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
def tip(self, head):
if head == 'HEAD':
return self.youngest()
rev = self.log(limit=1, path=head)[0].rev
return '{head}:{rev}'.format(head=head, rev=rev)
|
ScottDuckworth/python-anyvcs
|
anyvcs/svn.py
|
SvnRepo.dump
|
python
|
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
|
Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L746-L776
| null |
class SvnRepo(VCSRepo):
"""A Subversion repository
Unless otherwise specified, valid revisions are:
- an integer (ex: 194)
- an integer as a string (ex: "194")
- a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
- a branch or tag name at a specific revision (ex: "trunk:194")
Revisions have the following meanings:
- HEAD always maps to the root of the repository (/)
- Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
path in the repository
- The youngest revision is assumed unless a revision is specified
For example, the following code will list the contents of the directory
branches/branch1/src from revision 194:
>>> repo = SvnRepo(path)
>>> repo.ls('branches/branch1:194', 'src')
Branches and tags are detected in branches() and tags() by looking at the
paths specified in repo.branch_glob and repo.tag_glob. The default values
for these variables will detect the following repository layout:
- /trunk - the main development branch
- /branches/* - branches
- /tags/* - tags
If a repository does not fit this layout, everything other than branch and
tag detection will work as expected.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Copy a main repository to a new location."""
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'dump', '--quiet', '.']
dump = subprocess.Popen(
cmd, cwd=srcpath, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo = cls.create(destpath)
repo.load(dump.stdout)
stderr = dump.stderr.read()
dump.stdout.close()
dump.stderr.close()
dump.wait()
if dump.returncode != 0:
raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
return repo
@classmethod
def create(cls, path):
"""Create a new repository"""
try:
os.makedirs(path)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'create', path]
subprocess.check_call(cmd)
return cls(path)
@classmethod
def cleanPath(cls, path):
path = multislash_rx.sub('/', path)
if not path.startswith('/'):
path = '/' + path
return path
def __init__(self, path):
super(SvnRepo, self).__init__(path)
self.branch_glob = ['/trunk/', '/branches/*/']
self.tag_glob = ['/tags/*/']
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
import os
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
def _proplist(self, rev, path):
cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
output = self._command(cmd).decode(self.encoding)
props = [x.strip() for x in output.splitlines()]
#
# Subversion 1.8 adds extra user output when given a path argument.
#
if not path is None and SVN_VERSION >= ('1', '8'):
return props[1:]
else:
return props
def proplist(self, rev, path=None):
"""List Subversion properties of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._proplist(str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._proplist(str(rev), path)
def _propget(self, prop, rev, path):
cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
return self._command(cmd).decode()
def propget(self, prop, rev, path=None):
"""Get Subversion property value of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._propget(prop, str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._propget(prop, str(rev), path)
def _mergeinfo(self, rev, path):
revstr = str(rev)
if 'svn:mergeinfo' not in self._proplist(revstr, path):
return []
results = []
mergeinfo = self._propget('svn:mergeinfo', revstr, path)
for line in mergeinfo.splitlines():
m = mergeinfo_rx.match(line)
assert m
head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
minrev = int(minrev)
maxrev = int(maxrev or minrev)
results.append((head, minrev, maxrev))
return results
def _maprev(self, rev):
if isinstance(rev, int):
return (rev, '/')
m = head_rev_rx.match(rev)
assert m, 'invalid rev'
head, rev = m.group('head', 'rev')
if rev:
rev = int(rev)
else:
rev = self.youngest()
if head is None:
return (rev, '/')
elif head == 'HEAD':
return (rev, '/')
else:
return (rev, '/' + head)
def canonical_rev(self, rev):
try:
types = (str, unicode)
except NameError:
types = str
if isinstance(rev, int):
return rev
elif isinstance(rev, types) and rev.isdigit():
return int(rev)
else:
rev, prefix = self._maprev(rev)
return rev
def compose_rev(self, branch, rev):
return '%s:%d' % (branch, self.canonical_rev(rev))
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev, prefix = self._maprev(rev)
revstr = str(rev)
path = type(self).cleanPath(_join(prefix, path))
forcedir = False
if path.endswith('/'):
forcedir = True
if path != '/':
path = path.rstrip('/')
if path == '/':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self._history(revstr, '/', 1)[0].rev
return [entry]
ltrim = 1
prefix = '/'
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
if not recursive:
cmd.append('--non-recursive')
cmd.extend(['.', path])
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode()
if p.returncode == 1 and 'File not found' in stderr:
raise PathDoesNotExist(rev, path)
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
results = []
lines = output.decode(self.encoding, 'replace').splitlines()
if forcedir and not lines[0].endswith('/'):
raise PathDoesNotExist(rev, path)
if lines[0].endswith('/'):
if directory:
lines = lines[:1]
else:
lines = lines[1:]
for name in lines:
entry_name = name[ltrim:]
entry = attrdict(path=name.strip('/'))
if name.endswith('/'):
if recursive and not recursive_dirs:
continue
entry.type = 'd'
entry_name = entry_name.rstrip('/')
else:
proplist = self._proplist(revstr, name)
if 'svn:special' in proplist:
link = self._cat(revstr, name).decode(self.encoding, 'replace')
link = link.split(None, 1)
if len(link) == 2 and link[0] == 'link':
entry.type = 'l'
if 'target' in report:
entry.target = link[1]
if 'type' not in entry:
entry.type = 'f'
if 'executable' in report:
entry.executable = 'svn:executable' in proplist
if 'size' in report:
entry.size = len(self._cat(revstr, name))
if entry_name:
entry.name = entry_name
if 'commit' in report:
entry.commit = self._history(revstr, name, 1)[0].rev
results.append(entry)
return results
def _cat(self, rev, path):
cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def _readlink(self, rev, path):
output = self._cat(rev, path)
link = output.decode(self.encoding, 'replace').split(None, 1)
assert len(link) == 2 and link[0] == 'link'
return link[1]
def readlink(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._readlink(str(rev), path)
def youngest(self):
cmd = [SVNLOOK, 'youngest', '.']
return int(self._command(cmd))
def _heads(self, globs):
root = {}
for glob in globs:
n = root
for p in glob.strip('/').split('/'):
n = n.setdefault(p, {})
youngest = self.youngest()
results = []
def match(n, path):
for d in self.ls(youngest, path):
if d.get('type') == 'd':
for k, v in n.items():
if fnmatch.fnmatchcase(d.name, k):
if path:
p = path + '/' + d.name
else:
p = d.name
if v:
match(v, p)
else:
results.append(p)
match(root, '')
return results
def branches(self):
return ['HEAD'] + self._heads(self.branch_glob)
def tags(self):
return self._heads(self.tag_glob)
def heads(self):
return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)
def empty(self):
cmd = [SVNLOOK, 'history', '.', '-l2']
output = self._command(cmd)
return len(output.splitlines()) < 4
def __contains__(self, rev):
rev, prefix = self._maprev(rev)
cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [SVNLOOK, 'history', '.']
output = self._command(cmd)
return len(output.splitlines()) - 3
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
if not (revrange is None or isinstance(revrange, (tuple, list))):
# a single revision was given
rev, prefix = self._maprev(revrange)
h = self._history(rev, prefix, 1)
rev = h[0].rev
return self._logentry(rev, prefix)
if revrange is None:
results = self._history(self.youngest(), path or '/', limit)
else:
if revrange[1] is None:
include = set()
rev1 = self.youngest()
for head in self.heads():
if head == 'HEAD':
continue
if path:
p = head + '/' + path.lstrip('/')
else:
p = type(self).cleanPath(head)
include.update(self._mergehistory(rev1, p, limit))
else:
rev1, prefix1 = self._maprev(revrange[1])
if path:
p = type(self).cleanPath(prefix1 + '/' + path)
else:
p = prefix1
if firstparent:
include = self._history(rev1, p)
else:
include = self._mergehistory(rev1, p, limit)
if revrange[0] is None:
results = include
else:
rev0, prefix0 = self._maprev(revrange[0])
exclude = self._mergehistory(rev0, prefix0)
results = include - exclude
results = sorted(results, key=lambda x: x.rev, reverse=True)
results = map(lambda x: self._logentry(x.rev, x.path), results)
if merges is not None:
if merges:
results = filter(lambda x: len(x.parents) > 1, results)
else:
results = filter(lambda x: len(x.parents) <= 1, results)
return list(results)
def _logentry(self, rev, path, history=None):
import hashlib
revstr = str(rev)
cmd = [SVNLOOK, 'info', '.', '-r', revstr]
cachekey = hashlib.sha1(revstr.encode()).hexdigest()
entry = self._commit_cache.get(cachekey)
if entry:
entry._cached = True
return entry
output = self._command(cmd).decode(self.encoding, 'replace')
author, date, logsize, message = output.split('\n', 3)
date = parse_isodate(date)
if history is None:
history = self._history(rev, path, 2)
parents = []
if len(history) > 1:
prev = history[1].rev
if path == '/':
parents.append(prev)
else:
parents.append('%s:%d' % (path, prev))
for head, minrev, maxrev in self._mergeinfo(rev, path):
if prev < maxrev:
h = self._history(maxrev, head, 1)
if head == '/':
parents.append(h[0].rev)
else:
parents.append('%s:%d' % (head, h[0].rev))
entry = CommitLogEntry(rev, parents, date, author, message)
if cachekey not in self._commit_cache:
self._commit_cache[cachekey] = entry
return entry
def pdiff(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return ''
cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
output = self._command(cmd)
return _add_diff_prefix(output.decode(self.encoding))
def _compose_url(self, rev=None, path=None, proto='file'):
url = '%s://%s' % (proto, self.path)
rev, prefix = self._maprev(rev)
path = path or ''
path = path.lstrip('/')
prefix = prefix.lstrip('/')
if prefix:
url = '%s/%s' % (url, prefix)
if path:
url = '%s/%s' % (url, path)
if not rev is None:
url = '%s@%d' % (url, rev)
return url
def _exists(self, rev, path):
try:
return self.ls(rev, path, directory=True)[0]
except PathDoesNotExist:
return False
def _diff_read(self, rev, path):
try:
entry = self.ls(rev, path, directory=True)[0]
if entry.type == 'f':
contents = self.cat(rev, path)
h = hashlib.sha1(contents).hexdigest
# Catch the common base class of encoding errors which is
# unfortunately ValueError.
try:
return contents.decode(self.encoding), h
except ValueError:
return None, h
elif entry.type == 'l':
return 'link %s\n' % self.readlink(rev, path), None
else:
assert entry.type == 'd'
return 'directory\n', None
except PathDoesNotExist:
return '', None
def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
entry_a = not path or self._exists(rev_a, path)
entry_b = not path or self._exists(rev_b, path)
if not entry_a and not entry_b:
return ''
elif not entry_a or not entry_b:
if (
entry_a and entry_a.type != 'd' or
entry_b and entry_b.type != 'd'
):
_, prefix_a = self._maprev(rev_a)
_, prefix_b = self._maprev(rev_b)
prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
if entry_a else os.devnull
tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
if entry_b else os.devnull
a, hasha = self._diff_read(rev_a, path)
b, hashb = self._diff_read(rev_b, path)
if a is None or b is None:
if hasha == hashb:
return ''
else:
return BINARY_DIFF.format(fromfile=fromfile,
tofile=tofile)
a, b = a.splitlines(True), b.splitlines(True)
diff = difflib.unified_diff(a, b,
fromfile=fromfile,
tofile=tofile)
return ''.join(diff)
elif entry_a:
contents = self.ls(rev_a, path)
else: # entry_b
assert entry_b
contents = self.ls(rev_b, path)
return ''.join(
self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
for entry in contents
)
else:
url_a = self._compose_url(rev=rev_a, path=path)
url_b = self._compose_url(rev=rev_b, path=path)
cmd = [SVN, 'diff', url_a, url_b]
output = self._command(cmd).decode(self.encoding)
return _add_diff_prefix(output)
def diff(self, rev_a, rev_b, path=None):
return self._diff(rev_a, rev_b, path)
def changed(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return []
cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
output = self._command(cmd).decode(self.encoding, 'replace')
lines = output.splitlines()
lines.reverse()
results = []
while lines:
line = lines.pop()
status = line[:3]
path = line[4:].lstrip('/')
copy = None
if status.endswith('+'):
line = lines.pop()
m = changed_copy_info_rx.match(line)
assert m
copy = m.group('src')
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
return results
def _history(self, rev, path, limit=None):
cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
if limit is not None:
cmd.extend(['-l', str(limit)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines()[2:]:
r, p = line.split(None, 1)
results.append(HistoryEntry(int(r), p))
return results
def _mergehistory(self, rev, path, limit=None):
results = set(self._history(rev, path, limit))
for head, minrev, maxrev in self._mergeinfo(rev, path):
l = maxrev - minrev + 1
if limit is not None:
l = min(l, limit)
h = self._history(maxrev, head, l)
for r, p in h:
if r < minrev:
break
results.add(HistoryEntry(r, p))
return results
def ancestor(self, rev1, rev2):
rev1, prefix1 = self._maprev(rev1)
rev2, prefix2 = self._maprev(rev2)
prefix1 = type(self).cleanPath(prefix1)
if prefix1 != '/':
prefix1 = prefix1.rstrip('/')
prefix2 = type(self).cleanPath(prefix2)
if prefix2 != '/':
prefix2 = prefix2.rstrip('/')
self.ls(rev1, prefix1, directory=True)
self.ls(rev2, prefix2, directory=True)
minrev = min(rev1, rev2)
if prefix1 == prefix2:
return '%s:%d' % (prefix1.lstrip('/'), minrev)
history1 = self._history(minrev, prefix1)
history2 = self._history(minrev, prefix2)
youngest = HistoryEntry(0, '/')
for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
for h in history2:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
for h in history1:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
if youngest.rev > 0:
return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
i1 = 0
i2 = 0
len1 = len(history1)
len2 = len(history2)
while i1 < len1 and i2 < len2:
if history1[i1].rev < history2[i2].rev:
i2 += 1
elif history1[i1].rev > history2[i2].rev:
i1 += 1
else:
if history1[i1].path == history2[i2].path:
return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
else:
i1 += 1
i2 += 1
return None
def _blame(self, rev, path):
import os
import xml.etree.ElementTree as ET
url = 'file://' + os.path.abspath(self.path) + path
cmd = [SVN, 'blame', '--xml', '-r', rev, url]
output = self._command(cmd)
tree = ET.fromstring(output)
results = []
cat = self._cat(rev, path)
target = tree.find('target')
try:
iter = target.iter('entry')
except AttributeError: # added in python 2.7
iter = target.getiterator('entry')
for entry, text in zip(iter, cat.splitlines()):
commit = entry.find('commit')
rev = int(commit.attrib.get('revision'))
author = commit.find('author').text
date = commit.find('date').text
date = parse_isodate(date)
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
"""Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
def tip(self, head):
if head == 'HEAD':
return self.youngest()
rev = self.log(limit=1, path=head)[0].rev
return '{head}:{rev}'.format(head=head, rev=rev)
|
ScottDuckworth/python-anyvcs
|
anyvcs/svn.py
|
SvnRepo.load
|
python
|
def load(
self, stream, progress=None, ignore_uuid=False, force_uuid=False,
use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
cmd = [SVNADMIN, 'load', '.']
if progress is None:
cmd.append('-q')
if ignore_uuid:
cmd.append('--ignore-uuid')
if force_uuid:
cmd.append('--force-uuid')
if use_pre_commit_hook:
cmd.append('--use-pre-commit-hook')
if use_post_commit_hook:
cmd.append('--use-post-commit-hook')
if parent_dir:
cmd.extend(['--parent-dir', parent_dir])
p = subprocess.Popen(
cmd, cwd=self.path, stdin=stream, stdout=progress,
stderr=subprocess.PIPE
)
stderr = p.stderr.read()
p.stderr.close()
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
|
Load a dumpfile stream into the repository.
:param stream: A file stream from which the dumpfile is read
:param progress: A file stream to which progress is written
See ``svnadmin help load`` for details on the other arguments.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L778-L811
| null |
class SvnRepo(VCSRepo):
"""A Subversion repository
Unless otherwise specified, valid revisions are:
- an integer (ex: 194)
- an integer as a string (ex: "194")
- a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
- a branch or tag name at a specific revision (ex: "trunk:194")
Revisions have the following meanings:
- HEAD always maps to the root of the repository (/)
- Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
path in the repository
- The youngest revision is assumed unless a revision is specified
For example, the following code will list the contents of the directory
branches/branch1/src from revision 194:
>>> repo = SvnRepo(path)
>>> repo.ls('branches/branch1:194', 'src')
Branches and tags are detected in branches() and tags() by looking at the
paths specified in repo.branch_glob and repo.tag_glob. The default values
for these variables will detect the following repository layout:
- /trunk - the main development branch
- /branches/* - branches
- /tags/* - tags
If a repository does not fit this layout, everything other than branch and
tag detection will work as expected.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Copy a main repository to a new location."""
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'dump', '--quiet', '.']
dump = subprocess.Popen(
cmd, cwd=srcpath, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo = cls.create(destpath)
repo.load(dump.stdout)
stderr = dump.stderr.read()
dump.stdout.close()
dump.stderr.close()
dump.wait()
if dump.returncode != 0:
raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
return repo
@classmethod
def create(cls, path):
"""Create a new repository"""
try:
os.makedirs(path)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [SVNADMIN, 'create', path]
subprocess.check_call(cmd)
return cls(path)
@classmethod
def cleanPath(cls, path):
path = multislash_rx.sub('/', path)
if not path.startswith('/'):
path = '/' + path
return path
def __init__(self, path):
super(SvnRepo, self).__init__(path)
self.branch_glob = ['/trunk/', '/branches/*/']
self.tag_glob = ['/tags/*/']
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
import os
path = os.path.join(self.path, '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
def _proplist(self, rev, path):
cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
output = self._command(cmd).decode(self.encoding)
props = [x.strip() for x in output.splitlines()]
#
# Subversion 1.8 adds extra user output when given a path argument.
#
if not path is None and SVN_VERSION >= ('1', '8'):
return props[1:]
else:
return props
def proplist(self, rev, path=None):
"""List Subversion properties of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._proplist(str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._proplist(str(rev), path)
def _propget(self, prop, rev, path):
cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
return self._command(cmd).decode()
def propget(self, prop, rev, path=None):
"""Get Subversion property value of the path"""
rev, prefix = self._maprev(rev)
if path is None:
return self._propget(prop, str(rev), None)
else:
path = type(self).cleanPath(_join(prefix, path))
return self._propget(prop, str(rev), path)
def _mergeinfo(self, rev, path):
revstr = str(rev)
if 'svn:mergeinfo' not in self._proplist(revstr, path):
return []
results = []
mergeinfo = self._propget('svn:mergeinfo', revstr, path)
for line in mergeinfo.splitlines():
m = mergeinfo_rx.match(line)
assert m
head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
minrev = int(minrev)
maxrev = int(maxrev or minrev)
results.append((head, minrev, maxrev))
return results
def _maprev(self, rev):
if isinstance(rev, int):
return (rev, '/')
m = head_rev_rx.match(rev)
assert m, 'invalid rev'
head, rev = m.group('head', 'rev')
if rev:
rev = int(rev)
else:
rev = self.youngest()
if head is None:
return (rev, '/')
elif head == 'HEAD':
return (rev, '/')
else:
return (rev, '/' + head)
def canonical_rev(self, rev):
try:
types = (str, unicode)
except NameError:
types = str
if isinstance(rev, int):
return rev
elif isinstance(rev, types) and rev.isdigit():
return int(rev)
else:
rev, prefix = self._maprev(rev)
return rev
def compose_rev(self, branch, rev):
return '%s:%d' % (branch, self.canonical_rev(rev))
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
rev, prefix = self._maprev(rev)
revstr = str(rev)
path = type(self).cleanPath(_join(prefix, path))
forcedir = False
if path.endswith('/'):
forcedir = True
if path != '/':
path = path.rstrip('/')
if path == '/':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self._history(revstr, '/', 1)[0].rev
return [entry]
ltrim = 1
prefix = '/'
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
if not recursive:
cmd.append('--non-recursive')
cmd.extend(['.', path])
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode()
if p.returncode == 1 and 'File not found' in stderr:
raise PathDoesNotExist(rev, path)
raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
results = []
lines = output.decode(self.encoding, 'replace').splitlines()
if forcedir and not lines[0].endswith('/'):
raise PathDoesNotExist(rev, path)
if lines[0].endswith('/'):
if directory:
lines = lines[:1]
else:
lines = lines[1:]
for name in lines:
entry_name = name[ltrim:]
entry = attrdict(path=name.strip('/'))
if name.endswith('/'):
if recursive and not recursive_dirs:
continue
entry.type = 'd'
entry_name = entry_name.rstrip('/')
else:
proplist = self._proplist(revstr, name)
if 'svn:special' in proplist:
link = self._cat(revstr, name).decode(self.encoding, 'replace')
link = link.split(None, 1)
if len(link) == 2 and link[0] == 'link':
entry.type = 'l'
if 'target' in report:
entry.target = link[1]
if 'type' not in entry:
entry.type = 'f'
if 'executable' in report:
entry.executable = 'svn:executable' in proplist
if 'size' in report:
entry.size = len(self._cat(revstr, name))
if entry_name:
entry.name = entry_name
if 'commit' in report:
entry.commit = self._history(revstr, name, 1)[0].rev
results.append(entry)
return results
def _cat(self, rev, path):
cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def _readlink(self, rev, path):
output = self._cat(rev, path)
link = output.decode(self.encoding, 'replace').split(None, 1)
assert len(link) == 2 and link[0] == 'link'
return link[1]
def readlink(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._readlink(str(rev), path)
def youngest(self):
cmd = [SVNLOOK, 'youngest', '.']
return int(self._command(cmd))
def _heads(self, globs):
root = {}
for glob in globs:
n = root
for p in glob.strip('/').split('/'):
n = n.setdefault(p, {})
youngest = self.youngest()
results = []
def match(n, path):
for d in self.ls(youngest, path):
if d.get('type') == 'd':
for k, v in n.items():
if fnmatch.fnmatchcase(d.name, k):
if path:
p = path + '/' + d.name
else:
p = d.name
if v:
match(v, p)
else:
results.append(p)
match(root, '')
return results
def branches(self):
return ['HEAD'] + self._heads(self.branch_glob)
def tags(self):
return self._heads(self.tag_glob)
def heads(self):
return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)
def empty(self):
cmd = [SVNLOOK, 'history', '.', '-l2']
output = self._command(cmd)
return len(output.splitlines()) < 4
def __contains__(self, rev):
rev, prefix = self._maprev(rev)
cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [SVNLOOK, 'history', '.']
output = self._command(cmd)
return len(output.splitlines()) - 3
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
if not (revrange is None or isinstance(revrange, (tuple, list))):
# a single revision was given
rev, prefix = self._maprev(revrange)
h = self._history(rev, prefix, 1)
rev = h[0].rev
return self._logentry(rev, prefix)
if revrange is None:
results = self._history(self.youngest(), path or '/', limit)
else:
if revrange[1] is None:
include = set()
rev1 = self.youngest()
for head in self.heads():
if head == 'HEAD':
continue
if path:
p = head + '/' + path.lstrip('/')
else:
p = type(self).cleanPath(head)
include.update(self._mergehistory(rev1, p, limit))
else:
rev1, prefix1 = self._maprev(revrange[1])
if path:
p = type(self).cleanPath(prefix1 + '/' + path)
else:
p = prefix1
if firstparent:
include = self._history(rev1, p)
else:
include = self._mergehistory(rev1, p, limit)
if revrange[0] is None:
results = include
else:
rev0, prefix0 = self._maprev(revrange[0])
exclude = self._mergehistory(rev0, prefix0)
results = include - exclude
results = sorted(results, key=lambda x: x.rev, reverse=True)
results = map(lambda x: self._logentry(x.rev, x.path), results)
if merges is not None:
if merges:
results = filter(lambda x: len(x.parents) > 1, results)
else:
results = filter(lambda x: len(x.parents) <= 1, results)
return list(results)
def _logentry(self, rev, path, history=None):
import hashlib
revstr = str(rev)
cmd = [SVNLOOK, 'info', '.', '-r', revstr]
cachekey = hashlib.sha1(revstr.encode()).hexdigest()
entry = self._commit_cache.get(cachekey)
if entry:
entry._cached = True
return entry
output = self._command(cmd).decode(self.encoding, 'replace')
author, date, logsize, message = output.split('\n', 3)
date = parse_isodate(date)
if history is None:
history = self._history(rev, path, 2)
parents = []
if len(history) > 1:
prev = history[1].rev
if path == '/':
parents.append(prev)
else:
parents.append('%s:%d' % (path, prev))
for head, minrev, maxrev in self._mergeinfo(rev, path):
if prev < maxrev:
h = self._history(maxrev, head, 1)
if head == '/':
parents.append(h[0].rev)
else:
parents.append('%s:%d' % (head, h[0].rev))
entry = CommitLogEntry(rev, parents, date, author, message)
if cachekey not in self._commit_cache:
self._commit_cache[cachekey] = entry
return entry
def pdiff(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return ''
cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
output = self._command(cmd)
return _add_diff_prefix(output.decode(self.encoding))
def _compose_url(self, rev=None, path=None, proto='file'):
url = '%s://%s' % (proto, self.path)
rev, prefix = self._maprev(rev)
path = path or ''
path = path.lstrip('/')
prefix = prefix.lstrip('/')
if prefix:
url = '%s/%s' % (url, prefix)
if path:
url = '%s/%s' % (url, path)
if not rev is None:
url = '%s@%d' % (url, rev)
return url
def _exists(self, rev, path):
try:
return self.ls(rev, path, directory=True)[0]
except PathDoesNotExist:
return False
def _diff_read(self, rev, path):
try:
entry = self.ls(rev, path, directory=True)[0]
if entry.type == 'f':
contents = self.cat(rev, path)
h = hashlib.sha1(contents).hexdigest
# Catch the common base class of encoding errors which is
# unfortunately ValueError.
try:
return contents.decode(self.encoding), h
except ValueError:
return None, h
elif entry.type == 'l':
return 'link %s\n' % self.readlink(rev, path), None
else:
assert entry.type == 'd'
return 'directory\n', None
except PathDoesNotExist:
return '', None
def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
entry_a = not path or self._exists(rev_a, path)
entry_b = not path or self._exists(rev_b, path)
if not entry_a and not entry_b:
return ''
elif not entry_a or not entry_b:
if (
entry_a and entry_a.type != 'd' or
entry_b and entry_b.type != 'd'
):
_, prefix_a = self._maprev(rev_a)
_, prefix_b = self._maprev(rev_b)
prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
if entry_a else os.devnull
tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
if entry_b else os.devnull
a, hasha = self._diff_read(rev_a, path)
b, hashb = self._diff_read(rev_b, path)
if a is None or b is None:
if hasha == hashb:
return ''
else:
return BINARY_DIFF.format(fromfile=fromfile,
tofile=tofile)
a, b = a.splitlines(True), b.splitlines(True)
diff = difflib.unified_diff(a, b,
fromfile=fromfile,
tofile=tofile)
return ''.join(diff)
elif entry_a:
contents = self.ls(rev_a, path)
else: # entry_b
assert entry_b
contents = self.ls(rev_b, path)
return ''.join(
self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
for entry in contents
)
else:
url_a = self._compose_url(rev=rev_a, path=path)
url_b = self._compose_url(rev=rev_b, path=path)
cmd = [SVN, 'diff', url_a, url_b]
output = self._command(cmd).decode(self.encoding)
return _add_diff_prefix(output)
def diff(self, rev_a, rev_b, path=None):
return self._diff(rev_a, rev_b, path)
def changed(self, rev):
rev, prefix = self._maprev(rev)
if rev == 0:
return []
cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
output = self._command(cmd).decode(self.encoding, 'replace')
lines = output.splitlines()
lines.reverse()
results = []
while lines:
line = lines.pop()
status = line[:3]
path = line[4:].lstrip('/')
copy = None
if status.endswith('+'):
line = lines.pop()
m = changed_copy_info_rx.match(line)
assert m
copy = m.group('src')
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
return results
def _history(self, rev, path, limit=None):
cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
if limit is not None:
cmd.extend(['-l', str(limit)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines()[2:]:
r, p = line.split(None, 1)
results.append(HistoryEntry(int(r), p))
return results
def _mergehistory(self, rev, path, limit=None):
results = set(self._history(rev, path, limit))
for head, minrev, maxrev in self._mergeinfo(rev, path):
l = maxrev - minrev + 1
if limit is not None:
l = min(l, limit)
h = self._history(maxrev, head, l)
for r, p in h:
if r < minrev:
break
results.add(HistoryEntry(r, p))
return results
def ancestor(self, rev1, rev2):
rev1, prefix1 = self._maprev(rev1)
rev2, prefix2 = self._maprev(rev2)
prefix1 = type(self).cleanPath(prefix1)
if prefix1 != '/':
prefix1 = prefix1.rstrip('/')
prefix2 = type(self).cleanPath(prefix2)
if prefix2 != '/':
prefix2 = prefix2.rstrip('/')
self.ls(rev1, prefix1, directory=True)
self.ls(rev2, prefix2, directory=True)
minrev = min(rev1, rev2)
if prefix1 == prefix2:
return '%s:%d' % (prefix1.lstrip('/'), minrev)
history1 = self._history(minrev, prefix1)
history2 = self._history(minrev, prefix2)
youngest = HistoryEntry(0, '/')
for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
for h in history2:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
for h in history1:
if h.rev < minrev or h.rev < youngest.rev:
break
if h.path == head and minrev <= h.rev <= maxrev:
youngest = h
if youngest.rev > 0:
return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
i1 = 0
i2 = 0
len1 = len(history1)
len2 = len(history2)
while i1 < len1 and i2 < len2:
if history1[i1].rev < history2[i2].rev:
i2 += 1
elif history1[i1].rev > history2[i2].rev:
i1 += 1
else:
if history1[i1].path == history2[i2].path:
return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
else:
i1 += 1
i2 += 1
return None
def _blame(self, rev, path):
import os
import xml.etree.ElementTree as ET
url = 'file://' + os.path.abspath(self.path) + path
cmd = [SVN, 'blame', '--xml', '-r', rev, url]
output = self._command(cmd)
tree = ET.fromstring(output)
results = []
cat = self._cat(rev, path)
target = tree.find('target')
try:
iter = target.iter('entry')
except AttributeError: # added in python 2.7
iter = target.getiterator('entry')
for entry, text in zip(iter, cat.splitlines()):
commit = entry.find('commit')
rev = int(commit.attrib.get('revision'))
author = commit.find('author').text
date = commit.find('date').text
date = parse_isodate(date)
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
rev, prefix = self._maprev(rev)
path = type(self).cleanPath(_join(prefix, path))
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
def tip(self, head):
if head == 'HEAD':
return self.youngest()
rev = self.log(limit=1, path=head)[0].rev
return '{head}:{rev}'.format(head=head, rev=rev)
|
ScottDuckworth/python-anyvcs
|
anyvcs/hg.py
|
HgRepo.clone
|
python
|
def clone(cls, srcpath, destpath):
# Mercurial will not create intermediate directories for clones.
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath)
|
Clone an existing repository to a new bare repository.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/hg.py#L66-L76
| null |
class HgRepo(VCSRepo):
"""A Mercurial repository
Valid revisions are anything that Mercurial considers as a revision.
"""
@classmethod
@classmethod
def create(cls, path):
"""Create a new repository"""
cmd = [HG, 'init', path]
subprocess.check_call(cmd)
return cls(path)
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
path = os.path.join(self.path, '.hg', '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
@property
def _object_cache(self):
try:
return self._object_cache_v
except AttributeError:
object_cache_path = os.path.join(self.private_path, 'object-cache')
self._object_cache_v = HashDict(object_cache_path)
return self._object_cache_v
def canonical_rev(self, rev):
if isinstance(rev, str) and canonical_rev_rx.match(rev):
return rev
else:
cmd = [HG, 'log', '--template={node}', '-r', str(rev)]
return self._command(cmd).decode()
def compose_rev(self, branch, rev):
return self.canonical_rev(rev)
def _revnum(self, rev):
if isinstance(rev, int):
return rev
elif isinstance(rev, str) and rev.isdigit():
return int(rev)
else:
cmd = [HG, 'log', '--template={rev}', '-r', str(rev)]
return int(self._command(cmd))
def _ls(
self, rev, path, recursive=False, recursive_dirs=False, directory=False
):
forcedir = False
if path.endswith('/'):
forcedir = True
path = path.rstrip('/')
if path == '':
ltrim = 0
prefix = ''
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [HG, 'manifest', '--debug', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
if not output:
return
dirs = set()
exists = False
for line in output.splitlines():
m = manifest_rx.match(line)
assert m, 'unexpected output: ' + line
t, name, objid = m.group('type', 'name', 'object')
if name.startswith(prefix) or (not forcedir and name == path):
if directory and name.startswith(prefix):
yield ('d', path, '', None)
return
exists = True
entry_name = name[ltrim:]
if '/' in entry_name:
p = parent_dirs(entry_name)
if not recursive:
d = next(p)
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
continue
if recursive_dirs:
for d in p:
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
yield (t, name, entry_name, objid)
if not exists:
raise PathDoesNotExist(rev, path)
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
revstr = str(rev)
path = type(self).cleanPath(path)
if path == '':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self.canonical_rev(revstr)
return [entry]
if 'commit' in report:
import fcntl
import tempfile
files_cache_path = os.path.join(self.private_path, 'files-cache.log')
with open(files_cache_path, 'a+') as files_cache:
fcntl.lockf(files_cache, fcntl.LOCK_EX, 0, 0, os.SEEK_CUR)
files_cache.seek(0)
log = files_cache.read().split('\0')
assert log.pop() == ''
if log:
startlog = int(log[-1].splitlines()[0]) + 1
if startlog >= len(self):
startlog = None
else:
startlog = 0
if startlog is not None:
with tempfile.NamedTemporaryFile() as style:
style.write((
r"changeset = '{rev}\n{node}\n{parents}\n{files}\0'" '\n'
r"parent = '{rev} '" '\n'
r"file = '{file|escape}\n'" '\n'
).encode())
style.flush()
cmd = [HG, 'log', '--style', style.name, '-r', '%d:' % startlog]
output = self._command(cmd).decode(self.encoding, 'replace')
files_cache.write(output)
extend = output.split('\0')
assert extend.pop() == ''
log.extend(extend)
results = []
lookup_commit = {}
for t, fullpath, name, objid in self._ls(revstr, path, recursive, recursive_dirs, directory):
entry = attrdict(path=fullpath)
if name:
entry.name = name
if t == 'd':
entry.type = 'd'
elif t in ' *':
entry.type = 'f'
if 'executable' in report:
entry.executable = t == '*'
if 'size' in report:
entry.size = len(self._cat(revstr, fullpath))
elif t == '@':
entry.type = 'l'
if 'target' in report:
entry.target = self._cat(revstr, name).decode(self.encoding, 'replace')
else:
assert False, 'unexpected output: ' + line
if 'commit' in report:
lookup = True
if objid:
try:
import hashlib
concat = (fullpath + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
entry.commit = self._object_cache[k].decode()
entry._commit_cached = True
lookup = False
except KeyError:
pass
if lookup:
if name:
p = type(self).cleanPath(path + '/' + name)
else:
p = path
lookup_commit[p] = (entry, objid)
results.append(entry)
if 'commit' in report:
import heapq
ancestors = [-self._revnum(revstr)]
while ancestors and lookup_commit:
r = -heapq.heappop(ancestors)
lines = log[r].splitlines()
parents = lines[2]
if parents:
for x in parents.split():
x = int(x)
if x != -1:
if -x not in ancestors:
heapq.heappush(ancestors, -x)
elif r > 0:
x = r - 1
if x not in ancestors:
heapq.heappush(ancestors, -x)
for p in list(lookup_commit):
prefix = p.rstrip('/') + '/'
for l in lines[3:]:
if l == p or l.startswith(prefix):
commit = str(lines[1])
entry, objid = lookup_commit[p]
entry.commit = commit
if objid:
import hashlib
concat = (p + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
self._object_cache[k] = commit.encode()
del lookup_commit[p]
break
return results
def _cat(self, rev, path):
cmd = [HG, 'cat', '-r', rev, path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def readlink(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._cat(str(rev), path).decode(self.encoding, 'replace')
def _parse_heads(self, cmd):
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines():
m = parse_heads_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def branches(self):
cmd = [HG, 'branches']
return self._parse_heads(cmd)
def tags(self):
cmd = [HG, 'tags']
return self._parse_heads(cmd)
def bookmarks(self):
"""Get list of bookmarks"""
cmd = [HG, 'bookmarks']
output = self._command(cmd).decode(self.encoding, 'replace')
if output.startswith('no bookmarks set'):
return []
results = []
for line in output.splitlines():
m = bookmarks_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def heads(self):
return self.branches() + self.tags() + self.bookmarks()
def empty(self):
cmd = [HG, 'log', '--template=a', '-l1']
output = self._command(cmd)
return len(output) == 0
def __contains__(self, rev):
cmd = [HG, 'log', '--template=a', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [HG, 'id', '-n', '-r', 'tip']
output = self._command(cmd)
return int(output) + 1
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
cmd = [
HG, 'log', '--debug', '--template={node}\\0{parents}\\0'
'{date|hgdate}\\0{author|nonempty}'
'\\0{desc|tabindent|nonempty}\\0\\0']
if limit is not None:
cmd.append('-l' + str(limit))
if firstparent:
cmd.append('--follow-first')
if merges is not None:
if merges:
cmd.append('--only-merges')
else:
cmd.append('--no-merges')
single = False
if revrange is None:
pass
elif isinstance(revrange, (tuple, list)):
if revrange[0] is None:
if revrange[1] is None:
pass
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1]])
else:
if revrange[1] is None:
cmd.extend(['-r', 'reverse(descendants(%s))' % revrange[0]])
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1], '--prune', str(revrange[0])])
else:
entry = self._commit_cache.get(self.canonical_rev(revrange))
if entry:
entry._cached = True
return entry
cmd.extend(['-r', str(revrange)])
single = True
if path:
if follow:
cmd.append('--follow')
cmd.extend(['--', type(self).cleanPath(path)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
logs = output.split('\0\0')
logs.pop()
for log in logs:
rev, parents, date, author, message = log.split('\0', 4)
parents = [
x[1] for x in filter(
lambda x: x[0] != '-1',
(x.split(':') for x in parents.split())
)
]
date = parse_hgdate(date)
message = message.replace('\n\t', '\n')
entry = CommitLogEntry(rev, parents, date, author, message)
if rev not in self._commit_cache:
self._commit_cache[rev] = entry
if single:
return entry
results.append(entry)
return results
def changed(self, rev):
cmd = [HG, 'status', '-C', '--change', str(rev)]
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
copy = None
for line in reversed(output.splitlines()):
if line.startswith(' '):
copy = line.lstrip()
else:
status, path = line.split(None, 1)
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
copy = None
results.reverse()
return results
def pdiff(self, rev):
cmd = [HG, 'log', '--template=a', '-p', '-r', str(rev)]
return self._command(cmd)[1:].decode(self.encoding)
def diff(self, rev_a, rev_b, path=None):
cmd = [HG, 'diff', '-r', rev_a, '-r', rev_b]
if path is not None:
cmd.extend(['--', type(self).cleanPath(path)])
return self._command(cmd).decode(self.encoding)
def ancestor(self, rev1, rev2):
cmd = [HG, 'log', '--template={node}', '-r', 'ancestor(%s, %s)' % (rev1, rev2)]
output = self._command(cmd).decode()
if output == '':
return None
else:
return output
def _blame(self, rev, path):
cmd = [HG, 'annotate', '-unv', '-r', rev, '--', path]
output = self._command(cmd).decode(self.encoding, 'replace')
revs = {}
results = []
cat = self._cat(rev, path)
for line, text in zip(output.splitlines(), cat.splitlines()):
m = annotate_rx.match(line)
assert m, 'unexpected output: ' + line
rev, author = m.group('rev', 'author')
try:
rev, date = revs[rev]
except KeyError:
cmd = [HG, 'log', '--template={node}\n{date|hgdate}', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
rev, date = output.split('\n', 1)
date = parse_hgdate(date)
revs[rev] = rev, date
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def tip(self, head):
return self.canonical_rev(head)
|
ScottDuckworth/python-anyvcs
|
anyvcs/hg.py
|
HgRepo.create
|
python
|
def create(cls, path):
cmd = [HG, 'init', path]
subprocess.check_call(cmd)
return cls(path)
|
Create a new repository
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/hg.py#L79-L83
| null |
class HgRepo(VCSRepo):
"""A Mercurial repository
Valid revisions are anything that Mercurial considers as a revision.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Clone an existing repository to a new bare repository."""
# Mercurial will not create intermediate directories for clones.
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath)
@classmethod
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
path = os.path.join(self.path, '.hg', '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
@property
def _object_cache(self):
try:
return self._object_cache_v
except AttributeError:
object_cache_path = os.path.join(self.private_path, 'object-cache')
self._object_cache_v = HashDict(object_cache_path)
return self._object_cache_v
def canonical_rev(self, rev):
if isinstance(rev, str) and canonical_rev_rx.match(rev):
return rev
else:
cmd = [HG, 'log', '--template={node}', '-r', str(rev)]
return self._command(cmd).decode()
def compose_rev(self, branch, rev):
return self.canonical_rev(rev)
def _revnum(self, rev):
if isinstance(rev, int):
return rev
elif isinstance(rev, str) and rev.isdigit():
return int(rev)
else:
cmd = [HG, 'log', '--template={rev}', '-r', str(rev)]
return int(self._command(cmd))
def _ls(
self, rev, path, recursive=False, recursive_dirs=False, directory=False
):
forcedir = False
if path.endswith('/'):
forcedir = True
path = path.rstrip('/')
if path == '':
ltrim = 0
prefix = ''
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [HG, 'manifest', '--debug', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
if not output:
return
dirs = set()
exists = False
for line in output.splitlines():
m = manifest_rx.match(line)
assert m, 'unexpected output: ' + line
t, name, objid = m.group('type', 'name', 'object')
if name.startswith(prefix) or (not forcedir and name == path):
if directory and name.startswith(prefix):
yield ('d', path, '', None)
return
exists = True
entry_name = name[ltrim:]
if '/' in entry_name:
p = parent_dirs(entry_name)
if not recursive:
d = next(p)
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
continue
if recursive_dirs:
for d in p:
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
yield (t, name, entry_name, objid)
if not exists:
raise PathDoesNotExist(rev, path)
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
revstr = str(rev)
path = type(self).cleanPath(path)
if path == '':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self.canonical_rev(revstr)
return [entry]
if 'commit' in report:
import fcntl
import tempfile
files_cache_path = os.path.join(self.private_path, 'files-cache.log')
with open(files_cache_path, 'a+') as files_cache:
fcntl.lockf(files_cache, fcntl.LOCK_EX, 0, 0, os.SEEK_CUR)
files_cache.seek(0)
log = files_cache.read().split('\0')
assert log.pop() == ''
if log:
startlog = int(log[-1].splitlines()[0]) + 1
if startlog >= len(self):
startlog = None
else:
startlog = 0
if startlog is not None:
with tempfile.NamedTemporaryFile() as style:
style.write((
r"changeset = '{rev}\n{node}\n{parents}\n{files}\0'" '\n'
r"parent = '{rev} '" '\n'
r"file = '{file|escape}\n'" '\n'
).encode())
style.flush()
cmd = [HG, 'log', '--style', style.name, '-r', '%d:' % startlog]
output = self._command(cmd).decode(self.encoding, 'replace')
files_cache.write(output)
extend = output.split('\0')
assert extend.pop() == ''
log.extend(extend)
results = []
lookup_commit = {}
for t, fullpath, name, objid in self._ls(revstr, path, recursive, recursive_dirs, directory):
entry = attrdict(path=fullpath)
if name:
entry.name = name
if t == 'd':
entry.type = 'd'
elif t in ' *':
entry.type = 'f'
if 'executable' in report:
entry.executable = t == '*'
if 'size' in report:
entry.size = len(self._cat(revstr, fullpath))
elif t == '@':
entry.type = 'l'
if 'target' in report:
entry.target = self._cat(revstr, name).decode(self.encoding, 'replace')
else:
assert False, 'unexpected output: ' + line
if 'commit' in report:
lookup = True
if objid:
try:
import hashlib
concat = (fullpath + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
entry.commit = self._object_cache[k].decode()
entry._commit_cached = True
lookup = False
except KeyError:
pass
if lookup:
if name:
p = type(self).cleanPath(path + '/' + name)
else:
p = path
lookup_commit[p] = (entry, objid)
results.append(entry)
if 'commit' in report:
import heapq
ancestors = [-self._revnum(revstr)]
while ancestors and lookup_commit:
r = -heapq.heappop(ancestors)
lines = log[r].splitlines()
parents = lines[2]
if parents:
for x in parents.split():
x = int(x)
if x != -1:
if -x not in ancestors:
heapq.heappush(ancestors, -x)
elif r > 0:
x = r - 1
if x not in ancestors:
heapq.heappush(ancestors, -x)
for p in list(lookup_commit):
prefix = p.rstrip('/') + '/'
for l in lines[3:]:
if l == p or l.startswith(prefix):
commit = str(lines[1])
entry, objid = lookup_commit[p]
entry.commit = commit
if objid:
import hashlib
concat = (p + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
self._object_cache[k] = commit.encode()
del lookup_commit[p]
break
return results
def _cat(self, rev, path):
cmd = [HG, 'cat', '-r', rev, path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def readlink(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._cat(str(rev), path).decode(self.encoding, 'replace')
def _parse_heads(self, cmd):
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines():
m = parse_heads_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def branches(self):
cmd = [HG, 'branches']
return self._parse_heads(cmd)
def tags(self):
cmd = [HG, 'tags']
return self._parse_heads(cmd)
def bookmarks(self):
"""Get list of bookmarks"""
cmd = [HG, 'bookmarks']
output = self._command(cmd).decode(self.encoding, 'replace')
if output.startswith('no bookmarks set'):
return []
results = []
for line in output.splitlines():
m = bookmarks_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def heads(self):
return self.branches() + self.tags() + self.bookmarks()
def empty(self):
cmd = [HG, 'log', '--template=a', '-l1']
output = self._command(cmd)
return len(output) == 0
def __contains__(self, rev):
cmd = [HG, 'log', '--template=a', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [HG, 'id', '-n', '-r', 'tip']
output = self._command(cmd)
return int(output) + 1
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
cmd = [
HG, 'log', '--debug', '--template={node}\\0{parents}\\0'
'{date|hgdate}\\0{author|nonempty}'
'\\0{desc|tabindent|nonempty}\\0\\0']
if limit is not None:
cmd.append('-l' + str(limit))
if firstparent:
cmd.append('--follow-first')
if merges is not None:
if merges:
cmd.append('--only-merges')
else:
cmd.append('--no-merges')
single = False
if revrange is None:
pass
elif isinstance(revrange, (tuple, list)):
if revrange[0] is None:
if revrange[1] is None:
pass
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1]])
else:
if revrange[1] is None:
cmd.extend(['-r', 'reverse(descendants(%s))' % revrange[0]])
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1], '--prune', str(revrange[0])])
else:
entry = self._commit_cache.get(self.canonical_rev(revrange))
if entry:
entry._cached = True
return entry
cmd.extend(['-r', str(revrange)])
single = True
if path:
if follow:
cmd.append('--follow')
cmd.extend(['--', type(self).cleanPath(path)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
logs = output.split('\0\0')
logs.pop()
for log in logs:
rev, parents, date, author, message = log.split('\0', 4)
parents = [
x[1] for x in filter(
lambda x: x[0] != '-1',
(x.split(':') for x in parents.split())
)
]
date = parse_hgdate(date)
message = message.replace('\n\t', '\n')
entry = CommitLogEntry(rev, parents, date, author, message)
if rev not in self._commit_cache:
self._commit_cache[rev] = entry
if single:
return entry
results.append(entry)
return results
def changed(self, rev):
cmd = [HG, 'status', '-C', '--change', str(rev)]
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
copy = None
for line in reversed(output.splitlines()):
if line.startswith(' '):
copy = line.lstrip()
else:
status, path = line.split(None, 1)
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
copy = None
results.reverse()
return results
def pdiff(self, rev):
cmd = [HG, 'log', '--template=a', '-p', '-r', str(rev)]
return self._command(cmd)[1:].decode(self.encoding)
def diff(self, rev_a, rev_b, path=None):
cmd = [HG, 'diff', '-r', rev_a, '-r', rev_b]
if path is not None:
cmd.extend(['--', type(self).cleanPath(path)])
return self._command(cmd).decode(self.encoding)
def ancestor(self, rev1, rev2):
cmd = [HG, 'log', '--template={node}', '-r', 'ancestor(%s, %s)' % (rev1, rev2)]
output = self._command(cmd).decode()
if output == '':
return None
else:
return output
def _blame(self, rev, path):
cmd = [HG, 'annotate', '-unv', '-r', rev, '--', path]
output = self._command(cmd).decode(self.encoding, 'replace')
revs = {}
results = []
cat = self._cat(rev, path)
for line, text in zip(output.splitlines(), cat.splitlines()):
m = annotate_rx.match(line)
assert m, 'unexpected output: ' + line
rev, author = m.group('rev', 'author')
try:
rev, date = revs[rev]
except KeyError:
cmd = [HG, 'log', '--template={node}\n{date|hgdate}', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
rev, date = output.split('\n', 1)
date = parse_hgdate(date)
revs[rev] = rev, date
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def tip(self, head):
return self.canonical_rev(head)
|
ScottDuckworth/python-anyvcs
|
anyvcs/hg.py
|
HgRepo.private_path
|
python
|
def private_path(self):
path = os.path.join(self.path, '.hg', '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
|
Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/hg.py#L86-L99
| null |
class HgRepo(VCSRepo):
"""A Mercurial repository
Valid revisions are anything that Mercurial considers as a revision.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Clone an existing repository to a new bare repository."""
# Mercurial will not create intermediate directories for clones.
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath)
@classmethod
def create(cls, path):
"""Create a new repository"""
cmd = [HG, 'init', path]
subprocess.check_call(cmd)
return cls(path)
@property
@property
def _object_cache(self):
try:
return self._object_cache_v
except AttributeError:
object_cache_path = os.path.join(self.private_path, 'object-cache')
self._object_cache_v = HashDict(object_cache_path)
return self._object_cache_v
def canonical_rev(self, rev):
if isinstance(rev, str) and canonical_rev_rx.match(rev):
return rev
else:
cmd = [HG, 'log', '--template={node}', '-r', str(rev)]
return self._command(cmd).decode()
def compose_rev(self, branch, rev):
return self.canonical_rev(rev)
def _revnum(self, rev):
if isinstance(rev, int):
return rev
elif isinstance(rev, str) and rev.isdigit():
return int(rev)
else:
cmd = [HG, 'log', '--template={rev}', '-r', str(rev)]
return int(self._command(cmd))
def _ls(
self, rev, path, recursive=False, recursive_dirs=False, directory=False
):
forcedir = False
if path.endswith('/'):
forcedir = True
path = path.rstrip('/')
if path == '':
ltrim = 0
prefix = ''
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [HG, 'manifest', '--debug', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
if not output:
return
dirs = set()
exists = False
for line in output.splitlines():
m = manifest_rx.match(line)
assert m, 'unexpected output: ' + line
t, name, objid = m.group('type', 'name', 'object')
if name.startswith(prefix) or (not forcedir and name == path):
if directory and name.startswith(prefix):
yield ('d', path, '', None)
return
exists = True
entry_name = name[ltrim:]
if '/' in entry_name:
p = parent_dirs(entry_name)
if not recursive:
d = next(p)
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
continue
if recursive_dirs:
for d in p:
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
yield (t, name, entry_name, objid)
if not exists:
raise PathDoesNotExist(rev, path)
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
revstr = str(rev)
path = type(self).cleanPath(path)
if path == '':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self.canonical_rev(revstr)
return [entry]
if 'commit' in report:
import fcntl
import tempfile
files_cache_path = os.path.join(self.private_path, 'files-cache.log')
with open(files_cache_path, 'a+') as files_cache:
fcntl.lockf(files_cache, fcntl.LOCK_EX, 0, 0, os.SEEK_CUR)
files_cache.seek(0)
log = files_cache.read().split('\0')
assert log.pop() == ''
if log:
startlog = int(log[-1].splitlines()[0]) + 1
if startlog >= len(self):
startlog = None
else:
startlog = 0
if startlog is not None:
with tempfile.NamedTemporaryFile() as style:
style.write((
r"changeset = '{rev}\n{node}\n{parents}\n{files}\0'" '\n'
r"parent = '{rev} '" '\n'
r"file = '{file|escape}\n'" '\n'
).encode())
style.flush()
cmd = [HG, 'log', '--style', style.name, '-r', '%d:' % startlog]
output = self._command(cmd).decode(self.encoding, 'replace')
files_cache.write(output)
extend = output.split('\0')
assert extend.pop() == ''
log.extend(extend)
results = []
lookup_commit = {}
for t, fullpath, name, objid in self._ls(revstr, path, recursive, recursive_dirs, directory):
entry = attrdict(path=fullpath)
if name:
entry.name = name
if t == 'd':
entry.type = 'd'
elif t in ' *':
entry.type = 'f'
if 'executable' in report:
entry.executable = t == '*'
if 'size' in report:
entry.size = len(self._cat(revstr, fullpath))
elif t == '@':
entry.type = 'l'
if 'target' in report:
entry.target = self._cat(revstr, name).decode(self.encoding, 'replace')
else:
assert False, 'unexpected output: ' + line
if 'commit' in report:
lookup = True
if objid:
try:
import hashlib
concat = (fullpath + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
entry.commit = self._object_cache[k].decode()
entry._commit_cached = True
lookup = False
except KeyError:
pass
if lookup:
if name:
p = type(self).cleanPath(path + '/' + name)
else:
p = path
lookup_commit[p] = (entry, objid)
results.append(entry)
if 'commit' in report:
import heapq
ancestors = [-self._revnum(revstr)]
while ancestors and lookup_commit:
r = -heapq.heappop(ancestors)
lines = log[r].splitlines()
parents = lines[2]
if parents:
for x in parents.split():
x = int(x)
if x != -1:
if -x not in ancestors:
heapq.heappush(ancestors, -x)
elif r > 0:
x = r - 1
if x not in ancestors:
heapq.heappush(ancestors, -x)
for p in list(lookup_commit):
prefix = p.rstrip('/') + '/'
for l in lines[3:]:
if l == p or l.startswith(prefix):
commit = str(lines[1])
entry, objid = lookup_commit[p]
entry.commit = commit
if objid:
import hashlib
concat = (p + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
self._object_cache[k] = commit.encode()
del lookup_commit[p]
break
return results
def _cat(self, rev, path):
cmd = [HG, 'cat', '-r', rev, path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def readlink(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._cat(str(rev), path).decode(self.encoding, 'replace')
def _parse_heads(self, cmd):
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines():
m = parse_heads_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def branches(self):
cmd = [HG, 'branches']
return self._parse_heads(cmd)
def tags(self):
cmd = [HG, 'tags']
return self._parse_heads(cmd)
def bookmarks(self):
"""Get list of bookmarks"""
cmd = [HG, 'bookmarks']
output = self._command(cmd).decode(self.encoding, 'replace')
if output.startswith('no bookmarks set'):
return []
results = []
for line in output.splitlines():
m = bookmarks_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def heads(self):
return self.branches() + self.tags() + self.bookmarks()
def empty(self):
cmd = [HG, 'log', '--template=a', '-l1']
output = self._command(cmd)
return len(output) == 0
def __contains__(self, rev):
cmd = [HG, 'log', '--template=a', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [HG, 'id', '-n', '-r', 'tip']
output = self._command(cmd)
return int(output) + 1
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
cmd = [
HG, 'log', '--debug', '--template={node}\\0{parents}\\0'
'{date|hgdate}\\0{author|nonempty}'
'\\0{desc|tabindent|nonempty}\\0\\0']
if limit is not None:
cmd.append('-l' + str(limit))
if firstparent:
cmd.append('--follow-first')
if merges is not None:
if merges:
cmd.append('--only-merges')
else:
cmd.append('--no-merges')
single = False
if revrange is None:
pass
elif isinstance(revrange, (tuple, list)):
if revrange[0] is None:
if revrange[1] is None:
pass
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1]])
else:
if revrange[1] is None:
cmd.extend(['-r', 'reverse(descendants(%s))' % revrange[0]])
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1], '--prune', str(revrange[0])])
else:
entry = self._commit_cache.get(self.canonical_rev(revrange))
if entry:
entry._cached = True
return entry
cmd.extend(['-r', str(revrange)])
single = True
if path:
if follow:
cmd.append('--follow')
cmd.extend(['--', type(self).cleanPath(path)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
logs = output.split('\0\0')
logs.pop()
for log in logs:
rev, parents, date, author, message = log.split('\0', 4)
parents = [
x[1] for x in filter(
lambda x: x[0] != '-1',
(x.split(':') for x in parents.split())
)
]
date = parse_hgdate(date)
message = message.replace('\n\t', '\n')
entry = CommitLogEntry(rev, parents, date, author, message)
if rev not in self._commit_cache:
self._commit_cache[rev] = entry
if single:
return entry
results.append(entry)
return results
def changed(self, rev):
cmd = [HG, 'status', '-C', '--change', str(rev)]
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
copy = None
for line in reversed(output.splitlines()):
if line.startswith(' '):
copy = line.lstrip()
else:
status, path = line.split(None, 1)
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
copy = None
results.reverse()
return results
def pdiff(self, rev):
cmd = [HG, 'log', '--template=a', '-p', '-r', str(rev)]
return self._command(cmd)[1:].decode(self.encoding)
def diff(self, rev_a, rev_b, path=None):
cmd = [HG, 'diff', '-r', rev_a, '-r', rev_b]
if path is not None:
cmd.extend(['--', type(self).cleanPath(path)])
return self._command(cmd).decode(self.encoding)
def ancestor(self, rev1, rev2):
cmd = [HG, 'log', '--template={node}', '-r', 'ancestor(%s, %s)' % (rev1, rev2)]
output = self._command(cmd).decode()
if output == '':
return None
else:
return output
def _blame(self, rev, path):
cmd = [HG, 'annotate', '-unv', '-r', rev, '--', path]
output = self._command(cmd).decode(self.encoding, 'replace')
revs = {}
results = []
cat = self._cat(rev, path)
for line, text in zip(output.splitlines(), cat.splitlines()):
m = annotate_rx.match(line)
assert m, 'unexpected output: ' + line
rev, author = m.group('rev', 'author')
try:
rev, date = revs[rev]
except KeyError:
cmd = [HG, 'log', '--template={node}\n{date|hgdate}', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
rev, date = output.split('\n', 1)
date = parse_hgdate(date)
revs[rev] = rev, date
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def tip(self, head):
return self.canonical_rev(head)
|
ScottDuckworth/python-anyvcs
|
anyvcs/hg.py
|
HgRepo.bookmarks
|
python
|
def bookmarks(self):
cmd = [HG, 'bookmarks']
output = self._command(cmd).decode(self.encoding, 'replace')
if output.startswith('no bookmarks set'):
return []
results = []
for line in output.splitlines():
m = bookmarks_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
|
Get list of bookmarks
|
train
|
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/hg.py#L330-L341
|
[
"def _command(self, cmd, input=None, **kwargs):\n kwargs.setdefault('cwd', self.path)\n return command(cmd, **kwargs)\n"
] |
class HgRepo(VCSRepo):
"""A Mercurial repository
Valid revisions are anything that Mercurial considers as a revision.
"""
@classmethod
def clone(cls, srcpath, destpath):
"""Clone an existing repository to a new bare repository."""
# Mercurial will not create intermediate directories for clones.
try:
os.makedirs(destpath)
except OSError as e:
if not e.errno == errno.EEXIST:
raise
cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath]
subprocess.check_call(cmd)
return cls(destpath)
@classmethod
def create(cls, path):
"""Create a new repository"""
cmd = [HG, 'init', path]
subprocess.check_call(cmd)
return cls(path)
@property
def private_path(self):
"""Get the path to a directory which can be used to store arbitrary data
This directory should not conflict with any of the repository internals.
The directory should be created if it does not already exist.
"""
path = os.path.join(self.path, '.hg', '.private')
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
@property
def _object_cache(self):
try:
return self._object_cache_v
except AttributeError:
object_cache_path = os.path.join(self.private_path, 'object-cache')
self._object_cache_v = HashDict(object_cache_path)
return self._object_cache_v
def canonical_rev(self, rev):
if isinstance(rev, str) and canonical_rev_rx.match(rev):
return rev
else:
cmd = [HG, 'log', '--template={node}', '-r', str(rev)]
return self._command(cmd).decode()
def compose_rev(self, branch, rev):
return self.canonical_rev(rev)
def _revnum(self, rev):
if isinstance(rev, int):
return rev
elif isinstance(rev, str) and rev.isdigit():
return int(rev)
else:
cmd = [HG, 'log', '--template={rev}', '-r', str(rev)]
return int(self._command(cmd))
def _ls(
self, rev, path, recursive=False, recursive_dirs=False, directory=False
):
forcedir = False
if path.endswith('/'):
forcedir = True
path = path.rstrip('/')
if path == '':
ltrim = 0
prefix = ''
else:
ltrim = len(path) + 1
prefix = path + '/'
cmd = [HG, 'manifest', '--debug', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
if not output:
return
dirs = set()
exists = False
for line in output.splitlines():
m = manifest_rx.match(line)
assert m, 'unexpected output: ' + line
t, name, objid = m.group('type', 'name', 'object')
if name.startswith(prefix) or (not forcedir and name == path):
if directory and name.startswith(prefix):
yield ('d', path, '', None)
return
exists = True
entry_name = name[ltrim:]
if '/' in entry_name:
p = parent_dirs(entry_name)
if not recursive:
d = next(p)
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
continue
if recursive_dirs:
for d in p:
if d not in dirs:
dirs.add(d)
yield ('d', prefix + d, d, None)
yield (t, name, entry_name, objid)
if not exists:
raise PathDoesNotExist(rev, path)
def ls(
self, rev, path, recursive=False, recursive_dirs=False,
directory=False, report=()
):
revstr = str(rev)
path = type(self).cleanPath(path)
if path == '':
if directory:
entry = attrdict(path='/', type='d')
if 'commit' in report:
entry.commit = self.canonical_rev(revstr)
return [entry]
if 'commit' in report:
import fcntl
import tempfile
files_cache_path = os.path.join(self.private_path, 'files-cache.log')
with open(files_cache_path, 'a+') as files_cache:
fcntl.lockf(files_cache, fcntl.LOCK_EX, 0, 0, os.SEEK_CUR)
files_cache.seek(0)
log = files_cache.read().split('\0')
assert log.pop() == ''
if log:
startlog = int(log[-1].splitlines()[0]) + 1
if startlog >= len(self):
startlog = None
else:
startlog = 0
if startlog is not None:
with tempfile.NamedTemporaryFile() as style:
style.write((
r"changeset = '{rev}\n{node}\n{parents}\n{files}\0'" '\n'
r"parent = '{rev} '" '\n'
r"file = '{file|escape}\n'" '\n'
).encode())
style.flush()
cmd = [HG, 'log', '--style', style.name, '-r', '%d:' % startlog]
output = self._command(cmd).decode(self.encoding, 'replace')
files_cache.write(output)
extend = output.split('\0')
assert extend.pop() == ''
log.extend(extend)
results = []
lookup_commit = {}
for t, fullpath, name, objid in self._ls(revstr, path, recursive, recursive_dirs, directory):
entry = attrdict(path=fullpath)
if name:
entry.name = name
if t == 'd':
entry.type = 'd'
elif t in ' *':
entry.type = 'f'
if 'executable' in report:
entry.executable = t == '*'
if 'size' in report:
entry.size = len(self._cat(revstr, fullpath))
elif t == '@':
entry.type = 'l'
if 'target' in report:
entry.target = self._cat(revstr, name).decode(self.encoding, 'replace')
else:
assert False, 'unexpected output: ' + line
if 'commit' in report:
lookup = True
if objid:
try:
import hashlib
concat = (fullpath + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
entry.commit = self._object_cache[k].decode()
entry._commit_cached = True
lookup = False
except KeyError:
pass
if lookup:
if name:
p = type(self).cleanPath(path + '/' + name)
else:
p = path
lookup_commit[p] = (entry, objid)
results.append(entry)
if 'commit' in report:
import heapq
ancestors = [-self._revnum(revstr)]
while ancestors and lookup_commit:
r = -heapq.heappop(ancestors)
lines = log[r].splitlines()
parents = lines[2]
if parents:
for x in parents.split():
x = int(x)
if x != -1:
if -x not in ancestors:
heapq.heappush(ancestors, -x)
elif r > 0:
x = r - 1
if x not in ancestors:
heapq.heappush(ancestors, -x)
for p in list(lookup_commit):
prefix = p.rstrip('/') + '/'
for l in lines[3:]:
if l == p or l.startswith(prefix):
commit = str(lines[1])
entry, objid = lookup_commit[p]
entry.commit = commit
if objid:
import hashlib
concat = (p + objid).encode(self.encoding)
k = hashlib.sha1(concat).hexdigest()
self._object_cache[k] = commit.encode()
del lookup_commit[p]
break
return results
def _cat(self, rev, path):
cmd = [HG, 'cat', '-r', rev, path.encode(self.encoding)]
return self._command(cmd)
def cat(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._cat(str(rev), path)
def readlink(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'l':
raise BadFileType(rev, path)
return self._cat(str(rev), path).decode(self.encoding, 'replace')
def _parse_heads(self, cmd):
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
for line in output.splitlines():
m = parse_heads_rx.match(line)
assert m, 'unexpected output: ' + line
results.append(m.group('name'))
return results
def branches(self):
cmd = [HG, 'branches']
return self._parse_heads(cmd)
def tags(self):
cmd = [HG, 'tags']
return self._parse_heads(cmd)
def heads(self):
return self.branches() + self.tags() + self.bookmarks()
def empty(self):
cmd = [HG, 'log', '--template=a', '-l1']
output = self._command(cmd)
return len(output) == 0
def __contains__(self, rev):
cmd = [HG, 'log', '--template=a', '-r', str(rev)]
p = subprocess.Popen(
cmd, cwd=self.path, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return p.returncode == 0
def __len__(self):
cmd = [HG, 'id', '-n', '-r', 'tip']
output = self._command(cmd)
return int(output) + 1
def log(
self, revrange=None, limit=None, firstparent=False, merges=None,
path=None, follow=False
):
cmd = [
HG, 'log', '--debug', '--template={node}\\0{parents}\\0'
'{date|hgdate}\\0{author|nonempty}'
'\\0{desc|tabindent|nonempty}\\0\\0']
if limit is not None:
cmd.append('-l' + str(limit))
if firstparent:
cmd.append('--follow-first')
if merges is not None:
if merges:
cmd.append('--only-merges')
else:
cmd.append('--no-merges')
single = False
if revrange is None:
pass
elif isinstance(revrange, (tuple, list)):
if revrange[0] is None:
if revrange[1] is None:
pass
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1]])
else:
if revrange[1] is None:
cmd.extend(['-r', 'reverse(descendants(%s))' % revrange[0]])
else:
cmd.extend(['-r', 'reverse(ancestors(%s))' % revrange[1], '--prune', str(revrange[0])])
else:
entry = self._commit_cache.get(self.canonical_rev(revrange))
if entry:
entry._cached = True
return entry
cmd.extend(['-r', str(revrange)])
single = True
if path:
if follow:
cmd.append('--follow')
cmd.extend(['--', type(self).cleanPath(path)])
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
logs = output.split('\0\0')
logs.pop()
for log in logs:
rev, parents, date, author, message = log.split('\0', 4)
parents = [
x[1] for x in filter(
lambda x: x[0] != '-1',
(x.split(':') for x in parents.split())
)
]
date = parse_hgdate(date)
message = message.replace('\n\t', '\n')
entry = CommitLogEntry(rev, parents, date, author, message)
if rev not in self._commit_cache:
self._commit_cache[rev] = entry
if single:
return entry
results.append(entry)
return results
def changed(self, rev):
cmd = [HG, 'status', '-C', '--change', str(rev)]
output = self._command(cmd).decode(self.encoding, 'replace')
results = []
copy = None
for line in reversed(output.splitlines()):
if line.startswith(' '):
copy = line.lstrip()
else:
status, path = line.split(None, 1)
entry = FileChangeInfo(path, str(status), copy)
results.append(entry)
copy = None
results.reverse()
return results
def pdiff(self, rev):
cmd = [HG, 'log', '--template=a', '-p', '-r', str(rev)]
return self._command(cmd)[1:].decode(self.encoding)
def diff(self, rev_a, rev_b, path=None):
cmd = [HG, 'diff', '-r', rev_a, '-r', rev_b]
if path is not None:
cmd.extend(['--', type(self).cleanPath(path)])
return self._command(cmd).decode(self.encoding)
def ancestor(self, rev1, rev2):
cmd = [HG, 'log', '--template={node}', '-r', 'ancestor(%s, %s)' % (rev1, rev2)]
output = self._command(cmd).decode()
if output == '':
return None
else:
return output
def _blame(self, rev, path):
cmd = [HG, 'annotate', '-unv', '-r', rev, '--', path]
output = self._command(cmd).decode(self.encoding, 'replace')
revs = {}
results = []
cat = self._cat(rev, path)
for line, text in zip(output.splitlines(), cat.splitlines()):
m = annotate_rx.match(line)
assert m, 'unexpected output: ' + line
rev, author = m.group('rev', 'author')
try:
rev, date = revs[rev]
except KeyError:
cmd = [HG, 'log', '--template={node}\n{date|hgdate}', '-r', rev]
output = self._command(cmd).decode(self.encoding, 'replace')
rev, date = output.split('\n', 1)
date = parse_hgdate(date)
revs[rev] = rev, date
results.append(BlameInfo(rev, author, date, text))
return results
def blame(self, rev, path):
path = type(self).cleanPath(path)
ls = self.ls(rev, path, directory=True)
assert len(ls) == 1
if ls[0].get('type') != 'f':
raise BadFileType(rev, path)
return self._blame(str(rev), path)
def tip(self, head):
return self.canonical_rev(head)
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.fit
|
python
|
def fit(self):
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
|
Fit MCMC AgeDepthModel
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L64-L72
|
[
"def run(self):\n self.validate()\n return McmcResults(self)\n",
"def burnin(self, n):\n \"\"\"Remove the earliest n ensemble members from the MCMC output\"\"\"\n self.sediment_rate = self.sediment_rate[:, n:]\n self.headage = self.headage[n:]\n self.sediment_memory = self.sediment_memory[n:]\n self.objective = self.objective[n:]\n"
] |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
"""Age-depth plot"""
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.date
|
python
|
def date(self, proxy, how='median', n=500):
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
|
Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L74-L102
|
[
"def agedepth(self, d):\n \"\"\"Get calendar age for a depth\n\n Parameters\n ----------\n d : float\n Sediment depth (in cm).\n\n Returns\n -------\n Numeric giving true age at given depth.\n \"\"\"\n # TODO(brews): Function cannot handle hiatus\n # See lines 77 - 100 of hist2.cpp\n x = self.mcmcfit.sediment_rate\n theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment\n deltac = self.thick\n c0 = min(self.depth) # Uniform depth segment abscissa (in cm).\n assert d > c0 or np.isclose(c0, d, atol = 1e-4)\n out = theta0.astype(float)\n i = int(np.floor((d - c0) / deltac))\n for j in range(i):\n out += x[j] * deltac\n ci = c0 + i * deltac\n assert ci < d or np.isclose(ci, d, atol = 1e-4)\n try:\n next_x = x[i]\n except IndexError:\n # Extrapolating\n next_x = x[i - 1]\n out += next_x * (d - ci)\n return out\n"
] |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def fit(self):
"""Fit MCMC AgeDepthModel"""
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
"""Age-depth plot"""
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.plot
|
python
|
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
|
Age-depth plot
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L104-L116
|
[
"def age_median(self):\n return np.median(self.age_ensemble, axis=1)\n",
"def age_percentile(self, p):\n return np.percentile(self.age_ensemble, q=p, axis=1)\n"
] |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def fit(self):
"""Fit MCMC AgeDepthModel"""
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.agedepth
|
python
|
def agedepth(self, d):
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
|
Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L118-L149
| null |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def fit(self):
"""Fit MCMC AgeDepthModel"""
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
"""Age-depth plot"""
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.plot_prior_dates
|
python
|
def plot_prior_dates(self, dwidth=30, ax=None):
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
|
Plot prior chronology dates in age-depth plot
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L154-L176
|
[
"def prior_dates(self):\n return self.mcmcsetup.prior_dates()\n"
] |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def fit(self):
"""Fit MCMC AgeDepthModel"""
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
"""Age-depth plot"""
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.plot_sediment_rate
|
python
|
def plot_sediment_rate(self, ax=None):
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
|
Plot sediment accumulation rate prior and posterior distributions
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L181-L205
|
[
"def prior_sediment_rate(self):\n return self.mcmcsetup.prior_sediment_rate()\n"
] |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def fit(self):
"""Fit MCMC AgeDepthModel"""
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
"""Age-depth plot"""
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
brews/snakebacon
|
snakebacon/agedepth.py
|
AgeDepthModel.plot_sediment_memory
|
python
|
def plot_sediment_memory(self, ax=None):
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
Plot sediment memory prior and posterior distributions
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L210-L234
|
[
"def prior_sediment_memory(self):\n return self.mcmcsetup.prior_sediment_memory()\n"
] |
class AgeDepthModel:
def __init__(self, coredates, *, mcmc_kws, hold=False, burnin=200):
self.burnin = int(burnin)
self.mcmcsetup = McmcSetup(coredates, **mcmc_kws)
self._mcmcfit = None
self._thick = None
self._depth = None
self._age_ensemble = None
if not hold:
self.fit()
@property
def mcmcfit(self):
if self._mcmcfit is not None:
return self._mcmcfit
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def thick(self):
if self._thick is not None:
return self._thick
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def depth(self):
if self._depth is not None:
return self._depth
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
@property
def age_ensemble(self):
if self._age_ensemble is not None:
return self._age_ensemble
else:
raise NeedFitError('AgeDepthModel instance needs to be fit() first')
def __repr__(self):
return '%s(coredates=%r, mcmc_kws=%r, burnin=%r)' % (type(self).__name__, self.mcmcsetup.coredates, self.mcmcsetup.mcmc_kws, self.burnin)
def age_median(self):
return np.median(self.age_ensemble, axis=1)
def age_percentile(self, p):
return np.percentile(self.age_ensemble, q=p, axis=1)
def fit(self):
"""Fit MCMC AgeDepthModel"""
self._mcmcfit = self.mcmcsetup.run()
self._mcmcfit.burnin(self.burnin)
dmin = min(self._mcmcfit.depth_segments)
dmax = max(self._mcmcfit.depth_segments)
self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments)
self._depth = np.arange(dmin, dmax + 0.001)
self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the average of the MCMC ensemble. 'ensemble' returns a 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
def plot(self, agebins=50, p=(2.5, 97.5), ax=None):
"""Age-depth plot"""
if ax is None:
ax = plt.gca()
ax.hist2d(np.repeat(self.depth, self.age_ensemble.shape[1]), self.age_ensemble.flatten(),
(len(self.depth), agebins), cmin=1)
ax.step(self.depth, self.age_median(), where='mid', color='red')
ax.step(self.depth, self.age_percentile(p[0]), where='mid', color='red', linestyle=':')
ax.step(self.depth, self.age_percentile(p[1]), where='mid', color='red', linestyle=':')
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
def prior_dates(self):
return self.mcmcsetup.prior_dates()
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
def prior_sediment_rate(self):
return self.mcmcsetup.prior_sediment_rate()
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
def prior_sediment_memory(self):
return self.mcmcsetup.prior_sediment_memory()
|
brews/snakebacon
|
snakebacon/records.py
|
read_14c
|
python
|
def read_14c(fl):
indata = pd.read_csv(fl, index_col=None, skiprows=11, header=None,
names=['calbp', 'c14age', 'error', 'delta14c', 'sigma'])
outcurve = CalibCurve(calbp=indata['calbp'],
c14age=indata['c14age'],
error=indata['error'],
delta14c=indata['delta14c'],
sigma=indata['sigma'])
return outcurve
|
Create CalibCurve instance from Bacon curve file
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/records.py#L14-L24
| null |
import logging
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
log = logging.getLogger(__name__)
def read_chron(fl):
"""Create ChronRecord instance from Bacon file
"""
indata = pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python')
outcore = ChronRecord(age=indata['age'],
error=indata['error'],
depth=indata['depth'],
labid=indata['labID'])
return outcore
def read_proxy(fl):
"""Read a file to create a proxy record instance
"""
outcore = ProxyRecord(data=pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python'))
return outcore
class ProxyRecord:
def __init__(self, data):
"""Create a proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
"""
assert 'depth' in data.columns.values
assert 'age' not in data.columns.values
self.data = pd.DataFrame(data).copy()
def __repr__(self):
return '%s(data=%r)' % (type(self).__name__, self.data)
class DatedProxyRecord(ProxyRecord):
def __init__(self, data, age):
"""Create a dated proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
age : iterable
Iterable containing calendar year, or a list of years (cal yr BP) for corresponding to each sample depth in
data.depth.
"""
super().__init__(data)
assert len(data.depth) == len(age)
self.age = np.array(age)
def __repr__(self):
return '%s(data=%r, age=%r)' % (type(self).__name__, self.data, self.age)
def n_members(self):
"""Get number of MCMC ensemble members in calendar age estimates"""
try:
n = len(self.age[0])
except TypeError:
n = 1
return n
def to_pandas(self):
"""Convert record to pandas.DataFrame"""
agedepthdf = pd.DataFrame(self.age, index=self.data.depth)
agedepthdf.columns = list(range(self.n_members()))
out = (agedepthdf.join(self.data.set_index('depth'))
.reset_index()
.melt(id_vars=self.data.columns.values, var_name='mciter', value_name='age'))
out['mciter'] = pd.to_numeric(out.loc[:, 'mciter'])
if self.n_members() == 1:
out = out.drop('mciter', axis=1)
return out
class ChronRecord:
def __init__(self, obj=None, **kwargs):
"""Create a sediment core date instance
Parameters
----------
obj : obj, optional
Object with iterable attributes 'labid', 'age', 'error', and 'depth'. Assumes that depth is in increasing order
order. Cannot use **kwargs if passing obj.
**kwargs : optional
Must include objects with iterables for 'labid', 'age', 'error', and 'depth'. Assumes depth is in in
increasing order. Only parsed if obj is None.
Returns
-------
A ChronRecord instance.
"""
if obj is not None:
self.labid = np.array(obj.labid)
self.age = np.array(obj.age)
self.error = np.array(obj.error) # Note this is called "std" in output .bacon file.
self.depth = np.array(obj.depth)
else:
self.labid = np.array(kwargs['labid'])
self.age = np.array(kwargs['age'])
self.error = np.array(kwargs['error'])
self.depth = np.array(kwargs['depth'])
def __repr__(self):
# return '%s(%r)' % (type(self).__name__, self)
return '%s(age=%r, error=%r, depth=%r, labid=%r)' % (type(self).__name__, self.age, self.error, self.depth, self.labid)
class CalibCurve:
"""A calibration curve
"""
def __init__(self, calbp, c14age, error, delta14c=None, sigma=None):
"""Create a calibration curve instance
Parameters
----------
calbp : ndarray
c14age : ndarray
error : ndarray
delta14c : ndarray
sigma : ndarray
"""
self.calbp = np.array(calbp)
self.c14age = np.array(c14age)
self.error = np.array(error)
if delta14c is None:
delta14c = np.zeros(calbp.shape)
self.delta14c = np.array(delta14c) # d_R
if sigma is None:
sigma = np.zeros(calbp.shape)
self.sigma = np.array(sigma) # d_R variance?
def __repr__(self):
return '%s(calbp=%r, c14age=%r, error=%r, delta14c=%r, sigma=%r)' % (type(self).__name__, self.calbp, self.c14age, self.error, self.delta14c, self.sigma)
|
brews/snakebacon
|
snakebacon/records.py
|
read_chron
|
python
|
def read_chron(fl):
indata = pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python')
outcore = ChronRecord(age=indata['age'],
error=indata['error'],
depth=indata['depth'],
labid=indata['labID'])
return outcore
|
Create ChronRecord instance from Bacon file
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/records.py#L27-L35
| null |
import logging
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
log = logging.getLogger(__name__)
def read_14c(fl):
"""Create CalibCurve instance from Bacon curve file
"""
indata = pd.read_csv(fl, index_col=None, skiprows=11, header=None,
names=['calbp', 'c14age', 'error', 'delta14c', 'sigma'])
outcurve = CalibCurve(calbp=indata['calbp'],
c14age=indata['c14age'],
error=indata['error'],
delta14c=indata['delta14c'],
sigma=indata['sigma'])
return outcurve
def read_proxy(fl):
"""Read a file to create a proxy record instance
"""
outcore = ProxyRecord(data=pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python'))
return outcore
class ProxyRecord:
def __init__(self, data):
"""Create a proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
"""
assert 'depth' in data.columns.values
assert 'age' not in data.columns.values
self.data = pd.DataFrame(data).copy()
def __repr__(self):
return '%s(data=%r)' % (type(self).__name__, self.data)
class DatedProxyRecord(ProxyRecord):
def __init__(self, data, age):
"""Create a dated proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
age : iterable
Iterable containing calendar year, or a list of years (cal yr BP) for corresponding to each sample depth in
data.depth.
"""
super().__init__(data)
assert len(data.depth) == len(age)
self.age = np.array(age)
def __repr__(self):
return '%s(data=%r, age=%r)' % (type(self).__name__, self.data, self.age)
def n_members(self):
"""Get number of MCMC ensemble members in calendar age estimates"""
try:
n = len(self.age[0])
except TypeError:
n = 1
return n
def to_pandas(self):
"""Convert record to pandas.DataFrame"""
agedepthdf = pd.DataFrame(self.age, index=self.data.depth)
agedepthdf.columns = list(range(self.n_members()))
out = (agedepthdf.join(self.data.set_index('depth'))
.reset_index()
.melt(id_vars=self.data.columns.values, var_name='mciter', value_name='age'))
out['mciter'] = pd.to_numeric(out.loc[:, 'mciter'])
if self.n_members() == 1:
out = out.drop('mciter', axis=1)
return out
class ChronRecord:
def __init__(self, obj=None, **kwargs):
"""Create a sediment core date instance
Parameters
----------
obj : obj, optional
Object with iterable attributes 'labid', 'age', 'error', and 'depth'. Assumes that depth is in increasing order
order. Cannot use **kwargs if passing obj.
**kwargs : optional
Must include objects with iterables for 'labid', 'age', 'error', and 'depth'. Assumes depth is in in
increasing order. Only parsed if obj is None.
Returns
-------
A ChronRecord instance.
"""
if obj is not None:
self.labid = np.array(obj.labid)
self.age = np.array(obj.age)
self.error = np.array(obj.error) # Note this is called "std" in output .bacon file.
self.depth = np.array(obj.depth)
else:
self.labid = np.array(kwargs['labid'])
self.age = np.array(kwargs['age'])
self.error = np.array(kwargs['error'])
self.depth = np.array(kwargs['depth'])
def __repr__(self):
# return '%s(%r)' % (type(self).__name__, self)
return '%s(age=%r, error=%r, depth=%r, labid=%r)' % (type(self).__name__, self.age, self.error, self.depth, self.labid)
class CalibCurve:
"""A calibration curve
"""
def __init__(self, calbp, c14age, error, delta14c=None, sigma=None):
"""Create a calibration curve instance
Parameters
----------
calbp : ndarray
c14age : ndarray
error : ndarray
delta14c : ndarray
sigma : ndarray
"""
self.calbp = np.array(calbp)
self.c14age = np.array(c14age)
self.error = np.array(error)
if delta14c is None:
delta14c = np.zeros(calbp.shape)
self.delta14c = np.array(delta14c) # d_R
if sigma is None:
sigma = np.zeros(calbp.shape)
self.sigma = np.array(sigma) # d_R variance?
def __repr__(self):
return '%s(calbp=%r, c14age=%r, error=%r, delta14c=%r, sigma=%r)' % (type(self).__name__, self.calbp, self.c14age, self.error, self.delta14c, self.sigma)
|
brews/snakebacon
|
snakebacon/records.py
|
read_proxy
|
python
|
def read_proxy(fl):
outcore = ProxyRecord(data=pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python'))
return outcore
|
Read a file to create a proxy record instance
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/records.py#L38-L42
| null |
import logging
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
log = logging.getLogger(__name__)
def read_14c(fl):
"""Create CalibCurve instance from Bacon curve file
"""
indata = pd.read_csv(fl, index_col=None, skiprows=11, header=None,
names=['calbp', 'c14age', 'error', 'delta14c', 'sigma'])
outcurve = CalibCurve(calbp=indata['calbp'],
c14age=indata['c14age'],
error=indata['error'],
delta14c=indata['delta14c'],
sigma=indata['sigma'])
return outcurve
def read_chron(fl):
"""Create ChronRecord instance from Bacon file
"""
indata = pd.read_csv(fl, sep=r'\s*\,\s*', index_col=None, engine='python')
outcore = ChronRecord(age=indata['age'],
error=indata['error'],
depth=indata['depth'],
labid=indata['labID'])
return outcore
class ProxyRecord:
def __init__(self, data):
"""Create a proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
"""
assert 'depth' in data.columns.values
assert 'age' not in data.columns.values
self.data = pd.DataFrame(data).copy()
def __repr__(self):
return '%s(data=%r)' % (type(self).__name__, self.data)
class DatedProxyRecord(ProxyRecord):
def __init__(self, data, age):
"""Create a dated proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
age : iterable
Iterable containing calendar year, or a list of years (cal yr BP) for corresponding to each sample depth in
data.depth.
"""
super().__init__(data)
assert len(data.depth) == len(age)
self.age = np.array(age)
def __repr__(self):
return '%s(data=%r, age=%r)' % (type(self).__name__, self.data, self.age)
def n_members(self):
"""Get number of MCMC ensemble members in calendar age estimates"""
try:
n = len(self.age[0])
except TypeError:
n = 1
return n
def to_pandas(self):
"""Convert record to pandas.DataFrame"""
agedepthdf = pd.DataFrame(self.age, index=self.data.depth)
agedepthdf.columns = list(range(self.n_members()))
out = (agedepthdf.join(self.data.set_index('depth'))
.reset_index()
.melt(id_vars=self.data.columns.values, var_name='mciter', value_name='age'))
out['mciter'] = pd.to_numeric(out.loc[:, 'mciter'])
if self.n_members() == 1:
out = out.drop('mciter', axis=1)
return out
class ChronRecord:
def __init__(self, obj=None, **kwargs):
"""Create a sediment core date instance
Parameters
----------
obj : obj, optional
Object with iterable attributes 'labid', 'age', 'error', and 'depth'. Assumes that depth is in increasing order
order. Cannot use **kwargs if passing obj.
**kwargs : optional
Must include objects with iterables for 'labid', 'age', 'error', and 'depth'. Assumes depth is in in
increasing order. Only parsed if obj is None.
Returns
-------
A ChronRecord instance.
"""
if obj is not None:
self.labid = np.array(obj.labid)
self.age = np.array(obj.age)
self.error = np.array(obj.error) # Note this is called "std" in output .bacon file.
self.depth = np.array(obj.depth)
else:
self.labid = np.array(kwargs['labid'])
self.age = np.array(kwargs['age'])
self.error = np.array(kwargs['error'])
self.depth = np.array(kwargs['depth'])
def __repr__(self):
# return '%s(%r)' % (type(self).__name__, self)
return '%s(age=%r, error=%r, depth=%r, labid=%r)' % (type(self).__name__, self.age, self.error, self.depth, self.labid)
class CalibCurve:
"""A calibration curve
"""
def __init__(self, calbp, c14age, error, delta14c=None, sigma=None):
"""Create a calibration curve instance
Parameters
----------
calbp : ndarray
c14age : ndarray
error : ndarray
delta14c : ndarray
sigma : ndarray
"""
self.calbp = np.array(calbp)
self.c14age = np.array(c14age)
self.error = np.array(error)
if delta14c is None:
delta14c = np.zeros(calbp.shape)
self.delta14c = np.array(delta14c) # d_R
if sigma is None:
sigma = np.zeros(calbp.shape)
self.sigma = np.array(sigma) # d_R variance?
def __repr__(self):
return '%s(calbp=%r, c14age=%r, error=%r, delta14c=%r, sigma=%r)' % (type(self).__name__, self.calbp, self.c14age, self.error, self.delta14c, self.sigma)
|
brews/snakebacon
|
snakebacon/records.py
|
DatedProxyRecord.to_pandas
|
python
|
def to_pandas(self):
agedepthdf = pd.DataFrame(self.age, index=self.data.depth)
agedepthdf.columns = list(range(self.n_members()))
out = (agedepthdf.join(self.data.set_index('depth'))
.reset_index()
.melt(id_vars=self.data.columns.values, var_name='mciter', value_name='age'))
out['mciter'] = pd.to_numeric(out.loc[:, 'mciter'])
if self.n_members() == 1:
out = out.drop('mciter', axis=1)
return out
|
Convert record to pandas.DataFrame
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/records.py#L89-L99
|
[
"def n_members(self):\n \"\"\"Get number of MCMC ensemble members in calendar age estimates\"\"\"\n try:\n n = len(self.age[0])\n except TypeError:\n n = 1\n return n\n"
] |
class DatedProxyRecord(ProxyRecord):
def __init__(self, data, age):
"""Create a dated proxy record instance
Parameters
----------
data : DataFrame
Pandas dataframe containing columns with proxy sample measurements. Must also have 'depth' column.
age : iterable
Iterable containing calendar year, or a list of years (cal yr BP) for corresponding to each sample depth in
data.depth.
"""
super().__init__(data)
assert len(data.depth) == len(age)
self.age = np.array(age)
def __repr__(self):
return '%s(data=%r, age=%r)' % (type(self).__name__, self.data, self.age)
def n_members(self):
"""Get number of MCMC ensemble members in calendar age estimates"""
try:
n = len(self.age[0])
except TypeError:
n = 1
return n
|
brews/snakebacon
|
snakebacon/mcmc.py
|
McmcResults.burnin
|
python
|
def burnin(self, n):
self.sediment_rate = self.sediment_rate[:, n:]
self.headage = self.headage[n:]
self.sediment_memory = self.sediment_memory[n:]
self.objective = self.objective[n:]
|
Remove the earliest n ensemble members from the MCMC output
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmc.py#L54-L59
| null |
class McmcResults:
def __init__(self, setup):
mcmcout = setup.mcmcbackend.runmcmc(core_labid=setup.coredates.labid,
core_age=setup.coredates.age,
core_error=setup.coredates.error,
core_depth=setup.coredates.depth,
**setup.mcmc_kws)
self.depth_segments = np.linspace(setup.mcmc_kws['depth_min'], setup.mcmc_kws['depth_max'],
setup.mcmc_kws['k'])
self.headage = mcmcout['theta']
self.sediment_rate = mcmcout['x']
self.sediment_memory = mcmcout['w']
self.objective = mcmcout['objective']
def n_members(self):
"""Get number of MCMC ensemble members in results"""
return len(self.objective)
def plot(self):
# TODO(brews): Write function to plot raw Mcmc results.
pass
|
brews/snakebacon
|
snakebacon/mcmcbackends/__init__.py
|
Bacon.prior_dates
|
python
|
def prior_dates(*args, **kwargs):
try:
chron = args[0]
except IndexError:
chron = kwargs['coredates']
d_r = np.array(kwargs['d_r'])
d_std = np.array(kwargs['d_std'])
t_a = np.array(kwargs['t_a'])
t_b = np.array(kwargs['t_b'])
try:
normal_distr = kwargs['normal_distr']
except KeyError:
normal_distr = None
cc_int = kwargs['cc']
ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13',
3: 'SHCal13', 4: 'ConstCal'}
# There is a better way to do this.
if 'cc1' in kwargs:
ccdict[1] = str(kwargs['cc1'])
if 'cc2' in kwargs:
ccdict[2] = str(kwargs['cc2'])
if 'cc3' in kwargs:
ccdict[3] = str(kwargs['cc3'])
if 'cc4' in kwargs:
ccdict[4] = str(kwargs['cc4'])
cc = []
for i in cc_int:
i = int(i)
cc.append(fetch_calibcurve(ccdict[i]))
d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,
t_a=t_a, t_b=t_b, normal_distr=normal_distr)
return d, p
|
Get the prior distribution of calibrated radiocarbon dates
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmcbackends/__init__.py#L11-L49
|
[
"def fetch_calibcurve(curvename):\n \"\"\"Get CalibCurve from name string\"\"\"\n f = available_curves[curvename]\n return f()\n",
"def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]):\n \"\"\"Get density of calendar dates for chron date segment in core\n\n Parameters\n ----------\n chron : DatedProxy-like\n calib_curve : CalibCurve or list of CalibCurves\n d_r : scalar or ndarray\n Carbon reservoir offset.\n d_std : scalar or ndarray\n Carbon reservoir offset error standard deviation.\n cutoff : scalar, optional\n Unknown.\n normal_distr : Bool, optional\n Use normal distribution for date errors. If False, then use Student's t-distribution.\n t_a : scalar or ndarray, optional\n Student's t-distribution parameter, a. t_a - 1 must equal t_b.\n t_b : scalar or ndarray, optional\n Student's t-distribution parameter, b. t_a - 1 must equal t_b.\n\n Returns\n -------\n depth : ndarray\n Depth of dated sediment sample.\n probs : list of 2d arrays\n Density of calendar age for each dated sediment sample. For each\n sediment sample, the 2d array has two columns, the first is the\n calendar age. 
The second column is the density for that calendar age.\n\n \"\"\"\n # Python version of .bacon.calib() on line 908 in Bacon.R\n\n # .bacon.calib - line 908\n\n # rcmean = 4128; w2 = 4225; t_a=3; t_b=4\n # test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a,\n # t_b=t_b, cutoff=cutoff, normal = normal)\n\n # Line 959 of Bacon.R\n # calib = list(dets.iloc[:, 3])\n # Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R\n\n # Line #973\n # TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above.\n # TODO(brews): Check whether we call returned values densities, freqs or what options we should have.\n n = len(chron.depth)\n calib_curve = np.array(calib_curve)\n t_a = np.array(t_a)\n t_b = np.array(t_b)\n assert t_b - 1 == t_a\n d_r = np.array(d_r)\n d_std = np.array(d_std)\n if len(t_a) == 1:\n t_a = np.repeat(t_a, n)\n if len(t_b) == 1:\n t_b = np.repeat(t_b, n)\n if len(d_r) == 1:\n d_r = np.repeat(d_r, n)\n if len(d_std) == 1:\n d_std = np.repeat(d_std, n)\n if len(calib_curve) == 1:\n calib_curve = np.repeat(calib_curve, n)\n\n calib_probs = []\n rcmean = chron.age - d_r\n w2 = chron.error ** 2 + d_std ** 2\n for i in range(n):\n age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i],\n t_a=t_a[i], t_b=t_b[i],\n cutoff=cutoff, normal_distr=normal_distr)\n calib_probs.append(age_realizations)\n return np.array(chron.depth), calib_probs\n"
] |
class Bacon:
def runmcmc(*args, **kwargs):
return run_baconmcmc(*args, **kwargs)
def prior_sediment_rate(*args, **kwargs):
"""Get the prior density of sediment rates
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of sediment accumulation values (yr/cm) over which the density was evaluated.
"""
# PlotAccPrior @ Bacon.R ln 113 -> ln 1097-1115
# alpha = acc_shape, beta = acc_shape / acc_mean
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
acc_mean = kwargs['acc_mean']
acc_shape = kwargs['acc_shape']
x = np.linspace(0, 6 * np.max(acc_mean), 100)
y = stats.gamma.pdf(x, a=acc_shape,
scale=1 / (acc_shape/acc_mean))
return y, x
def prior_sediment_memory(*args, **kwargs):
"""Get the prior density of sediment memory
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of Memory (ratio) values over which the density was evaluated.
"""
# "plot the prior for the memory (= accumulation rate varibility between neighbouring depths)"
# PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141
# w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean)
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
mem_shape = kwargs['mem_strength'] # aka. `mem_shape`
mem_mean = kwargs['mem_mean']
x = np.linspace(0, 1, 100)
y = stats.beta.pdf(x, a=mem_shape * mem_mean,
b=mem_shape * (1 - mem_mean))
return y, x
|
brews/snakebacon
|
snakebacon/mcmcbackends/__init__.py
|
Bacon.prior_sediment_rate
|
python
|
def prior_sediment_rate(*args, **kwargs):
# PlotAccPrior @ Bacon.R ln 113 -> ln 1097-1115
# alpha = acc_shape, beta = acc_shape / acc_mean
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
acc_mean = kwargs['acc_mean']
acc_shape = kwargs['acc_shape']
x = np.linspace(0, 6 * np.max(acc_mean), 100)
y = stats.gamma.pdf(x, a=acc_shape,
scale=1 / (acc_shape/acc_mean))
return y, x
|
Get the prior density of sediment rates
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of sediment accumulation values (yr/cm) over which the density was evaluated.
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmcbackends/__init__.py#L52-L70
| null |
class Bacon:
def runmcmc(*args, **kwargs):
return run_baconmcmc(*args, **kwargs)
def prior_dates(*args, **kwargs):
"""Get the prior distribution of calibrated radiocarbon dates"""
try:
chron = args[0]
except IndexError:
chron = kwargs['coredates']
d_r = np.array(kwargs['d_r'])
d_std = np.array(kwargs['d_std'])
t_a = np.array(kwargs['t_a'])
t_b = np.array(kwargs['t_b'])
try:
normal_distr = kwargs['normal_distr']
except KeyError:
normal_distr = None
cc_int = kwargs['cc']
ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13',
3: 'SHCal13', 4: 'ConstCal'}
# There is a better way to do this.
if 'cc1' in kwargs:
ccdict[1] = str(kwargs['cc1'])
if 'cc2' in kwargs:
ccdict[2] = str(kwargs['cc2'])
if 'cc3' in kwargs:
ccdict[3] = str(kwargs['cc3'])
if 'cc4' in kwargs:
ccdict[4] = str(kwargs['cc4'])
cc = []
for i in cc_int:
i = int(i)
cc.append(fetch_calibcurve(ccdict[i]))
d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,
t_a=t_a, t_b=t_b, normal_distr=normal_distr)
return d, p
def prior_sediment_memory(*args, **kwargs):
"""Get the prior density of sediment memory
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of Memory (ratio) values over which the density was evaluated.
"""
# "plot the prior for the memory (= accumulation rate varibility between neighbouring depths)"
# PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141
# w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean)
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
mem_shape = kwargs['mem_strength'] # aka. `mem_shape`
mem_mean = kwargs['mem_mean']
x = np.linspace(0, 1, 100)
y = stats.beta.pdf(x, a=mem_shape * mem_mean,
b=mem_shape * (1 - mem_mean))
return y, x
|
brews/snakebacon
|
snakebacon/mcmcbackends/__init__.py
|
Bacon.prior_sediment_memory
|
python
|
def prior_sediment_memory(*args, **kwargs):
# "plot the prior for the memory (= accumulation rate varibility between neighbouring depths)"
# PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141
# w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean)
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
mem_shape = kwargs['mem_strength'] # aka. `mem_shape`
mem_mean = kwargs['mem_mean']
x = np.linspace(0, 1, 100)
y = stats.beta.pdf(x, a=mem_shape * mem_mean,
b=mem_shape * (1 - mem_mean))
return y, x
|
Get the prior density of sediment memory
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of Memory (ratio) values over which the density was evaluated.
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmcbackends/__init__.py#L73-L92
| null |
class Bacon:
def runmcmc(*args, **kwargs):
return run_baconmcmc(*args, **kwargs)
def prior_dates(*args, **kwargs):
"""Get the prior distribution of calibrated radiocarbon dates"""
try:
chron = args[0]
except IndexError:
chron = kwargs['coredates']
d_r = np.array(kwargs['d_r'])
d_std = np.array(kwargs['d_std'])
t_a = np.array(kwargs['t_a'])
t_b = np.array(kwargs['t_b'])
try:
normal_distr = kwargs['normal_distr']
except KeyError:
normal_distr = None
cc_int = kwargs['cc']
ccdict = {0: 'ConstCal', 1: 'IntCal3', 2: 'Marine13',
3: 'SHCal13', 4: 'ConstCal'}
# There is a better way to do this.
if 'cc1' in kwargs:
ccdict[1] = str(kwargs['cc1'])
if 'cc2' in kwargs:
ccdict[2] = str(kwargs['cc2'])
if 'cc3' in kwargs:
ccdict[3] = str(kwargs['cc3'])
if 'cc4' in kwargs:
ccdict[4] = str(kwargs['cc4'])
cc = []
for i in cc_int:
i = int(i)
cc.append(fetch_calibcurve(ccdict[i]))
d, p = calibrate_dates(chron, calib_curve=cc, d_r=d_r, d_std=d_std,
t_a=t_a, t_b=t_b, normal_distr=normal_distr)
return d, p
def prior_sediment_rate(*args, **kwargs):
"""Get the prior density of sediment rates
Returns
-------
y : ndarray
Array giving the density.
x : ndarray
Array of sediment accumulation values (yr/cm) over which the density was evaluated.
"""
# PlotAccPrior @ Bacon.R ln 113 -> ln 1097-1115
# alpha = acc_shape, beta = acc_shape / acc_mean
# TODO(brews): Check that these stats are correctly translated to scipy.stats distribs.
acc_mean = kwargs['acc_mean']
acc_shape = kwargs['acc_shape']
x = np.linspace(0, 6 * np.max(acc_mean), 100)
y = stats.gamma.pdf(x, a=acc_shape,
scale=1 / (acc_shape/acc_mean))
return y, x
|
brews/snakebacon
|
snakebacon/mcmcbackends/bacon/utils.py
|
d_cal
|
python
|
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
assert t_b - 1 == t_a
if normal_distr:
# TODO(brews): Test this. Line 946 of Bacon.R.
std = np.sqrt(calibcurve.error ** 2 + w2)
dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age)
else:
# TODO(brews): Test this. Line 947 of Bacon.R.
dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))
cal = np.array([calibcurve.calbp.copy(), dens]).T
cal[:, 1] = cal[:, 1] / cal[:, 1].sum()
# "ensure that also very precise dates get a range of probabilities"
cutoff_mask = cal[:, 1] > cutoff
if cutoff_mask.sum() > 5:
out = cal[cutoff_mask, :]
else:
calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)
caly = np.interp(calx, cal[:, 0], cal[:, 1])
out = np.array([calx, caly / caly.sum()]).T
return out
|
Get calendar date probabilities
Parameters
----------
calibcurve : CalibCurve
Calibration curve.
rcmean : scalar
Reservoir-adjusted age.
w2 : scalar
r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar, optional
Student's t-distribution parameter, a. t_b - 1 must equal t_b.
t_b : scalar, optional
Student's t-distribution parameter, b. t_b - 1 must equal t_b.
#Line 943 of Bacon.R
#cc : calib_curve (3-col format)
#rcmean : det['age'][i] - d_R
#w2 : dat['error'][i]^2 + d_STD**2
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmcbackends/bacon/utils.py#L5-L49
| null |
import numpy as np
import scipy.stats as stats
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]):
"""Get density of calendar dates for chron date segment in core
Parameters
----------
chron : DatedProxy-like
calib_curve : CalibCurve or list of CalibCurves
d_r : scalar or ndarray
Carbon reservoir offset.
d_std : scalar or ndarray
Carbon reservoir offset error standard deviation.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar or ndarray, optional
Student's t-distribution parameter, a. t_a - 1 must equal t_b.
t_b : scalar or ndarray, optional
Student's t-distribution parameter, b. t_a - 1 must equal t_b.
Returns
-------
depth : ndarray
Depth of dated sediment sample.
probs : list of 2d arrays
Density of calendar age for each dated sediment sample. For each
sediment sample, the 2d array has two columns, the first is the
calendar age. The second column is the density for that calendar age.
"""
# Python version of .bacon.calib() on line 908 in Bacon.R
# .bacon.calib - line 908
# rcmean = 4128; w2 = 4225; t_a=3; t_b=4
# test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a,
# t_b=t_b, cutoff=cutoff, normal = normal)
# Line 959 of Bacon.R
# calib = list(dets.iloc[:, 3])
# Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R
# Line #973
# TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above.
# TODO(brews): Check whether we call returned values densities, freqs or what options we should have.
n = len(chron.depth)
calib_curve = np.array(calib_curve)
t_a = np.array(t_a)
t_b = np.array(t_b)
assert t_b - 1 == t_a
d_r = np.array(d_r)
d_std = np.array(d_std)
if len(t_a) == 1:
t_a = np.repeat(t_a, n)
if len(t_b) == 1:
t_b = np.repeat(t_b, n)
if len(d_r) == 1:
d_r = np.repeat(d_r, n)
if len(d_std) == 1:
d_std = np.repeat(d_std, n)
if len(calib_curve) == 1:
calib_curve = np.repeat(calib_curve, n)
calib_probs = []
rcmean = chron.age - d_r
w2 = chron.error ** 2 + d_std ** 2
for i in range(n):
age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i],
t_a=t_a[i], t_b=t_b[i],
cutoff=cutoff, normal_distr=normal_distr)
calib_probs.append(age_realizations)
return np.array(chron.depth), calib_probs
|
brews/snakebacon
|
snakebacon/mcmcbackends/bacon/utils.py
|
calibrate_dates
|
python
|
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]):
# Python version of .bacon.calib() on line 908 in Bacon.R
# .bacon.calib - line 908
# rcmean = 4128; w2 = 4225; t_a=3; t_b=4
# test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a,
# t_b=t_b, cutoff=cutoff, normal = normal)
# Line 959 of Bacon.R
# calib = list(dets.iloc[:, 3])
# Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R
# Line #973
# TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above.
# TODO(brews): Check whether we call returned values densities, freqs or what options we should have.
n = len(chron.depth)
calib_curve = np.array(calib_curve)
t_a = np.array(t_a)
t_b = np.array(t_b)
assert t_b - 1 == t_a
d_r = np.array(d_r)
d_std = np.array(d_std)
if len(t_a) == 1:
t_a = np.repeat(t_a, n)
if len(t_b) == 1:
t_b = np.repeat(t_b, n)
if len(d_r) == 1:
d_r = np.repeat(d_r, n)
if len(d_std) == 1:
d_std = np.repeat(d_std, n)
if len(calib_curve) == 1:
calib_curve = np.repeat(calib_curve, n)
calib_probs = []
rcmean = chron.age - d_r
w2 = chron.error ** 2 + d_std ** 2
for i in range(n):
age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i],
t_a=t_a[i], t_b=t_b[i],
cutoff=cutoff, normal_distr=normal_distr)
calib_probs.append(age_realizations)
return np.array(chron.depth), calib_probs
|
Get density of calendar dates for chron date segment in core
Parameters
----------
chron : DatedProxy-like
calib_curve : CalibCurve or list of CalibCurves
d_r : scalar or ndarray
Carbon reservoir offset.
d_std : scalar or ndarray
Carbon reservoir offset error standard deviation.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar or ndarray, optional
Student's t-distribution parameter, a. t_a - 1 must equal t_b.
t_b : scalar or ndarray, optional
Student's t-distribution parameter, b. t_a - 1 must equal t_b.
Returns
-------
depth : ndarray
Depth of dated sediment sample.
probs : list of 2d arrays
Density of calendar age for each dated sediment sample. For each
sediment sample, the 2d array has two columns, the first is the
calendar age. The second column is the density for that calendar age.
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/mcmcbackends/bacon/utils.py#L52-L123
|
[
"def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):\n \"\"\"Get calendar date probabilities\n\n Parameters\n ----------\n calibcurve : CalibCurve\n Calibration curve.\n rcmean : scalar\n Reservoir-adjusted age.\n w2 : scalar\n r'$w^2_j(\\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.\n cutoff : scalar, optional\n Unknown.\n normal_distr : Bool, optional\n Use normal distribution for date errors. If False, then use Student's t-distribution.\n t_a : scalar, optional\n Student's t-distribution parameter, a. t_b - 1 must equal t_b.\n t_b : scalar, optional\n Student's t-distribution parameter, b. t_b - 1 must equal t_b.\n\n\n #Line 943 of Bacon.R\n #cc : calib_curve (3-col format)\n #rcmean : det['age'][i] - d_R\n #w2 : dat['error'][i]^2 + d_STD**2\n \"\"\"\n assert t_b - 1 == t_a\n if normal_distr:\n # TODO(brews): Test this. Line 946 of Bacon.R.\n std = np.sqrt(calibcurve.error ** 2 + w2)\n dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age)\n else:\n # TODO(brews): Test this. Line 947 of Bacon.R.\n dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))\n cal = np.array([calibcurve.calbp.copy(), dens]).T\n cal[:, 1] = cal[:, 1] / cal[:, 1].sum()\n # \"ensure that also very precise dates get a range of probabilities\"\n cutoff_mask = cal[:, 1] > cutoff\n if cutoff_mask.sum() > 5:\n out = cal[cutoff_mask, :]\n else:\n calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)\n caly = np.interp(calx, cal[:, 0], cal[:, 1])\n out = np.array([calx, caly / caly.sum()]).T\n return out\n"
] |
import numpy as np
import scipy.stats as stats
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
"""Get calendar date probabilities
Parameters
----------
calibcurve : CalibCurve
Calibration curve.
rcmean : scalar
Reservoir-adjusted age.
w2 : scalar
r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar, optional
Student's t-distribution parameter, a. t_b - 1 must equal t_b.
t_b : scalar, optional
Student's t-distribution parameter, b. t_b - 1 must equal t_b.
#Line 943 of Bacon.R
#cc : calib_curve (3-col format)
#rcmean : det['age'][i] - d_R
#w2 : dat['error'][i]^2 + d_STD**2
"""
assert t_b - 1 == t_a
if normal_distr:
# TODO(brews): Test this. Line 946 of Bacon.R.
std = np.sqrt(calibcurve.error ** 2 + w2)
dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age)
else:
# TODO(brews): Test this. Line 947 of Bacon.R.
dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))
cal = np.array([calibcurve.calbp.copy(), dens]).T
cal[:, 1] = cal[:, 1] / cal[:, 1].sum()
# "ensure that also very precise dates get a range of probabilities"
cutoff_mask = cal[:, 1] > cutoff
if cutoff_mask.sum() > 5:
out = cal[cutoff_mask, :]
else:
calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)
caly = np.interp(calx, cal[:, 0], cal[:, 1])
out = np.array([calx, caly / caly.sum()]).T
return out
|
brews/snakebacon
|
snakebacon/utils.py
|
suggest_accumulation_rate
|
python
|
def suggest_accumulation_rate(chron):
# Follow's Bacon's method @ Bacon.R ln 30 - 44
# Suggested round vals.
sugg = np.tile([1, 2, 5], (4, 1)) * np.reshape(np.repeat([0.1, 1.0, 10, 100], 3), (4, 3))
# Get ballpark accumulation rates, uncalibrated dates.
ballpacc = stats.linregress(x=chron.depth, y=chron.age * 1.1).slope
ballpacc = np.abs(sugg - ballpacc)
sugg = sugg.flat[ballpacc.argmin()] # Suggest rounded acc.rate with lowest abs diff.
return sugg
|
From core age-depth data, suggest mean accumulation rate (cm/y)
|
train
|
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/utils.py#L5-L15
| null |
import numpy as np
import scipy.stats as stats
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
get_line
|
python
|
def get_line(thing):
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
|
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L6-L25
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
_sort_modules
|
python
|
def _sort_modules(mods):
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
|
Always sort `index` or `README` as first filename in list.
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L28-L42
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
    """
    Generate a References section.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc.

    Returns
    -------
    list of str
        Markdown for references section, one "[^n]: body" entry per
        reference.
    """
    lines = []
    if "References" in doc and len(doc["References"]) > 0:
        for ref in doc["References"]:
            # Raw string: "\[" in a plain literal is an invalid escape
            # sequence (a SyntaxWarning on modern Python).
            ref_num = re.findall(r"\[([0-9]+)\]", ref)[0]
            # Drop the ".. [n]" prefix (first two space-separated tokens).
            ref_body = " ".join(ref.split(" ")[2:])
            lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
    return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum.

    Parameters
    ----------
    name : str
        Name of the thing being documented.
    enum : EnumMeta
        Enum to document.
    header_level : int
        Heading level.
    source_location : str
        URL of repo containing source code.

    Returns
    -------
    list of str
        Markdown lines for the enum, including a Members list.
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # BUG FIX: the original referenced an undefined name ``thing``
        # here, so the summary was always silently skipped.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # No (parseable) docstring; omit the summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Build markdown documentation for *module_name* and all its submodules.

    Parameters
    ----------
    module_name : str
        Importable name of the package to document.
    output_dir : str
        Directory the markdown tree is written into.
    source_location : str
        Base URL of the repository, used for source links.

    Returns
    -------
    list of tuple
        Pairs of (source file path, zero-argument rebuild callable).
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):

        def do_doc(module_name=module_name, module=module, leaf=leaf):
            # BUG FIX: bind the loop variables as defaults. The original
            # closure captured them by reference (late binding), so every
            # stored ``do_doc`` rebuilt only the *last* module when
            # called later via the returned list.
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
refs_section
|
python
|
def refs_section(doc):
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
|
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L231-L256
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Return the line number at which *thing* is defined.

    Parameters
    ----------
    thing : function, class, or module
        Object whose source location is wanted.

    Returns
    -------
    int
        Line number of the definition in its source file.
    """
    try:
        _, lineno = inspect.getsourcelines(thing)
    except TypeError:
        # Properties are not directly introspectable; use their getter.
        _, lineno = inspect.getsourcelines(thing.fget)
    return lineno
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Find and import every submodule file under *module*'s package directory.

    Parameters
    ----------
    module : module
        Package whose directory tree is walked.
    hide : list of str
        Module basenames to skip. (NOTE(review): mutable default, but it is
        never mutated here, so this is harmless.)

    Returns
    -------
    list of tuple
        (submodule, relative path) pairs, sorted with ``__init__.py``
        first via _sort_modules.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        # Skip private/dunder directories such as __pycache__.
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # A package's __init__.py maps to the package name itself.
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Discover *module* and all of its importable submodules.

    Walks the package directory, importing each package and module file
    found. Temporarily changes the working directory to the package's
    parent so the relative walk lines up with dotted module names.

    Parameters
    ----------
    module : module
        Root package to scan.
    hide : list of str
        Module basenames to skip. (NOTE(review): mutable default, never
        mutated here.)

    Returns
    -------
    set of tuple
        Entries of (dotted name, module object, is_leaf, path); packages
        are added with is_leaf False, plain module files with True.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    # Walk relative to the package parent so dotted names match paths.
    os.chdir(module_file)
    for root, dirs, files in os.walk(module.__name__):
        module_path = pathlib.Path(root)
        if not module_path.parts[-1].startswith("_"):
            try:
                module = importlib.import_module(".".join(module_path.parts))
                if not module.__name__.startswith("_"):
                    modules.add((module.__name__, module, False, module_path))
                for file in files:
                    module_name = inspect.getmodulename(file)
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join(
                                (module_path / inspect.getmodulename(file)).parts
                            )
                        )
                        if not module.__name__.startswith(
                            "_"
                        ) and not submodule.__name__.startswith("_"):
                            modules.add(
                                (
                                    submodule.__name__,
                                    submodule,
                                    True,
                                    module_path.absolute() / file,
                                )
                            )
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    # Restore the caller's working directory.
    os.chdir(dir_was)
    return modules
def get_classes(module):
    """
    Collect the public, non-enum classes defined in *module*.

    Returns
    -------
    set of tuple
        (name, class) pairs for classes whose ``__module__`` is *module*
        itself, excluding names starting with "_" and enum classes.
    """
    found = set()
    for member_name, cls in inspect.getmembers(module, inspect.isclass):
        if member_name.startswith("_"):
            continue
        if cls.__module__ != module.__name__:
            continue
        if type(cls) is enum.EnumMeta:
            continue
        found.add((member_name, cls))
    return found
def get_enums(module):
    """
    Collect the public enum classes defined in *module*.

    Returns
    -------
    set of tuple
        (name, enum class) pairs for enums declared in *module* whose
        names do not start with "_".
    """
    members = inspect.getmembers(module, inspect.isclass)
    return {
        (member_name, cls)
        for member_name, cls in members
        if not member_name.startswith("_")
        and cls.__module__ == module.__name__
        and type(cls) is enum.EnumMeta
    }
def get_funcs(module):
    """
    Collect the public functions defined in *module*.

    Returns
    -------
    set of tuple
        (name, function) pairs for functions whose ``__module__`` is
        *module* itself and whose names do not start with "_".
    """
    return {
        (member_name, fn)
        for member_name, fn in inspect.getmembers(module, inspect.isfunction)
        if not member_name.startswith("_") and fn.__module__ == module.__name__
    }
def get_available_funcs(module):
    """
    Collect public functions importable from *module* that originate
    anywhere inside the same top-level package.

    Returns
    -------
    set of tuple
        (name, function) pairs whose ``__module__`` shares *module*'s
        top-level package name.
    """
    root = module.__name__.split(".")[0]
    available = set()
    for member_name, fn in inspect.getmembers(module, inspect.isfunction):
        if member_name.startswith("_"):
            continue
        if fn.__module__.split(".")[0] == root:
            available.add((member_name, fn))
    return available
def get_available_classes(module):
    """
    Collect public classes importable from *module* that originate
    anywhere inside the same top-level package.

    Returns
    -------
    set of tuple
        (name, class) pairs whose ``__module__`` shares *module*'s
        top-level package name.
    """
    root = module.__name__.split(".")[0]
    available = set()
    for member_name, cls in inspect.getmembers(module, inspect.isclass):
        if member_name.startswith("_"):
            continue
        if cls.__module__.split(".")[0] == root:
            available.add((member_name, cls))
    return available
def deffed_here(thing, holder):
    """
    Check whether *thing* is defined in the same source file as *holder*.

    Returns
    -------
    bool
        True when both objects resolve to the same source file.
    """
    thing_file = inspect.getfile(thing)
    holder_file = inspect.getfile(holder)
    return thing_file == holder_file
def fix_footnotes(s):
    """
    Convert numpydoc footnote references ("[1]_") to Markdown ("[^1]").

    Parameters
    ----------
    s : str
        Text possibly containing reST-style footnote references.

    Returns
    -------
    str
        Text with each ``[n]_`` rewritten as ``[^n]``.
    """
    # Raw string: "\[" in a plain literal is an invalid escape sequence
    # (a SyntaxWarning on modern Python).
    return re.subn(r"\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
    """
    Convert a numpydoc Examples section into Markdown.

    Lines beginning with ">>>" open (or continue) a fenced ```python block
    with the prompt stripped; a blank line closes the fence and subsequent
    prose lines are emitted as plain text.

    Parameters
    ----------
    examples : list of str
        Raw lines of the Examples section.

    Returns
    -------
    list of str
        Markdown fragments (prose and fenced code blocks).
    """
    was_in_python = False
    in_python = False
    lines = []
    for line in examples:
        # Track fence state: ">>>" enters python mode, a blank line exits.
        if line.startswith(">>>"):
            in_python = True
        if line == "":
            in_python = False
        if not in_python and was_in_python:
            # Just left a code block: close the fence.
            lines.append("\n```\n")
        elif not in_python:
            lines.append(f"{line} ")
        elif in_python and not was_in_python:
            # Entering a code block: open the fence, then emit the line
            # with the ">>> " prompt stripped.
            lines.append("\n```python\n")
            lines.append(re.sub(">>> ", "", line) + "\n")
        else:
            lines.append(re.sub(">>> ", "", line) + "\n")
        was_in_python = in_python
    if was_in_python:
        # Input ended while still inside a code block; close the fence.
        lines.append("\n```")
    lines.append("\n\n")
    return lines
def notes_section(doc):
    """
    Render a numpydoc Notes section as a Markdown admonition.

    Parameters
    ----------
    doc : dict
        Parsed numpydoc data.

    Returns
    -------
    list of str
        Lines for an "!!! note" block, or an empty list when there are
        no notes.
    """
    out = []
    if "Notes" in doc:
        notes = doc["Notes"]
        if len(notes) > 0:
            out.append("!!! note\n")
            out.append(f"    {' '.join(notes)}\n\n")
    return out
def examples_section(doc, header_level):
    """
    Generate markdown for an Examples section.

    Parameters
    ----------
    doc : dict
        Parsed numpydoc data.
    header_level : int
        Number of `#`s used by the enclosing header; the section header
        is rendered one level deeper.

    Returns
    -------
    list of str
        Markdown for the examples section (empty when there are none).
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        # Removed an unused local (`egs`) that joined the example lines
        # but was never read.
        lines += mangle_examples(doc["Examples"])
    return lines
def returns_section(thing, doc, header_level):
    """
    Generate markdown for a Returns section.

    Prefers the live return annotation on *thing* (or its ``fget`` for
    properties) over the docstring's Returns entries.

    Parameters
    ----------
    thing : function
        Function to produce returns for.
    doc : dict
        Parsed numpydoc data.
    header_level : int
        Number of `#`s used by the enclosing header.

    Returns
    -------
    list of str
        Markdown for the returns section.
    """
    lines = []
    return_type = None
    try:
        return_type = thing.__annotations__["return"]
    except AttributeError:
        # Possibly a property: look at the getter's annotations instead.
        try:
            return_type = thing.fget.__annotations__["return"]
        except:
            pass
    except KeyError:
        # Annotations exist but there is no "return" entry.
        pass
    if return_type is None:
        return_type = ""
    else:
        try:
            # Builtins render bare; everything else fully qualified.
            return_type = (
                f"{return_type.__name__}"
                if return_type.__module__ == "builtins"
                else f"{return_type.__module__}.{return_type.__name__}"
            )
        except AttributeError:
            # typing constructs etc. have no __name__; fall back to str().
            return_type = str(return_type)
    try:
        if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
            lines.append(f"{'#'*(header_level+1)} Returns\n")
            if return_type != "" and len(doc["Returns"]) == 1:
                # One documented value plus an annotation: combine them.
                name, typ, desc = doc["Returns"][0]
                if typ != "":
                    lines.append(f"- `{name}`: ``{return_type}``")
                else:
                    lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
                if desc != "":
                    lines.append(f"    {' '.join(desc)}\n\n")
            elif return_type != "":
                # Annotation only.
                lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
            else:
                # Docstring entries only.
                for name, typ, desc in doc["Returns"]:
                    if ":" in name:
                        name, typ = name.split(":")
                    if typ != "":
                        line = f"- `{name}`: {mangle_types(typ)}"
                    else:
                        line = f"- {mangle_types(name)}"
                    line += "\n\n"
                    lines.append(line)
                    lines.append(f"    {' '.join(desc)}\n\n")
    except Exception as e:
        # Malformed docstring data: emit whatever was built so far.
        pass
    return lines
def summary(doc):
    """
    Render the Summary and Extended Summary sections as Markdown.

    Parameters
    ----------
    doc : dict
        Parsed numpydoc data.

    Returns
    -------
    list of str
        Markdown strings (possibly empty), with footnote references
        converted to Markdown form.
    """
    out = []
    for key in ("Summary", "Extended Summary"):
        if key in doc and len(doc[key]) > 0:
            out.append(fix_footnotes(" ".join(doc[key])))
            out.append("\n")
    return out
def params_section(thing, doc, header_level):
    """
    Generate markdown for a Parameters section.

    Parameters
    ----------
    thing : function
        Callable whose live signature supplies annotations and defaults.
    doc : dict
        Parsed numpydoc data.
    header_level : int
        Number of `#`s used by the enclosing header.

    Returns
    -------
    list of str
        Markdown for the parameters section.
    """
    # type_list merges the docstring's Parameters entries with the live
    # signature; annotations win over docstring types. (Removed an unused
    # local `lines` and fixed the "functuon"/"examples section" typos in
    # the original docstring.)
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
def escape(string):
    """
    Backslash-escape underscores for Markdown.

    Parameters
    ----------
    string : str
        String to escape.

    Returns
    -------
    str
        *string* with every "_" replaced by "\\_".
    """
    return "\\_".join(string.split("_"))
def get_source_link(thing, source_location):
    """
    Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for.
    source_location : str
        GitHub url of the source code.

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found.
    """
    try:
        lineno = get_line(thing)
        try:
            owner_module = inspect.getmodule(thing)
            assert owner_module is not None
        except (TypeError, AssertionError):
            # Properties: resolve the module from the getter instead.
            owner_module = inspect.getmodule(thing.fget)
        # Map the dotted module name back to a repository file path.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Source not resolvable (builtin, C extension, ...): omit link.
        pass
    return ""
def get_signature(name, thing):
    """
    Get the signature for a function or class, formatted nicely if possible.

    Parameters
    ----------
    name : str
        Name of the thing, used as the first part of the signature.
    thing : class or function
        Thing to get the signature of.

    Returns
    -------
    str
        A fenced ```python block containing the signature, or "" for
        modules and objects without an introspectable signature.
    """
    if inspect.ismodule(thing):
        return ""
    if isinstance(thing, property):
        # Properties render as a bare name — they take no arguments.
        func_sig = name
    else:
        try:
            sig = inspect.signature(thing)
        except TypeError:
            # Property-like descriptor: use the getter's signature.
            sig = inspect.signature(thing.fget)
        except ValueError:
            # Builtins without introspectable signatures.
            return ""
        func_sig = f"{name}{sig}"
    try:
        # Pretty-print with black when the signature parses as code.
        mode = black.FileMode(line_length=80)
        func_sig = black.format_str(func_sig, mode=mode).strip()
    except (ValueError, TypeError):
        pass
    return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
    """
    Render a type annotation (and its default, if any) as Markdown.

    Parameters
    ----------
    typ : type
        Annotation to render.
    default : any
        Default value from the signature; ``inspect._empty`` means the
        parameter has no default.

    Returns
    -------
    str
        Markdown such as "`int`" or "`int`, default ``5``".
    """
    try:
        qualifier = "" if typ.__module__ == "builtins" else f"{typ.__module__}."
        rendered = f"`{qualifier}{typ.__name__}`"
    except AttributeError:
        # Not a class (e.g. a typing construct); fall back to str().
        rendered = f"`{str(typ)}`"
    if default is None:
        return f"{rendered}, default ``None``"
    if default == inspect._empty:
        return rendered
    return f"{rendered}, default ``{default}``"
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing.
    doc : list of tuple
        Numpydoc's type list section, (names, types, description) entries.
    header : str
        Markdown header emitted before the list.

    Returns
    -------
    list of str
        Markdown formatted type list, or [] when nothing was emitted
        beyond the header.
    """
    lines = []
    docced = set()
    lines.append(header)
    try:
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    # Prefer the live annotation over the docstring type.
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            if len(unannotated) > 0:
                # Group all unannotated names on one bullet, with the
                # docstring's type string if there was one.
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f"    {' '.join(description)}\n\n")
        # Second pass: emit any documented names skipped above.
        # NOTE(review): every name is added to `docced` in the first
        # pass, so this loop appears to only matter if that pass was
        # interrupted — confirm intent.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        print(e)
    return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
    """
    Generate an attributes section for classes.

    Prefers type annotations, if they are present.

    Parameters
    ----------
    thing : class
        Class to document.
    doc : dict
        Numpydoc output.
    header_level : int
        Number of `#`s to use for header.

    Returns
    -------
    list of str
        Markdown formatted attribute list.
    """
    # Get Attributes — only classes carry an attributes section.
    if not inspect.isclass(thing):
        return []
    props, class_doc = _split_props(thing, doc["Attributes"])
    tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
    if len(tl) == 0 and len(props) > 0:
        # Only properties were documented: emit anchors linking to the
        # property entries rendered with the class's methods.
        tl.append("\n### Attributes\n\n")
        for prop in props:
            tl.append(f"- [`{prop}`](#{prop})\n\n")
    return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum.

    Parameters
    ----------
    name : str
        Name of the thing being documented.
    enum : EnumMeta
        Enum to document.
    header_level : int
        Heading level.
    source_location : str
        URL of repo containing source code.

    Returns
    -------
    list of str
        Markdown lines for the enum, including a Members list.
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # BUG FIX: the original referenced an undefined name ``thing``
        # here, so the summary was always silently skipped.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # No (parseable) docstring; omit the summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function.

    Parameters
    ----------
    name : str
        Name of the thing being documented.
    thing : class or function
        Class or function to document.
    header_level : int
        Heading level.
    source_location : str
        URL of repo containing source code.

    Returns
    -------
    list of str
        Markdown lines for the object.
    """
    # Enums get their own specialised renderer.
    if type(thing) is enum.EnumMeta:
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Missing or unparseable docstring: emit header/signature only.
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module.

    Parameters
    ----------
    module_name : str
        Dotted name of the module (last component used as the page title).
    module : module
        Module object to document.
    output_dir : str
        Root directory for the generated markdown tree.
    source_location : str
        Base URL of the repository, used for source links.
    leaf : bool
        True for a plain module file (written as <name>.md), False for a
        package (written as <name>/index.md).

    Returns
    -------
    tuple of (pathlib.Path, str)
        Absolute output path and the markdown document text.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): alias_funcs and module_path below are computed but
    # never used — confirm whether they can be removed.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        # Public methods defined on the class's own file, plus properties.
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
examples_section
|
python
|
def examples_section(doc, header_level):
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
|
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L259-L280
|
[
"def mangle_examples(examples):\n was_in_python = False\n in_python = False\n lines = []\n for line in examples:\n if line.startswith(\">>>\"):\n in_python = True\n if line == \"\":\n in_python = False\n if not in_python and was_in_python:\n lines.append(\"\\n```\\n\")\n elif not in_python:\n lines.append(f\"{line} \")\n elif in_python and not was_in_python:\n lines.append(\"\\n```python\\n\")\n lines.append(re.sub(\">>> \", \"\", line) + \"\\n\")\n else:\n lines.append(re.sub(\">>> \", \"\", line) + \"\\n\")\n was_in_python = in_python\n if was_in_python:\n lines.append(\"\\n```\")\n lines.append(\"\\n\\n\")\n return lines\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Look up the line number at which *thing* is defined.

    Parameters
    ----------
    thing : function, class, module

    Returns
    -------
    int
        Line number in the source file
    """
    try:
        _, lineno = inspect.getsourcelines(thing)
    except TypeError:
        # Properties are not directly introspectable; use the getter instead.
        _, lineno = inspect.getsourcelines(thing.fget)
    return lineno
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Collect ``(module, path)`` pairs for every importable file under *module*.

    Walks the package's directory tree, imports each non-hidden module found
    (an import side effect), and returns the pairs sorted so that a package's
    ``__init__.py`` comes first. Directories whose name starts with ``_`` are
    skipped entirely.

    NOTE(review): *hide* is a mutable default argument; it is never mutated
    here, but a tuple would be safer.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        # Relative to the package's parent so the dotted module name can be
        # rebuilt directly from the path parts.
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # "" maps a package's __init__.py onto the package itself.
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                # Directory is not an importable package: report and move on.
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Discover *module* and all of its submodules by walking its source tree.

    Returns a set of ``(name, module, leaf, path)`` tuples where ``leaf`` is
    False for packages and True for plain ``.py`` modules. Importing each
    discovered module is a side effect, and the process working directory is
    temporarily changed to the package's parent (restored before returning).

    NOTE(review): *hide* is a mutable default argument (never mutated here),
    and the loop rebinds the *module* parameter — both inherited quirks.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    # Walk relative paths so ".".join(parts) yields valid dotted names.
    os.chdir(module_file)
    for root, dirs, files in os.walk(module.__name__):
        module_path = pathlib.Path(root)
        if not module_path.parts[-1].startswith("_"):
            try:
                # Rebinds the parameter: from here on, "module" is the
                # package for the current directory.
                module = importlib.import_module(".".join(module_path.parts))
                if not module.__name__.startswith("_"):
                    modules.add((module.__name__, module, False, module_path))
                for file in files:
                    module_name = inspect.getmodulename(file)
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join(
                                (module_path / inspect.getmodulename(file)).parts
                            )
                        )
                        if not module.__name__.startswith(
                            "_"
                        ) and not submodule.__name__.startswith("_"):
                            modules.add(
                                (
                                    submodule.__name__,
                                    submodule,
                                    True,
                                    module_path.absolute() / file,
                                )
                            )
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    os.chdir(dir_was)
    return modules
def get_classes(module):
    """Return ``(name, cls)`` pairs for public non-enum classes defined in *module*."""
    return {
        member
        for member in inspect.getmembers(module, inspect.isclass)
        if not member[0].startswith("_")
        and member[1].__module__ == module.__name__
        and type(member[1]) is not enum.EnumMeta
    }
def get_enums(module):
    """Return ``(name, cls)`` pairs for public Enum classes defined in *module*."""
    return {
        member
        for member in inspect.getmembers(module, inspect.isclass)
        if not member[0].startswith("_")
        and member[1].__module__ == module.__name__
        and type(member[1]) is enum.EnumMeta
    }
def get_funcs(module):
    """Return ``(name, func)`` pairs for public functions defined in *module*."""
    return {
        member
        for member in inspect.getmembers(module, inspect.isfunction)
        if not member[0].startswith("_") and member[1].__module__ == module.__name__
    }
def get_available_funcs(module):
    """Return public functions visible in *module* that live anywhere in its top-level package."""
    package_root = module.__name__.split(".")[0]
    return {
        member
        for member in inspect.getmembers(module, inspect.isfunction)
        if not member[0].startswith("_")
        and member[1].__module__.split(".")[0] == package_root
    }
def get_available_classes(module):
    """Return public classes visible in *module* that live anywhere in its top-level package."""
    package_root = module.__name__.split(".")[0]
    return {
        member
        for member in inspect.getmembers(module, inspect.isclass)
        if not member[0].startswith("_")
        and member[1].__module__.split(".")[0] == package_root
    }
def deffed_here(thing, holder):
    """Return True when *thing* is defined in the same source file as *holder*."""
    thing_file = inspect.getfile(thing)
    holder_file = inspect.getfile(holder)
    return thing_file == holder_file
def fix_footnotes(s):
    """
    Convert numpydoc footnote references (``[1]_``) into markdown
    footnote references (``[^1]``).

    Parameters
    ----------
    s : str

    Returns
    -------
    str
    """
    # Raw string: the original non-raw pattern contained the invalid
    # escape "\[", which warns on Python 3.12+.
    return re.sub(r"\[([0-9]+)\]_", r"[^\1]", s)
def mangle_types(types):
    """
    Convert a numpydoc type string into backtick-formatted markdown.

    Handles ``default ...`` clauses, ``{...}`` choice sets, subscripted
    annotations such as ``List[int]``, comma/"or"-separated alternatives
    and ``X of Y`` phrases.

    Parameters
    ----------
    types : str
        Raw type text from a numpydoc field.

    Returns
    -------
    str
        Comma-separated markdown version of *types* (the default clause,
        if any, is rendered last).
    """
    default = re.findall("default .+", types)
    mangled = []
    if len(default):
        default = re.sub("default (.+)", r"default ``\1``", default[0])
        mangled.append(default)
        types = re.sub("default .+", "", types)
    # Pull "{...}" choice sets and "Name[...]" annotations out whole so the
    # comma split below cannot break them apart. Raw strings fix the
    # invalid "\[" escapes of the original patterns.
    curlied = re.findall("{.+}", types)
    no_curls = re.subn("{.+},?", "", types)[0]
    annotated = re.findall(r"[a-zA-Z]+\[.+\]", no_curls)
    no_curls = re.subn(r"[a-zA-Z]+\[.+\],?", "", no_curls)[0]
    # Remaining alternatives are separated by "," and/or " or ".
    ts = [t.strip() for t in no_curls.split(",")]
    ts = [t.split(" or ") for t in ts]
    ts = [item for sublist in ts for item in sublist if item != ""]
    types = ts + curlied + annotated
    for typ in types:
        ts = [f"``{t}``" for t in typ.split(" of ")]
        mangled.append(" of ".join(ts))
    # Reversal preserved from the original: collected types come out in
    # reverse order, with the default clause last.
    return ", ".join(reversed(mangled))
def mangle_examples(examples):
    """
    Convert a numpydoc Examples block into markdown, wrapping runs of
    ``>>>`` doctest lines in fenced ``python`` code blocks and stripping
    the ``>>> `` prompts.

    Parameters
    ----------
    examples : list of str
        Raw example lines from numpydoc.

    Returns
    -------
    list of str
        Markdown lines, ending with a blank-line separator.
    """
    was_in_python = False
    in_python = False
    lines = []
    for line in examples:
        # A ">>>" prompt starts (or continues) a code run; a blank line ends it.
        if line.startswith(">>>"):
            in_python = True
        if line == "":
            in_python = False
        if not in_python and was_in_python:
            # Just left a code run: close the fence.
            lines.append("\n```\n")
        elif not in_python:
            # Prose between examples flows onto one line.
            lines.append(f"{line} ")
        elif in_python and not was_in_python:
            # Just entered a code run: open the fence, then emit the line.
            lines.append("\n```python\n")
            lines.append(re.sub(">>> ", "", line) + "\n")
        else:
            lines.append(re.sub(">>> ", "", line) + "\n")
        was_in_python = in_python
    if was_in_python:
        # The block ended while still inside code: close the fence.
        lines.append("\n```")
    lines.append("\n\n")
    return lines
def notes_section(doc):
    """Render the numpydoc Notes section, if any, as a markdown ``!!! note`` admonition."""
    if "Notes" not in doc or len(doc["Notes"]) == 0:
        return []
    body = " ".join(doc["Notes"])
    return ["!!! note\n", f"    {body}\n\n"]
def refs_section(doc):
    """
    Generate a References section.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc

    Returns
    -------
    list of str
        Markdown for references section
    """
    lines = []
    if "References" in doc and len(doc["References"]) > 0:
        for ref in doc["References"]:
            # numpydoc references look like ".. [1] Author, Title, ...";
            # turn each into a markdown footnote definition "[^1]: ...".
            # Raw string fixes the invalid "\[" escape of the original.
            ref_num = re.findall(r"\[([0-9]+)\]", ref)[0]
            ref_body = " ".join(ref.split(" ")[2:])
            lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
    return lines
def returns_section(thing, doc, header_level):
    """
    Generate markdown for Returns section.

    Prefers the callable's return annotation over the docstring's declared
    type when both are available.

    Parameters
    ----------
    thing : function
        Function to produce returns for
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the returns section
    """
    lines = []
    return_type = None
    try:
        return_type = thing.__annotations__["return"]
    except AttributeError:
        # Properties keep their annotations on the getter. The original
        # used a bare except here; narrowed to the exceptions that occur.
        try:
            return_type = thing.fget.__annotations__["return"]
        except (AttributeError, KeyError):
            pass
    except KeyError:
        pass
    if return_type is None:
        return_type = ""
    else:
        try:
            return_type = (
                f"{return_type.__name__}"
                if return_type.__module__ == "builtins"
                else f"{return_type.__module__}.{return_type.__name__}"
            )
        except AttributeError:
            # Not a plain class (e.g. a typing construct): fall back to str().
            return_type = str(return_type)
    try:
        if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
            lines.append(f"{'#'*(header_level+1)} Returns\n")
            if return_type != "" and len(doc["Returns"]) == 1:
                name, typ, desc = doc["Returns"][0]
                if typ != "":
                    lines.append(f"- `{name}`: ``{return_type}``")
                else:
                    lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
                # BUG FIX: desc is a list, so the original `desc != ""`
                # was always True and emitted stray blank description lines.
                if desc:
                    lines.append(f"    {' '.join(desc)}\n\n")
            elif return_type != "":
                lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
            else:
                for name, typ, desc in doc["Returns"]:
                    if ":" in name:
                        name, typ = name.split(":")
                    if typ != "":
                        line = f"- `{name}`: {mangle_types(typ)}"
                    else:
                        line = f"- {mangle_types(name)}"
                    line += "\n\n"
                    lines.append(line)
                    lines.append(f"    {' '.join(desc)}\n\n")
    except Exception:
        # Best-effort rendering of possibly malformed docstring data:
        # return whatever was built before the failure.
        pass
    return lines
def summary(doc):
    """
    Generate markdown for the summary sections.

    Parameters
    ----------
    doc : dict
        Output from numpydoc

    Returns
    -------
    list of str
        Markdown strings
    """
    lines = []
    # Both summary flavours get the same footnote fix-up and separator.
    for key in ("Summary", "Extended Summary"):
        if key in doc and len(doc[key]) > 0:
            lines.append(fix_footnotes(" ".join(doc[key])))
            lines.append("\n")
    return lines
def params_section(thing, doc, header_level):
    """
    Generate markdown for Parameters section.

    Parameters
    ----------
    thing : function
        Function to produce parameters from
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the parameters section
    """
    # Delegate to type_list, which prefers signature annotations over the
    # docstring's declared types. (Removed an unused `lines` local and
    # fixed the "functuon"/"examples section" docstring typos.)
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
def escape(string):
    """
    Escape underscores in markdown.

    Parameters
    ----------
    string : str
        String to escape

    Returns
    -------
    str
        The string, with `_`s escaped with backslashes
    """
    return "\\_".join(string.split("_"))
def get_source_link(thing, source_location):
    """
    Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for
    source_location : str
        GitHub url of the source code

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found
    """
    try:
        lineno = get_line(thing)
        try:
            owner_module = inspect.getmodule(thing)
            assert owner_module is not None
        except (TypeError, AssertionError):
            # Properties: resolve the owning module through the getter.
            owner_module = inspect.getmodule(thing.fget)
        # Rebuild the repo-relative file path from the dotted module name.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Deliberately best-effort: anything whose source cannot be
        # retrieved simply yields no link.
        # print("Failed to find source file.")
        # print(e)
        # print(lineno)
        # print(thing)
        # print(owner_module)
        # print(thing_file)
        # print(source_location)
        pass
    return ""
def get_signature(name, thing):
    """
    Get the signature for a function or class, formatted nicely if possible.

    Returns a fenced markdown code block, or "" for modules and for
    objects whose signature cannot be determined.

    Parameters
    ----------
    name : str
        Name of the thing, used as the first part of the signature
    thing : class or function
        Thing to get the signature of
    """
    if inspect.ismodule(thing):
        return ""
    if isinstance(thing, property):
        # Properties are rendered as a bare name, no parameter list.
        func_sig = name
    else:
        try:
            sig = inspect.signature(thing)
        except TypeError:
            # Property-like objects: read the signature off the getter.
            sig = inspect.signature(thing.fget)
        except ValueError:
            # No retrievable signature (e.g. some builtins).
            return ""
        func_sig = f"{name}{sig}"
    try:
        # Pretty-print with black when the signature parses; fall back to
        # the raw one-liner otherwise.
        mode = black.FileMode(line_length=80)
        func_sig = black.format_str(func_sig, mode=mode).strip()
    except (ValueError, TypeError):
        pass
    return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string
    default : any
        Default value (if any) of the type

    Returns
    -------
    str
        String version of the type annotation
    """
    try:
        if typ.__module__ == "builtins":
            rendered = f"`{typ.__name__}`"
        else:
            rendered = f"`{typ.__module__}.{typ.__name__}`"
    except AttributeError:
        # Not a plain class (e.g. a typing construct): use its repr.
        rendered = f"`{str(typ)}`"
    if default is None:
        return f"{rendered}, default ``None``"
    if default == inspect._empty:
        # No default supplied in the signature.
        return rendered
    return f"{rendered}, default ``{default}``"
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown header line prepended to the list.

    Returns
    -------
    list of str
        Markdown formatted type list, or [] if nothing was documented.
    """
    lines = []
    docced = set()
    lines.append(header)
    try:
        # First pass: entries present in the docstring.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        # Treat a missing annotation like an attribute error
                        # so this name falls into the unannotated bucket.
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                # Fall back to the docstring's declared type.
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f"    {' '.join(description)}\n\n")
        # Second pass: annotated parameters the docstring never mentioned.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        print(e)
    # Only the header means nothing was documented: return nothing.
    return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
    """
    Generate an attributes section for classes.

    Prefers type annotations, if they are present.

    Parameters
    ----------
    thing : class
        Class to document
    doc : dict
        Numpydoc output
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown formatted attribute list
    """
    # Get Attributes
    if not inspect.isclass(thing):
        # Only classes have attributes worth listing.
        return []
    # Split properties (documented as methods elsewhere) from plain fields.
    props, class_doc = _split_props(thing, doc["Attributes"])
    tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
    if len(tl) == 0 and len(props) > 0:
        # No plain attributes, but there are properties: link to them.
        tl.append("\n### Attributes\n\n")
    for prop in props:
        tl.append(f"- [`{prop}`](#{prop})\n\n")
    return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document (note: shadows the ``enum`` module inside
        this function; the module is not used here)
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # BUG FIX: the original referenced an undefined name ("thing"),
        # so the NameError was swallowed and the summary never rendered.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # No (or unparsable) docstring: skip the summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    if type(thing) is enum.EnumMeta:
        # Enums get their own member-listing layout.
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Best-effort: things without a parsable docstring keep only the
        # header, signature and source link.
        # print(f"No docstring for {name}, src {source_location}: {e}")
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module

    Builds the full markdown page (module overview, enums, classes with
    their methods, then functions) and returns where to write it.

    Parameters
    ----------
    module_name : str
    module : module
    output_dir : str
    source_location : str
    leaf : bool
        True for a plain module (written as ``<name>.md``), False for a
        package (written as ``<name>/index.md``).

    Returns
    -------
    tuple of (pathlib.Path, str)
        Absolute output path and the rendered markdown document.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): alias_funcs and module_path below are computed but
    # never used in this function.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        # Only document methods actually defined in this class's file,
        # plus any properties.
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
# CLI entry point: thin wrapper around make_api_doc. click maps the
# "source-location" argument onto the source_location parameter.
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
    make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Generate markdown documentation for *module_name* and all submodules.

    Parameters
    ----------
    module_name : str
        Importable name of the package to document.
    output_dir : str
        Directory the markdown tree is written into.
    source_location : str
        Base URL of the repository, used for source links.

    Returns
    -------
    list of tuple
        ``(file, rebuild)`` pairs; calling ``rebuild()`` regenerates that
        file's documentation.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):

        def do_doc(module_name=module_name, module=module, leaf=leaf):
            # BUG FIX: bind the loop variables as defaults. The original
            # closure was late-binding, so every callback stored in
            # `files` regenerated only the *last* module of the loop.
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
# Allow running this module directly as a script via the click CLI.
if __name__ == "__main__":
    cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
returns_section
|
python
|
def returns_section(thing, doc, header_level):
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
|
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L283-L357
|
[
"def mangle_types(types):\n default = re.findall(\"default .+\", types)\n mangled = []\n try:\n if len(default):\n default = re.sub(\"default (.+)\", r\"default ``\\1``\", default[0])\n mangled.append(default)\n types = re.sub(\"default .+\", \"\", types)\n curlied = re.findall(\"{.+}\", types)\n no_curls = re.subn(\"{.+},?\", \"\", types)[0]\n annotated = re.findall(\"[a-zA-Z]+\\[.+\\]\", no_curls)\n no_curls = re.subn(\"[a-zA-Z]+\\[.+\\],?\", \"\", no_curls)[0]\n ts = [t.strip() for t in no_curls.split(\",\")]\n ts = [t.split(\" or \") for t in ts]\n ts = [item for sublist in ts for item in sublist if item != \"\"]\n types = ts + curlied + annotated\n for ix, typ in enumerate(types):\n ts = [f\"``{t}``\" for t in typ.split(\" of \")]\n mangled.append(\" of \".join(ts))\n except Exception as e:\n # print(e)\n # print(default)\n # print(types)\n raise e\n output = reversed(mangled)\n\n return \", \".join(output)\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
    """
    Generate markdown for Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        # Removed the original's unused `egs = "\n".join(...)` local;
        # mangle_examples consumes the raw example lines directly.
        lines += mangle_examples(doc["Examples"])
    return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
summary
|
python
|
def summary(doc):
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
|
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L360-L381
|
[
"def fix_footnotes(s):\n return re.subn(\"\\[([0-9]+)\\]_\", r\"[^\\1]\", s)[0]\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
params_section
|
python
|
def params_section(thing, doc, header_level):
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
|
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L384-L409
|
[
"def type_list(signature, doc, header):\n \"\"\"\n Construct a list of types, preferring type annotations to\n docstrings if they are available.\n\n Parameters\n ----------\n signature : Signature\n Signature of thing\n doc : list of tuple\n Numpydoc's type list section\n\n Returns\n -------\n list of str\n Markdown formatted type list\n \"\"\"\n\n lines = []\n docced = set()\n lines.append(header)\n try:\n for names, types, description in doc:\n names, types = _get_names(names, types)\n unannotated = []\n for name in names:\n docced.add(name)\n try:\n typ = signature.parameters[name].annotation\n if typ == inspect._empty:\n raise AttributeError\n default = signature.parameters[name].default\n type_string = string_annotation(typ, default)\n lines.append(f\"- `{name}`: {type_string}\")\n lines.append(\"\\n\\n\")\n except (AttributeError, KeyError):\n unannotated.append(name) # No annotation\n\n if len(unannotated) > 0:\n lines.append(\"- \")\n lines.append(\", \".join(f\"`{name}`\" for name in unannotated))\n if types != \"\" and len(unannotated) > 0:\n lines.append(f\": {mangle_types(types)}\")\n lines.append(\"\\n\\n\")\n lines.append(f\" {' '.join(description)}\\n\\n\")\n for names, types, description in doc:\n names, types = _get_names(names, types)\n for name in names:\n if name not in docced:\n try:\n typ = signature.parameters[name].annotation\n default = signature.parameters[name].default\n type_string = string_annotation(typ, default)\n lines.append(f\"- `{name}`: {type_string}\")\n lines.append(\"\\n\\n\")\n except (AttributeError, KeyError):\n lines.append(f\"- `{name}`\")\n lines.append(\"\\n\\n\")\n except Exception as e:\n print(e)\n return lines if len(lines) > 1 else []\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
    """
    Convert numpydoc footnote references ``[1]_`` to markdown footnotes ``[^1]``.

    Parameters
    ----------
    s : str
        Text possibly containing reST-style footnote references.

    Returns
    -------
    str
        Text with markdown footnote syntax.
    """
    # Raw strings: "\[" in a plain literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxWarning in newer Pythons).
    return re.subn(r"\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
    """
    Render a numpydoc type string as markdown.

    Each plain type is wrapped in double backticks; a trailing
    ``default ...`` clause is moved to the end with the default value in
    double backticks. Curly-brace choice sets (``{"a", "b"}``) and
    subscripted types (``list[int]``) are kept whole.

    Parameters
    ----------
    types : str
        Raw type description, e.g. ``"int or str, default 5"``.

    Returns
    -------
    str
        Markdown-formatted type list.
    """
    default = re.findall("default .+", types)
    mangled = []
    if len(default):
        default = re.sub("default (.+)", r"default ``\1``", default[0])
        mangled.append(default)
        types = re.sub("default .+", "", types)
    # Pull out composite forms first so commas inside them don't split.
    curlied = re.findall("{.+}", types)
    no_curls = re.subn("{.+},?", "", types)[0]
    # Raw strings: "\[" in a plain literal is an invalid escape sequence.
    annotated = re.findall(r"[a-zA-Z]+\[.+\]", no_curls)
    no_curls = re.subn(r"[a-zA-Z]+\[.+\],?", "", no_curls)[0]
    ts = [t.strip() for t in no_curls.split(",")]
    ts = [t.split(" or ") for t in ts]
    ts = [item for sublist in ts for item in sublist if item != ""]
    types = ts + curlied + annotated
    for typ in types:
        parts = [f"``{t}``" for t in typ.split(" of ")]
        mangled.append(" of ".join(parts))
    # The "default" entry was appended first, so reversing puts it last.
    output = reversed(mangled)
    return ", ".join(output)
def mangle_examples(examples):
    """
    Convert numpydoc Examples lines into markdown.

    Runs of ``>>>``-prefixed lines (and their output, up to a blank line)
    become fenced ``python`` blocks with the prompt stripped; other lines
    pass through as prose.

    Parameters
    ----------
    examples : list of str
        Raw lines of the Examples section.

    Returns
    -------
    list of str
        Markdown fragments.
    """
    was_in_python = False
    in_python = False
    lines = []
    for line in examples:
        if line.startswith(">>>"):
            in_python = True
        if line == "":
            # A blank line terminates the current code block.
            in_python = False
        if not in_python and was_in_python:
            # Just left a code block: close the fence.
            lines.append("\n```\n")
        elif not in_python:
            lines.append(f"{line} ")
        elif in_python and not was_in_python:
            # Just entered a code block: open the fence, then emit the line.
            lines.append("\n```python\n")
            lines.append(re.sub(">>> ", "", line) + "\n")
        else:
            lines.append(re.sub(">>> ", "", line) + "\n")
        was_in_python = in_python
    if was_in_python:
        # Input ended while still inside a code block: close it.
        lines.append("\n```")
    lines.append("\n\n")
    return lines
def notes_section(doc):
    """Render the numpydoc Notes section as an mkdocs ``!!! note`` admonition."""
    out = []
    if "Notes" in doc and len(doc["Notes"]) > 0:
        body = " ".join(doc["Notes"])
        out.append("!!! note\n")
        out.append(f" {body}\n\n")
    return out
def refs_section(doc):
    """
    Generate a References section.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc

    Returns
    -------
    list of str
        Markdown for references section
    """
    lines = []
    if "References" in doc and len(doc["References"]) > 0:
        for ref in doc["References"]:
            # Raw string: "\[" in a plain literal is an invalid escape.
            ref_num = re.findall(r"\[([0-9]+)\]", ref)[0]
            # Drop the two-word ".. [n]" prefix, keep the citation body.
            ref_body = " ".join(ref.split(" ")[2:])
            lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
    return lines
def examples_section(doc, header_level):
    """
    Generate markdown for an Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#' * (header_level + 1)} Examples \n")
        # (Removed an unused local that joined the lines but was never read.)
        lines += mangle_examples(doc["Examples"])
    return lines
def returns_section(thing, doc, header_level):
    """
    Generate markdown for a Returns section.

    Prefers the function's return annotation over the docstring type
    when both are present.

    Parameters
    ----------
    thing : function
        Function to produce returns for
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the returns section
    """
    lines = []
    return_type = None
    # Pull the annotated return type; properties keep theirs on the getter.
    try:
        return_type = thing.__annotations__["return"]
    except AttributeError:
        try:
            return_type = thing.fget.__annotations__["return"]
        except:
            pass
    except KeyError:
        pass
    if return_type is None:
        return_type = ""
    else:
        # Builtins are shown bare; everything else fully qualified.
        try:
            return_type = (
                f"{return_type.__name__}"
                if return_type.__module__ == "builtins"
                else f"{return_type.__module__}.{return_type.__name__}"
            )
        except AttributeError:
            # typing constructs (e.g. List[int]) have no __name__.
            return_type = str(return_type)
    try:
        if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
            lines.append(f"{'#'*(header_level+1)} Returns\n")
            if return_type != "" and len(doc["Returns"]) == 1:
                # One documented value: pair its description with the
                # annotated type (annotation wins over docstring type).
                name, typ, desc = doc["Returns"][0]
                if typ != "":
                    lines.append(f"- `{name}`: ``{return_type}``")
                else:
                    lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
                if desc != "":
                    lines.append(f" {' '.join(desc)}\n\n")
            elif return_type != "":
                # Annotation only, no (single) docstring entry.
                lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
            else:
                # Docstring entries only; the name field may wrongly
                # contain a "name:type" pair.
                for name, typ, desc in doc["Returns"]:
                    if ":" in name:
                        name, typ = name.split(":")
                    if typ != "":
                        line = f"- `{name}`: {mangle_types(typ)}"
                    else:
                        line = f"- {mangle_types(name)}"
                    line += "\n\n"
                    lines.append(line)
                    lines.append(f" {' '.join(desc)}\n\n")
    except Exception as e:
        # Malformed docstrings are tolerated: emit whatever was built.
        pass
    return lines
def summary(doc):
    """
    Render the Summary and Extended Summary sections as markdown lines.

    Parameters
    ----------
    doc : dict
        Output from numpydoc

    Returns
    -------
    list of str
        Markdown strings
    """
    out = []
    for key in ("Summary", "Extended Summary"):
        if key in doc and len(doc[key]) > 0:
            out.append(fix_footnotes(" ".join(doc[key])))
            out.append("\n")
    return out
def escape(string):
    """
    Escape underscores so markdown does not treat them as emphasis.

    Parameters
    ----------
    string : str
        Text to escape.

    Returns
    -------
    str
        *string* with every ``_`` prefixed by a backslash.
    """
    return string.replace("_", "\\_")
def get_source_link(thing, source_location):
    """
    Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for
    source_location : str
        GitHub url of the source code

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found
    """
    try:
        lineno = get_line(thing)
        # Properties have no module themselves; fall back to the getter's.
        try:
            owner_module = inspect.getmodule(thing)
            assert owner_module is not None
        except (TypeError, AssertionError):
            owner_module = inspect.getmodule(thing.fget)
        # Rebuild the repo-relative file path from the dotted module name;
        # assumes the package layout mirrors the dotted path — TODO confirm.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Best-effort: anything without introspectable source yields "".
        pass
    return ""
def get_signature(name, thing):
    """
    Get the signature for a function or class, formatted nicely if possible.

    Parameters
    ----------
    name : str
        Name of the thing, used as the first part of the signature
    thing : class or function
        Thing to get the signature of

    Returns
    -------
    str
        A fenced ``python`` code block containing the signature, or ""
        for modules and objects with no retrievable signature.
    """
    if inspect.ismodule(thing):
        return ""
    if isinstance(thing, property):
        # Properties are accessed bare, so the "signature" is just the name.
        func_sig = name
    else:
        try:
            sig = inspect.signature(thing)
        except TypeError:
            # Property-like object: introspect the getter instead.
            sig = inspect.signature(thing.fget)
        except ValueError:
            # Builtins/extension objects expose no signature.
            return ""
        func_sig = f"{name}{sig}"
    try:
        # Pretty-print long signatures with black; fall back to raw text.
        mode = black.FileMode(line_length=80)
        func_sig = black.format_str(func_sig, mode=mode).strip()
    except (ValueError, TypeError):
        pass
    return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string
    default : any
        Default value (if any) of the type

    Returns
    -------
    str
        String version of the type annotation
    """
    try:
        type_string = (
            f"`{typ.__name__}`"
            if typ.__module__ == "builtins"
            else f"`{typ.__module__}.{typ.__name__}`"
        )
    except AttributeError:
        # typing constructs (e.g. List[int]) have no __name__.
        type_string = f"`{str(typ)}`"
    if default is None:
        type_string = f"{type_string}, default ``None``"
    elif default is inspect._empty:
        # Identity check: the sentinel is a singleton, and ``==`` could
        # misfire (or raise) on defaults with overloaded equality.
        pass
    else:
        type_string = f"{type_string}, default ``{default}``"
    return type_string
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown header to prepend when any entries are produced

    Returns
    -------
    list of str
        Markdown formatted type list; empty when nothing was produced
    """
    lines = []
    docced = set()
    lines.append(header)
    try:
        # First pass: entries present in the docstring; annotated names
        # get one bullet each, unannotated names are grouped.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f" {' '.join(description)}\n\n")
        # Second pass over the same entries for names not yet emitted.
        # NOTE(review): every name is added to ``docced`` in the first
        # pass, so this only fires after a partial failure — confirm intent.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        # Malformed sections are reported but not fatal.
        print(e)
    return lines if len(lines) > 1 else []
def _split_props(thing, doc):
    """
    Separate documented properties from other attribute docs.

    Parameters
    ----------
    thing : class
        Class whose properties are inspected.
    doc : list of tuple
        Numpydoc ``Attributes`` entries ``(names, types, description)``.

    Returns
    -------
    tuple
        ``(property_names, remaining_docs)`` where ``remaining_docs``
        iterates over the non-property ``(names, types, desc)`` entries.
    """
    props = inspect.getmembers(thing, lambda o: isinstance(o, property))
    ps = []
    docs = [
        (*_get_names(names, types), names, types, desc) for names, types, desc in doc
    ]
    for prop_name, prop in props:
        # Drop doc entries that mention this property: properties are
        # documented from their own docstrings instead.
        # (The original iterated ``enumerate(docs)``, making ``d[0]`` an
        # int — the membership test raised TypeError whenever both props
        # and docs were non-empty, and ``remove`` could never match.)
        in_doc = [d for d in docs if prop_name in d[0]]
        for d in in_doc:
            docs.remove(d)
        ps.append(prop_name)
    if len(docs) > 0:
        _, _, names, types, descs = zip(*docs)
        return ps, zip(names, types, descs)
    return ps, []
def attributes_section(thing, doc, header_level):
    """
    Generate an attributes section for classes.

    Prefers type annotations, if they are present.

    Parameters
    ----------
    thing : class
        Class to document
    doc : dict
        Numpydoc output
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown formatted attribute list
    """
    # Get Attributes
    if not inspect.isclass(thing):
        return []
    # Properties are documented from their own docstrings elsewhere, so
    # split them out of the Attributes entries first.
    props, class_doc = _split_props(thing, doc["Attributes"])
    tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
    if len(tl) == 0 and len(props) > 0:
        # Only properties: emit the header with anchor links to each one.
        tl.append("\n### Attributes\n\n")
        for prop in props:
            tl.append(f"- [`{prop}`](#{prop})\n\n")
    return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum.

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, source link, summary and member list.
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # Was ``inspect.getdoc(thing)`` — an undefined name whose
        # NameError the bare except silently swallowed, so enum
        # summaries were never rendered.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # Enums without a (parseable) docstring get no summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function.

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines documenting *thing*.
    """
    # Enums get their own renderer (member list instead of methods).
    if type(thing) is enum.EnumMeta:
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Things without a parseable docstring still get header/signature.
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module.

    Parameters
    ----------
    module_name : str
        Dotted module name; its last component becomes the page title.
    module : module
        Imported module object to document.
    output_dir : str
        Root directory of the generated docs.
    source_location : str
        Base repository URL for source links.
    leaf : bool
        True for a plain ``.py`` module (written as ``<name>.md``);
        False for a package (written as ``<name>/index.md``).

    Returns
    -------
    tuple of (pathlib.Path, str)
        Absolute output path and the rendered markdown.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): alias_funcs (re-exported classes) is computed but never
    # rendered below — confirm whether aliases should be documented.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    # Enums first, then classes, each followed by its public methods
    # and properties defined in the same source file.
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
    """Build API markdown docs for MODULE_NAME into OUTPUT_DIR."""
    make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Build markdown documentation for *module_name* and all its submodules.

    Parameters
    ----------
    module_name : str
        Importable name of the root module.
    output_dir : str
        Directory the markdown tree is written into.
    source_location : str
        Base repository URL, used for source links.

    Returns
    -------
    list of tuple
        ``(source_file, rebuild_callable)`` pairs; calling the callable
        regenerates that file's documentation.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for sub_name, sub_module, leaf, file in get_all_modules_from_files(module):

        def do_doc(sub_name=sub_name, sub_module=sub_module, leaf=leaf):
            # Default-argument binding: the closures are stored and called
            # later, and without it every one would see the *last* loop
            # iteration's values (late binding).
            doc_path, doc = doc_module(
                sub_name, sub_module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
# Script entry point: dispatch to the click command.
if __name__ == "__main__":
    cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
get_source_link
|
python
|
def get_source_link(thing, source_location):
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
|
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L429-L472
|
[
"def escape(string):\n \"\"\"\n Escape underscores in markdown.\n\n Parameters\n ----------\n string : str\n String to escape\n\n Returns\n -------\n str\n The string, with `_`s escaped with backslashes\n \"\"\"\n return string.replace(\"_\", \"\\\\_\")\n",
"def get_line(thing):\n \"\"\"\n Get the line number for something.\n Parameters\n ----------\n thing : function, class, module\n\n Returns\n -------\n int\n Line number in the source file\n \"\"\"\n try:\n return inspect.getsourcelines(thing)[1]\n except TypeError:\n # Might be a property\n return inspect.getsourcelines(thing.fget)[1]\n except Exception as e:\n # print(thing)\n raise e\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Return the source line number where *thing* is defined.

    Parameters
    ----------
    thing : function, class, module
        Object to locate.

    Returns
    -------
    int
        Line number in the source file.
    """
    try:
        return inspect.getsourcelines(thing)[1]
    except TypeError:
        # Properties are not directly introspectable; use the getter.
        return inspect.getsourcelines(thing.fget)[1]
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Collect ``(module, relative_path)`` pairs for every submodule of *module*.

    Walks the package directory; an ``__init__.py`` maps to the package
    itself. ``_``-prefixed directories and basenames listed in *hide* are
    skipped. (The mutable default is shared across calls but never
    mutated here.)

    Parameters
    ----------
    module : module
        Imported package to walk.
    hide : list of str
        Module basenames to exclude.

    Returns
    -------
    list of tuple
        Sorted so each package's ``__init__.py`` comes first.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # "" for __init__.py so the dotted name resolves to
                    # the package rather than a submodule.
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Walk *module*'s source tree and import every package and submodule.

    Parameters
    ----------
    module : module
        Imported root module.
    hide : list of str
        Module basenames to exclude. (Mutable default; never mutated here.)

    Returns
    -------
    set of tuple
        Entries of ``(dotted_name, module, is_leaf, path)`` where
        ``is_leaf`` is True for plain ``.py`` files and False for packages.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    # Walk relative to the package's parent so directory parts line up
    # with dotted names; the cwd is restored afterwards.
    os.chdir(module_file)
    for root, dirs, files in os.walk(module.__name__):
        module_path = pathlib.Path(root)
        if not module_path.parts[-1].startswith("_"):
            try:
                # Rebinds the *module* parameter to the current package.
                module = importlib.import_module(".".join(module_path.parts))
                if not module.__name__.startswith("_"):
                    modules.add((module.__name__, module, False, module_path))
                for file in files:
                    module_name = inspect.getmodulename(file)
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join(
                                (module_path / inspect.getmodulename(file)).parts
                            )
                        )
                        if not module.__name__.startswith(
                            "_"
                        ) and not submodule.__name__.startswith("_"):
                            modules.add(
                                (
                                    submodule.__name__,
                                    submodule,
                                    True,
                                    module_path.absolute() / file,
                                )
                            )
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    os.chdir(dir_was)
    return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
    """
    Generate markdown for a Parameters section.

    Parameters
    ----------
    thing : function
        Function to pull the signature from.
    doc : dict
        Dict from numpydoc.
    header_level : int
        Number of `#`s to use for the header.

    Returns
    -------
    list of str
        Markdown lines for the parameters section.
    """
    # (Removed an unused ``lines = []`` local and fixed the "functuon"
    # docstring typo; behavior is unchanged.)
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
get_signature
|
python
|
def get_signature(name, thing):
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
|
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L475-L503
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
_get_names
|
python
|
def _get_names(names, types):
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
|
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L506-L516
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown header line prepended to the list

    Returns
    -------
    list of str
        Markdown formatted type list; empty when nothing beyond the header
        was produced
    """
    lines = []
    docced = set()
    lines.append(header)
    try:
        # First pass: entries present in the docstring; annotations win
        # over docstring types when both exist.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f"    {' '.join(description)}\n\n")
        # Second pass: annotated parameters that were missing from the
        # docstring entirely.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        # NOTE(review): best-effort — a malformed doc list is reported and
        # whatever was built so far is returned.
        print(e)
    return lines if len(lines) > 1 else []
def _split_props(thing, doc):
    """
    Separate properties from other kinds of member.

    Parameters
    ----------
    thing : class
        Class whose properties are looked up
    doc : list of tuple
        Numpydoc attribute entries ``(names, types, description)``

    Returns
    -------
    tuple
        ``(property_names, remaining_doc)`` — the names of properties on
        *thing*, and an iterable of the original-format entries not
        claimed by a property.
    """
    props = inspect.getmembers(thing, lambda o: isinstance(o, property))
    ps = []
    docs = [
        (*_get_names(names, types), names, types, desc) for names, types, desc in doc
    ]
    for prop_name, prop in props:
        # FIX: iterate the doc entries themselves. The old code iterated
        # ``enumerate(docs)``, so ``d[0]`` was the integer index —
        # ``prop_name in d[0]`` raised TypeError and ``docs.remove(d)``
        # could never have matched an entry.
        in_doc = [d for d in docs if prop_name in d[0]]
        for d in in_doc:
            docs.remove(d)
        ps.append(prop_name)
    if len(docs) > 0:
        _, _, names, types, descs = zip(*docs)
        return ps, zip(names, types, descs)
    return ps, []
def attributes_section(thing, doc, header_level):
    """
    Build the markdown "Attributes" section for a class.

    Type annotations are preferred over docstring types when present;
    properties are rendered as anchor links to their own method docs.

    Parameters
    ----------
    thing : class
        Class being documented (non-classes yield an empty list)
    doc : dict
        Parsed numpydoc data for the class
    header_level : int
        Heading level of the enclosing section (currently unused here)

    Returns
    -------
    list of str
        Markdown lines for the attribute list
    """
    if not inspect.isclass(thing):
        return []
    prop_names, remaining_doc = _split_props(thing, doc["Attributes"])
    section = type_list(inspect.signature(thing), remaining_doc, "\n### Attributes\n\n")
    if not section and prop_names:
        # Only properties were documented: emit the header plus links.
        section.append("\n### Attributes\n\n")
        for prop_name in prop_names:
            section.append(f"- [`{prop_name}`](#{prop_name})\n\n")
    return section
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum.

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, signature, source link, docstring summary
        (when parseable) and one bullet per member.
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # FIX: this previously read ``inspect.getdoc(thing)`` — ``thing``
        # is not defined in this function, so every call raised NameError,
        # the bare except swallowed it, and enum summaries were silently
        # dropped.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # Best-effort: enums without a parseable docstring get no summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, signature, source link and any
        docstring-derived sections that could be parsed.
    """
    # Enums get their own, simpler renderer.
    if type(thing) is enum.EnumMeta:
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        # Parse the docstring with numpydoc and render each section in a
        # fixed order.
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Best-effort: objects without a parseable docstring keep only the
        # header / signature / source-link lines.
        # print(f"No docstring for {name}, src {source_location}: {e}")
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module

    Parameters
    ----------
    module_name : str
        Dotted name of the module
    module : module
        The imported module object
    output_dir : str
        Root directory the markdown tree lives under
    source_location : str
        Base repository URL used for source links
    leaf : bool
        True for a plain ``.py`` module (written as ``<name>.md``),
        False for a package (written as ``<name>/index.md``)

    Returns
    -------
    tuple of (pathlib.Path, str)
        Absolute path of the markdown file and its rendered contents.
        Note: the parent directory is created here as a side effect.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): computed but never used below — presumably intended
    # for re-exported classes; confirm before removing.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    # NOTE(review): module_path is unused below.
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    # Enums first, then classes, each followed by its public methods and
    # properties that are defined in the same source file.
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
    # Console entry point; a comment (not a docstring) is used on purpose,
    # because click would surface a docstring as the command's --help text.
    make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Render markdown documentation for a module tree and write it to disk.

    Parameters
    ----------
    module_name : str
        Importable name of the root module
    output_dir : str
        Directory the markdown tree is written into
    source_location : str
        Base URL of the repository, used for source links

    Returns
    -------
    list of tuple
        ``(source_file_path, rebuild_callable)`` pairs; calling the
        callable regenerates the markdown for that module.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):
        # print(module_name)

        # FIX: bind the loop variables as default arguments. These closures
        # are stored in ``files`` and may be called after the loop ends;
        # with late binding every stored callable rebuilt only the *last*
        # module visited.
        def do_doc(module_name=module_name, module=module, leaf=leaf):
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
if __name__ == "__main__":
    # Allow invoking this file directly as a script.
    cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
string_annotation
|
python
|
def string_annotation(typ, default):
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
|
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L519-L549
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Return the source line number where *thing* is defined.

    Parameters
    ----------
    thing : function, class, or module
        Object to locate; properties are resolved through their getter.

    Returns
    -------
    int
        Line number in the source file
    """
    try:
        _, lineno = inspect.getsourcelines(thing)
    except TypeError:
        # Properties are not directly introspectable; use the getter.
        _, lineno = inspect.getsourcelines(thing.fget)
    except Exception as e:
        raise e
    return lineno
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Walk a package directory, importing each submodule and pairing it
    with its file path.

    Parameters
    ----------
    module : module
        Package whose directory is walked
    hide : list of str
        Module basenames to skip (mutable default — do not mutate)

    Returns
    -------
    list of tuple
        ``(submodule, relative_path)`` pairs, ordered by ``_sort_modules``
        so each package's ``__init__.py`` comes first.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        # Skip private directories such as __pycache__.
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # A package's __init__.py is imported under the package name.
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Walk *module*'s package directory and import every discoverable module.

    Parameters
    ----------
    module : module
        Root module whose source tree is walked
    hide : list of str
        Module basenames to skip (mutable default — do not mutate)

    Returns
    -------
    set of tuple
        ``(name, module, is_leaf, path)`` entries; ``is_leaf`` is True for
        plain ``.py`` files, False for packages.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    os.chdir(module_file)
    # FIX: restore the working directory even when an import raises;
    # previously an exception mid-walk leaked the chdir to the caller.
    try:
        for root, dirs, files in os.walk(module.__name__):
            module_path = pathlib.Path(root)
            if not module_path.parts[-1].startswith("_"):
                try:
                    module = importlib.import_module(".".join(module_path.parts))
                    if not module.__name__.startswith("_"):
                        modules.add((module.__name__, module, False, module_path))
                    for file in files:
                        module_name = inspect.getmodulename(file)
                        if module_name is not None and module_name not in hide:
                            submodule = importlib.import_module(
                                ".".join(
                                    (module_path / inspect.getmodulename(file)).parts
                                )
                            )
                            if not module.__name__.startswith(
                                "_"
                            ) and not submodule.__name__.startswith("_"):
                                modules.add(
                                    (
                                        submodule.__name__,
                                        submodule,
                                        True,
                                        module_path.absolute() / file,
                                    )
                                )
                except ModuleNotFoundError:
                    print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    finally:
        os.chdir(dir_was)
    return modules
def get_classes(module):
    """
    Return the public, non-enum classes defined directly in *module*.

    Returns
    -------
    set of tuple
        ``(name, class)`` pairs
    """
    found = set()
    for name, cls in inspect.getmembers(module, inspect.isclass):
        if name.startswith("_"):
            continue
        if cls.__module__ != module.__name__:
            continue
        if type(cls) is enum.EnumMeta:
            continue
        found.add((name, cls))
    return found
def get_enums(module):
    """
    Return the public enums defined directly in *module*.

    Returns
    -------
    set of tuple
        ``(name, enum_class)`` pairs
    """
    return {
        (name, cls)
        for name, cls in inspect.getmembers(module, inspect.isclass)
        if not name.startswith("_")
        and cls.__module__ == module.__name__
        and type(cls) is enum.EnumMeta
    }
def get_funcs(module):
    """
    Return the public functions defined directly in *module*.

    Returns
    -------
    set of tuple
        ``(name, function)`` pairs
    """
    return {
        (name, fn)
        for name, fn in inspect.getmembers(module, inspect.isfunction)
        if not name.startswith("_") and fn.__module__ == module.__name__
    }
def get_available_funcs(module):
    """
    Return public functions reachable from *module* that live anywhere in
    the same top-level package (including re-exports).

    Returns
    -------
    set of tuple
        ``(name, function)`` pairs
    """
    package_root = module.__name__.split(".")[0]
    return {
        (name, fn)
        for name, fn in inspect.getmembers(module, inspect.isfunction)
        if not name.startswith("_") and fn.__module__.split(".")[0] == package_root
    }
def get_available_classes(module):
    """
    Return public classes reachable from *module* that live anywhere in
    the same top-level package (including re-exports).

    Returns
    -------
    set of tuple
        ``(name, class)`` pairs
    """
    package_root = module.__name__.split(".")[0]
    return {
        (name, cls)
        for name, cls in inspect.getmembers(module, inspect.isclass)
        if not name.startswith("_") and cls.__module__.split(".")[0] == package_root
    }
def deffed_here(thing, holder):
    """Return True when *thing* is defined in the same source file as *holder*."""
    thing_file = inspect.getfile(thing)
    holder_file = inspect.getfile(holder)
    return thing_file == holder_file
def fix_footnotes(s):
    """
    Convert reST footnote references like ``[1]_`` into markdown
    footnote references like ``[^1]``.
    """
    # Raw strings: "\[" is an invalid escape sequence in a plain string
    # literal (DeprecationWarning today, an error in future Pythons).
    return re.subn(r"\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
    """
    Convert a numpydoc type string into markdown with each type quoted
    in literal backticks.

    Handles ``default ...`` suffixes, ``{...}`` choice sets, subscripted
    types like ``list[int]``, and ``,`` / `` or ``-separated alternatives.

    Parameters
    ----------
    types : str
        Raw type text from a numpydoc field

    Returns
    -------
    str
        Comma-separated, backtick-quoted markdown for the types
    """
    default = re.findall("default .+", types)
    mangled = []
    try:
        if len(default):
            # Quote the default value and strip it from the type text.
            default = re.sub("default (.+)", r"default ``\1``", default[0])
            mangled.append(default)
            types = re.sub("default .+", "", types)
        # Pull out {...} choice sets and subscripted types before splitting
        # on commas, so their internal commas are not treated as separators.
        curlied = re.findall("{.+}", types)
        no_curls = re.subn("{.+},?", "", types)[0]
        annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
        no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
        ts = [t.strip() for t in no_curls.split(",")]
        ts = [t.split(" or ") for t in ts]
        ts = [item for sublist in ts for item in sublist if item != ""]
        types = ts + curlied + annotated
        for ix, typ in enumerate(types):
            # Quote each part of an "X of Y" type separately.
            ts = [f"``{t}``" for t in typ.split(" of ")]
            mangled.append(" of ".join(ts))
    except Exception as e:
        # print(e)
        # print(default)
        # print(types)
        raise e
    # Reversed so the default (appended first) ends up last in the output.
    output = reversed(mangled)
    return ", ".join(output)
def mangle_examples(examples):
    """
    Convert a doctest-style Examples section into markdown, fencing the
    ``>>>`` runs as python code blocks and joining the prose in between.

    Parameters
    ----------
    examples : list of str
        Raw lines of the numpydoc Examples section

    Returns
    -------
    list of str
        Markdown fragments, ending with a blank separator
    """
    # Simple two-state machine: inside vs. outside a ``>>>`` code run.
    was_in_python = False
    in_python = False
    lines = []
    for line in examples:
        if line.startswith(">>>"):
            in_python = True
        if line == "":
            in_python = False
        if not in_python and was_in_python:
            # A code run just ended: close the fence.
            lines.append("\n```\n")
        elif not in_python:
            lines.append(f"{line} ")
        elif in_python and not was_in_python:
            # A code run just started: open the fence, emit the first line.
            lines.append("\n```python\n")
            lines.append(re.sub(">>> ", "", line) + "\n")
        else:
            lines.append(re.sub(">>> ", "", line) + "\n")
        was_in_python = in_python
    if was_in_python:
        # Close a fence left open at the end of the section.
        lines.append("\n```")
    lines.append("\n\n")
    return lines
def notes_section(doc):
    """
    Render the numpydoc "Notes" section as an mkdocs admonition block.

    Parameters
    ----------
    doc : dict
        Parsed numpydoc data

    Returns
    -------
    list of str
        Markdown lines, or an empty list when there are no notes
    """
    if "Notes" not in doc or len(doc["Notes"]) == 0:
        return []
    body = " ".join(doc["Notes"])
    return ["!!! note\n", f"    {body}\n\n"]
def refs_section(doc):
    """
    Generate a markdown footnote list from the numpydoc References section.

    Each reference is expected to look like ``.. [1] Body text``; the
    numeric label becomes a markdown footnote ``[^1]``.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc

    Returns
    -------
    list of str
        Markdown for references section
    """
    lines = []
    for ref in doc.get("References", []):
        # First bracketed number is the footnote label.
        number = re.findall("\[([0-9]+)\]", ref)[0]
        # Everything after the ".. [n]" prefix is the body.
        body = " ".join(ref.split(" ")[2:])
        lines.append(f"[^{number}]: {body}" + "\n\n")
    return lines
def examples_section(doc, header_level):
    """
    Generate markdown for Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        # NOTE(review): ``egs`` is computed but never used.
        egs = "\n".join(doc["Examples"])
        lines += mangle_examples(doc["Examples"])
    return lines
def returns_section(thing, doc, header_level):
    """
    Generate markdown for Returns section.

    Prefers the ``-> ...`` return annotation on *thing* when present,
    combining it with (or overriding) the docstring's Returns entries.

    Parameters
    ----------
    thing : function
        Function to produce returns for
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the returns section (may be empty)
    """
    lines = []
    return_type = None
    try:
        return_type = thing.__annotations__["return"]
    except AttributeError:
        try:
            # Properties: look at the getter's annotations instead.
            return_type = thing.fget.__annotations__["return"]
        except:
            pass
    except KeyError:
        pass
    if return_type is None:
        return_type = ""
    else:
        # print(f"{thing} has annotated return type {return_type}")
        try:
            # Qualify non-builtin types with their defining module.
            return_type = (
                f"{return_type.__name__}"
                if return_type.__module__ == "builtins"
                else f"{return_type.__module__}.{return_type.__name__}"
            )
        except AttributeError:
            # typing constructs and similar objects have no __name__.
            return_type = str(return_type)
        # print(return_type)
    try:
        # NOTE(review): ``and`` binds tighter than ``or`` here; when there
        # is an annotation but no "Returns" key, the doc["Returns"] lookups
        # below raise and the except discards the section — confirm intended.
        if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
            lines.append(f"{'#'*(header_level+1)} Returns\n")
            if return_type != "" and len(doc["Returns"]) == 1:
                # One documented value plus an annotation: the annotation
                # wins for the type, the docstring supplies name and text.
                name, typ, desc = doc["Returns"][0]
                if typ != "":
                    lines.append(f"- `{name}`: ``{return_type}``")
                else:
                    lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
                if desc != "":
                    lines.append(f"    {' '.join(desc)}\n\n")
            elif return_type != "":
                lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
            else:
                # Docstring-only returns.
                for name, typ, desc in doc["Returns"]:
                    if ":" in name:
                        name, typ = name.split(":")
                    if typ != "":
                        line = f"- `{name}`: {mangle_types(typ)}"
                    else:
                        line = f"- {mangle_types(name)}"
                    line += "\n\n"
                    lines.append(line)
                    lines.append(f"    {' '.join(desc)}\n\n")
    except Exception as e:
        # Best-effort: a malformed Returns block yields whatever was built.
        # print(e)
        # print(doc)
        pass
    return lines
def summary(doc):
    """
    Render the numpydoc Summary and Extended Summary sections.

    Parameters
    ----------
    doc : dict
        Output from numpydoc

    Returns
    -------
    list of str
        Markdown strings
    """
    lines = []
    for key in ("Summary", "Extended Summary"):
        if key in doc and len(doc[key]) > 0:
            # Join the raw lines and rewrite reST footnotes as markdown.
            lines.append(fix_footnotes(" ".join(doc[key])))
            lines.append("\n")
    return lines
def params_section(thing, doc, header_level):
    """
    Generate markdown for the Parameters section.

    Parameters
    ----------
    thing : function
        Function to produce parameters from
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the parameter list (empty when there is nothing to show)
    """
    # The unused ``lines`` accumulator was removed; type_list builds the
    # whole section itself.
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
def escape(string):
    """
    Escape underscores for markdown.

    Parameters
    ----------
    string : str
        Text to escape

    Returns
    -------
    str
        The text with every ``_`` preceded by a backslash
    """
    parts = string.split("_")
    return "\\_".join(parts)
def get_source_link(thing, source_location):
    """
    Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for
    source_location : str
        GitHub url of the source code

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found
    """
    try:
        lineno = get_line(thing)
        try:
            owner_module = inspect.getmodule(thing)
            assert owner_module is not None
        except (TypeError, AssertionError):
            # Properties: resolve the module through the getter.
            owner_module = inspect.getmodule(thing.fget)
        # Map the dotted module name onto a repository-relative file path.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Best-effort: anything without introspectable source gets no link.
        # NOTE(review): the commented prints reference names that may be
        # unbound when the failure happens early — re-order before re-enabling.
        # print("Failed to find source file.")
        # print(e)
        # print(lineno)
        # print(thing)
        # print(owner_module)
        # print(thing_file)
        # print(source_location)
        pass
    return ""
def get_signature(name, thing):
    """
    Get the signature for a function or class, formatted nicely if possible.

    Parameters
    ----------
    name : str
        Name of the thing, used as the first part of the signature
    thing : class or function
        Thing to get the signature of

    Returns
    -------
    str
        A fenced ``python`` markdown block containing the signature, or an
        empty string for modules and objects with no introspectable signature.
    """
    # Modules have no call signature at all.
    if inspect.ismodule(thing):
        return ""
    if isinstance(thing, property):
        # Properties are shown by name only.
        func_sig = name
    else:
        try:
            sig = inspect.signature(thing)
        except TypeError:
            # Property-like object: use the getter's signature instead.
            sig = inspect.signature(thing.fget)
        except ValueError:
            # Some builtins / C-implemented objects expose no signature.
            return ""
        func_sig = f"{name}{sig}"
    try:
        # Pretty-print with black when it can parse the signature text.
        mode = black.FileMode(line_length=80)
        func_sig = black.format_str(func_sig, mode=mode).strip()
    except (ValueError, TypeError):
        # Fall back to the unformatted signature.
        pass
    return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown header line prepended to the list

    Returns
    -------
    list of str
        Markdown formatted type list; empty when nothing beyond the header
        was produced
    """
    lines = []
    docced = set()
    lines.append(header)
    try:
        # First pass: entries present in the docstring; annotations win
        # over docstring types when both exist.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f"    {' '.join(description)}\n\n")
        # Second pass: annotated parameters that were missing from the
        # docstring entirely.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        # NOTE(review): best-effort — a malformed doc list is reported and
        # whatever was built so far is returned.
        print(e)
    return lines if len(lines) > 1 else []
def _split_props(thing, doc):
    """
    Separate properties from other kinds of member.

    Parameters
    ----------
    thing : class
        Class whose properties are looked up
    doc : list of tuple
        Numpydoc attribute entries ``(names, types, description)``

    Returns
    -------
    tuple
        ``(property_names, remaining_doc)`` — the names of properties on
        *thing*, and an iterable of the original-format entries not
        claimed by a property.
    """
    props = inspect.getmembers(thing, lambda o: isinstance(o, property))
    ps = []
    docs = [
        (*_get_names(names, types), names, types, desc) for names, types, desc in doc
    ]
    for prop_name, prop in props:
        # FIX: iterate the doc entries themselves. The old code iterated
        # ``enumerate(docs)``, so ``d[0]`` was the integer index —
        # ``prop_name in d[0]`` raised TypeError and ``docs.remove(d)``
        # could never have matched an entry.
        in_doc = [d for d in docs if prop_name in d[0]]
        for d in in_doc:
            docs.remove(d)
        ps.append(prop_name)
    if len(docs) > 0:
        _, _, names, types, descs = zip(*docs)
        return ps, zip(names, types, descs)
    return ps, []
def attributes_section(thing, doc, header_level):
    """
    Generate an attributes section for classes.

    Prefers type annotations, if they are present.

    Parameters
    ----------
    thing : class
        Class to document (non-classes yield an empty list)
    doc : dict
        Numpydoc output
    header_level : int
        Number of `#`s to use for header (currently unused here)

    Returns
    -------
    list of str
        Markdown formatted attribute list
    """
    # Get Attributes
    if not inspect.isclass(thing):
        return []
    # Properties become anchor links to their own method docs; the rest
    # are rendered as a plain type list.
    props, class_doc = _split_props(thing, doc["Attributes"])
    tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
    if len(tl) == 0 and len(props) > 0:
        # Only properties were documented: emit the header plus links.
        tl.append("\n### Attributes\n\n")
        for prop in props:
            tl.append(f"- [`{prop}`](#{prop})\n\n")
    return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum.

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, signature, source link, docstring summary
        (when parseable) and one bullet per member.
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # FIX: this previously read ``inspect.getdoc(thing)`` — ``thing``
        # is not defined in this function, so every call raised NameError,
        # the bare except swallowed it, and enum summaries were silently
        # dropped.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # Best-effort: enums without a parseable docstring get no summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, signature, source link and any
        docstring-derived sections that could be parsed.
    """
    # Enums get their own, simpler renderer.
    if type(thing) is enum.EnumMeta:
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        # Parse the docstring with numpydoc and render each section in a
        # fixed order.
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Best-effort: objects without a parseable docstring keep only the
        # header / signature / source-link lines.
        # print(f"No docstring for {name}, src {source_location}: {e}")
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module

    Parameters
    ----------
    module_name : str
        Dotted name of the module
    module : module
        The imported module object
    output_dir : str
        Root directory the markdown tree lives under
    source_location : str
        Base repository URL used for source links
    leaf : bool
        True for a plain ``.py`` module (written as ``<name>.md``),
        False for a package (written as ``<name>/index.md``)

    Returns
    -------
    tuple of (pathlib.Path, str)
        Absolute path of the markdown file and its rendered contents.
        Note: the parent directory is created here as a side effect.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): computed but never used below — presumably intended
    # for re-exported classes; confirm before removing.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    # NOTE(review): module_path is unused below.
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    # Enums first, then classes, each followed by its public methods and
    # properties that are defined in the same source file.
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
    # Console entry point; a comment (not a docstring) is used on purpose,
    # because click would surface a docstring as the command's --help text.
    make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Render markdown documentation for a module tree and write it to disk.

    Parameters
    ----------
    module_name : str
        Importable name of the root module
    output_dir : str
        Directory the markdown tree is written into
    source_location : str
        Base URL of the repository, used for source links

    Returns
    -------
    list of tuple
        ``(source_file_path, rebuild_callable)`` pairs; calling the
        callable regenerates the markdown for that module.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):
        # print(module_name)

        # FIX: bind the loop variables as default arguments. These closures
        # are stored in ``files`` and may be called after the loop ends;
        # with late binding every stored callable rebuilt only the *last*
        # module visited.
        def do_doc(module_name=module_name, module=module, leaf=leaf):
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
if __name__ == "__main__":
    # Allow invoking this file directly as a script.
    cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
type_list
|
python
|
def type_list(signature, doc, header):
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
|
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L552-L612
|
[
"def mangle_types(types):\n default = re.findall(\"default .+\", types)\n mangled = []\n try:\n if len(default):\n default = re.sub(\"default (.+)\", r\"default ``\\1``\", default[0])\n mangled.append(default)\n types = re.sub(\"default .+\", \"\", types)\n curlied = re.findall(\"{.+}\", types)\n no_curls = re.subn(\"{.+},?\", \"\", types)[0]\n annotated = re.findall(\"[a-zA-Z]+\\[.+\\]\", no_curls)\n no_curls = re.subn(\"[a-zA-Z]+\\[.+\\],?\", \"\", no_curls)[0]\n ts = [t.strip() for t in no_curls.split(\",\")]\n ts = [t.split(\" or \") for t in ts]\n ts = [item for sublist in ts for item in sublist if item != \"\"]\n types = ts + curlied + annotated\n for ix, typ in enumerate(types):\n ts = [f\"``{t}``\" for t in typ.split(\" of \")]\n mangled.append(\" of \".join(ts))\n except Exception as e:\n # print(e)\n # print(default)\n # print(types)\n raise e\n output = reversed(mangled)\n\n return \", \".join(output)\n",
"def _get_names(names, types):\n \"\"\"\n Get names, bearing in mind that there might be no name,\n no type, and that the `:` separator might be wrongly used.\n \"\"\"\n if types == \"\":\n try:\n names, types = names.split(\":\")\n except:\n pass\n return names.split(\",\"), types\n",
"def string_annotation(typ, default):\n \"\"\"\n Construct a string representation of a type annotation.\n\n Parameters\n ----------\n typ : type\n Type to turn into a string\n default : any\n Default value (if any) of the type\n\n Returns\n -------\n str\n String version of the type annotation\n \"\"\"\n try:\n type_string = (\n f\"`{typ.__name__}`\"\n if typ.__module__ == \"builtins\"\n else f\"`{typ.__module__}.{typ.__name__}`\"\n )\n except AttributeError:\n type_string = f\"`{str(typ)}`\"\n if default is None:\n type_string = f\"{type_string}, default ``None``\"\n elif default == inspect._empty:\n pass\n else:\n type_string = f\"{type_string}, default ``{default}``\"\n return type_string\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Return the source line number where *thing* is defined.

    Parameters
    ----------
    thing : function, class, or module
        Object to locate; properties are resolved through their getter.

    Returns
    -------
    int
        Line number in the source file
    """
    try:
        _, lineno = inspect.getsourcelines(thing)
    except TypeError:
        # Properties are not directly introspectable; use the getter.
        _, lineno = inspect.getsourcelines(thing.fget)
    except Exception as e:
        raise e
    return lineno
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Walk a package directory, importing each submodule and pairing it
    with its file path.

    Parameters
    ----------
    module : module
        Package whose directory is walked
    hide : list of str
        Module basenames to skip (mutable default — do not mutate)

    Returns
    -------
    list of tuple
        ``(submodule, relative_path)`` pairs, ordered by ``_sort_modules``
        so each package's ``__init__.py`` comes first.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        # Skip private directories such as __pycache__.
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # A package's __init__.py is imported under the package name.
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Walk *module*'s package directory and import every discoverable module.

    Parameters
    ----------
    module : module
        Root module whose source tree is walked
    hide : list of str
        Module basenames to skip (mutable default — do not mutate)

    Returns
    -------
    set of tuple
        ``(name, module, is_leaf, path)`` entries; ``is_leaf`` is True for
        plain ``.py`` files, False for packages.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    os.chdir(module_file)
    # FIX: restore the working directory even when an import raises;
    # previously an exception mid-walk leaked the chdir to the caller.
    try:
        for root, dirs, files in os.walk(module.__name__):
            module_path = pathlib.Path(root)
            if not module_path.parts[-1].startswith("_"):
                try:
                    module = importlib.import_module(".".join(module_path.parts))
                    if not module.__name__.startswith("_"):
                        modules.add((module.__name__, module, False, module_path))
                    for file in files:
                        module_name = inspect.getmodulename(file)
                        if module_name is not None and module_name not in hide:
                            submodule = importlib.import_module(
                                ".".join(
                                    (module_path / inspect.getmodulename(file)).parts
                                )
                            )
                            if not module.__name__.startswith(
                                "_"
                            ) and not submodule.__name__.startswith("_"):
                                modules.add(
                                    (
                                        submodule.__name__,
                                        submodule,
                                        True,
                                        module_path.absolute() / file,
                                    )
                                )
                except ModuleNotFoundError:
                    print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    finally:
        os.chdir(dir_was)
    return modules
def get_classes(module):
    """
    Return the public, non-enum classes defined directly in *module*.

    Returns
    -------
    set of tuple
        ``(name, class)`` pairs
    """
    found = set()
    for name, cls in inspect.getmembers(module, inspect.isclass):
        if name.startswith("_"):
            continue
        if cls.__module__ != module.__name__:
            continue
        if type(cls) is enum.EnumMeta:
            continue
        found.add((name, cls))
    return found
def get_enums(module):
    """
    Return the public enums defined directly in *module*.

    Returns
    -------
    set of tuple
        ``(name, enum_class)`` pairs
    """
    return {
        (name, cls)
        for name, cls in inspect.getmembers(module, inspect.isclass)
        if not name.startswith("_")
        and cls.__module__ == module.__name__
        and type(cls) is enum.EnumMeta
    }
def get_funcs(module):
    """
    Return the public functions defined directly in *module*.

    Returns
    -------
    set of tuple
        ``(name, function)`` pairs
    """
    return {
        (name, fn)
        for name, fn in inspect.getmembers(module, inspect.isfunction)
        if not name.startswith("_") and fn.__module__ == module.__name__
    }
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # BUG FIX: previously read ``inspect.getdoc(thing)``; ``thing`` is
        # undefined in this scope, so the NameError was silently swallowed
        # and enum docstrings never appeared in the output.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # Enums without a parseable docstring simply get no summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Build markdown API docs for every submodule of *module_name*.

    Parameters
    ----------
    module_name : str
        Importable name of the package to document.
    output_dir : str
        Directory the markdown tree is written into.
    source_location : str
        Base URL of the repository, used for source links.

    Returns
    -------
    list of tuple
        ``(file, rebuild)`` pairs, where ``rebuild`` regenerates the doc
        page for that source file.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):
        # BUG FIX: bind the loop variables as defaults. Python closures are
        # late-binding, so without this every ``do_doc`` stored in ``files``
        # would rebuild only the *last* module seen in the loop.
        def do_doc(module_name=module_name, module=module, leaf=leaf):
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
_split_props
|
python
|
def _split_props(thing, doc):
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
|
Separate properties from other kinds of member.
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L615-L632
| null |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Return the line number at which *thing* is defined.

    Parameters
    ----------
    thing : function, class, module

    Returns
    -------
    int
        Line number in the source file
    """
    try:
        _, lineno = inspect.getsourcelines(thing)
    except TypeError:
        # ``thing`` may be a property; introspect its getter instead.
        _, lineno = inspect.getsourcelines(thing.fget)
    return lineno
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Collect importable submodule files under *module*'s directory.

    Walks the package directory on disk, imports each discovered
    submodule, and pairs it with its repo-relative file path.

    Parameters
    ----------
    module : module
        Package whose directory is walked.
    hide : list of str
        Module basenames to skip.  NOTE(review): mutable default — never
        mutated here, but callers should not rely on sharing it.

    Returns
    -------
    list of tuple
        ``(module, Path)`` pairs, sorted so ``__init__.py`` comes first.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        # Directory path relative to the package's parent, i.e. something
        # importable like ``pkg/sub``.
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # ``__init__.py`` maps to the package itself (empty name).
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Discover every importable (sub)module beneath *module*'s package.

    Walks the package source tree, importing each non-underscore package
    directory and each module file found inside it.

    Parameters
    ----------
    module : module
        Root package to walk.
    hide : list of str
        Module basenames to skip.  NOTE(review): mutable default — never
        mutated here, but callers should not rely on sharing it.

    Returns
    -------
    set of tuple
        ``(name, module, is_leaf, path)`` — ``is_leaf`` is True for a
        module file, False for a package directory.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    # The walk and imports below use paths relative to the package's
    # parent directory; the original cwd is restored before returning.
    os.chdir(module_file)
    for root, dirs, files in os.walk(module.__name__):
        module_path = pathlib.Path(root)
        if not module_path.parts[-1].startswith("_"):
            try:
                # NOTE(review): ``module`` is rebound here, shadowing the
                # parameter for the rest of the loop body.
                module = importlib.import_module(".".join(module_path.parts))
                if not module.__name__.startswith("_"):
                    modules.add((module.__name__, module, False, module_path))
                for file in files:
                    module_name = inspect.getmodulename(file)
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join(
                                (module_path / inspect.getmodulename(file)).parts
                            )
                        )
                        if not module.__name__.startswith(
                            "_"
                        ) and not submodule.__name__.startswith("_"):
                            modules.add(
                                (
                                    submodule.__name__,
                                    submodule,
                                    True,
                                    module_path.absolute() / file,
                                )
                            )
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    os.chdir(dir_was)
    return modules
def get_classes(module):
    """
    Public, non-enum classes defined in *module* itself.

    Returns
    -------
    set of tuple
        ``(name, class)`` pairs.
    """
    members = inspect.getmembers(module, inspect.isclass)
    return {
        (name, cls)
        for name, cls in members
        if not name.startswith("_")
        and cls.__module__ == module.__name__
        and type(cls) is not enum.EnumMeta
    }
def get_enums(module):
    """
    Public enums defined in *module* itself.

    Returns
    -------
    set of tuple
        ``(name, EnumMeta)`` pairs.
    """
    members = inspect.getmembers(module, inspect.isclass)
    return {
        (name, cls)
        for name, cls in members
        if not name.startswith("_")
        and cls.__module__ == module.__name__
        and type(cls) is enum.EnumMeta
    }
def get_funcs(module):
    """
    Public functions defined in *module* itself.

    Returns
    -------
    set of tuple
        ``(name, function)`` pairs.
    """
    return {
        (name, func)
        for name, func in inspect.getmembers(module, inspect.isfunction)
        if not name.startswith("_") and func.__module__ == module.__name__
    }
def get_available_funcs(module):
    """
    Public functions reachable in *module* that originate anywhere inside
    the same top-level package (including re-exports from submodules).

    Returns
    -------
    set of tuple
        ``(name, function)`` pairs.
    """
    root = module.__name__.split(".")[0]
    return {
        (name, func)
        for name, func in inspect.getmembers(module, inspect.isfunction)
        if not name.startswith("_") and func.__module__.split(".")[0] == root
    }
def get_available_classes(module):
    """
    Public classes reachable in *module* that originate anywhere inside
    the same top-level package (including re-exports from submodules).

    Returns
    -------
    set of tuple
        ``(name, class)`` pairs.
    """
    root = module.__name__.split(".")[0]
    return {
        (name, cls)
        for name, cls in inspect.getmembers(module, inspect.isclass)
        if not name.startswith("_") and cls.__module__.split(".")[0] == root
    }
def deffed_here(thing, holder):
    """True when *thing* is defined in the same source file as *holder*."""
    thing_file = inspect.getfile(thing)
    holder_file = inspect.getfile(holder)
    return thing_file == holder_file
def fix_footnotes(s):
    """Convert numpydoc footnote references ``[1]_`` to markdown ``[^1]``."""
    replaced, _ = re.subn(r"\[([0-9]+)\]_", r"[^\1]", s)
    return replaced
def mangle_types(types):
    """
    Convert a numpydoc type string into markdown with code formatting.

    Splits a raw type description (e.g. ``"int or str, default 1"``) into
    its component types, wraps each in double backticks, and moves any
    ``default ...`` clause to the end.

    Parameters
    ----------
    types : str
        Raw type description from a numpydoc field.

    Returns
    -------
    str
        Comma-joined, backtick-wrapped type list.
    """
    default = re.findall("default .+", types)
    mangled = []
    try:
        if len(default):
            default = re.sub("default (.+)", r"default ``\1``", default[0])
            mangled.append(default)
            types = re.sub("default .+", "", types)
        # Pull ``{...}`` literal-choice sets and ``Name[...]`` generics out
        # whole, so the comma split below doesn't break them apart.
        curlied = re.findall("{.+}", types)
        no_curls = re.subn("{.+},?", "", types)[0]
        annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
        no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
        # Split the remainder on commas and " or " into individual types.
        ts = [t.strip() for t in no_curls.split(",")]
        ts = [t.split(" or ") for t in ts]
        ts = [item for sublist in ts for item in sublist if item != ""]
        types = ts + curlied + annotated
        for ix, typ in enumerate(types):
            # "list of int" -> "``list`` of ``int``"
            ts = [f"``{t}``" for t in typ.split(" of ")]
            mangled.append(" of ".join(ts))
    except Exception as e:
        raise e
    # Reversing puts the (first-appended) default clause last; note it also
    # reverses the order of the types themselves.
    output = reversed(mangled)
    return ", ".join(output)
def mangle_examples(examples):
    """
    Convert a numpydoc Examples section into markdown.

    Doctest lines (``>>>``) are wrapped in fenced python code blocks with
    their prompts stripped; other lines are joined as prose. A blank line
    ends a code run.
    """
    previously_code = False
    currently_code = False
    out = []
    for line in examples:
        if line.startswith(">>>"):
            currently_code = True
        if line == "":
            currently_code = False
        if not currently_code and previously_code:
            # A code run just ended: close the fence (the blank line itself
            # is dropped).
            out.append("\n```\n")
        elif not currently_code:
            out.append(f"{line} ")
        elif currently_code and not previously_code:
            # A code run just started: open the fence.
            out.append("\n```python\n")
            out.append(re.sub(">>> ", "", line) + "\n")
        else:
            out.append(re.sub(">>> ", "", line) + "\n")
        previously_code = currently_code
    if previously_code:
        out.append("\n```")
    out.append("\n\n")
    return out
def notes_section(doc):
    """Render the numpydoc Notes section as a markdown admonition."""
    rendered = []
    notes = doc["Notes"] if "Notes" in doc else []
    if len(notes) > 0:
        rendered.append("!!! note\n")
        rendered.append(f" {' '.join(notes)}\n\n")
    return rendered
def refs_section(doc):
    """
    Render the numpydoc References section as markdown footnotes.

    Each reference like ``.. [1] Some title`` becomes ``[^1]: Some title``.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc

    Returns
    -------
    list of str
        Markdown for references section
    """
    if "References" not in doc or len(doc["References"]) == 0:
        return []
    footnotes = []
    for reference in doc["References"]:
        number = re.findall(r"\[([0-9]+)\]", reference)[0]
        body = " ".join(reference.split(" ")[2:])
        footnotes.append(f"[^{number}]: {body}" + "\n\n")
    return footnotes
def examples_section(doc, header_level):
    """
    Generate markdown for Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        # The joined-string intermediate the original built here (``egs``)
        # was unused; mangle_examples consumes the raw line list directly.
        lines += mangle_examples(doc["Examples"])
    return lines
def returns_section(thing, doc, header_level):
    """
    Generate markdown for Returns section.

    Prefers the return annotation on *thing* when present; falls back to
    the docstring's Returns entries otherwise.

    Parameters
    ----------
    thing : function
        Function to produce returns for
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for returns section
    """
    lines = []
    return_type = None
    try:
        return_type = thing.__annotations__["return"]
    except AttributeError:
        try:
            # ``thing`` may be a property; read the getter's annotation.
            return_type = thing.fget.__annotations__["return"]
        except (AttributeError, KeyError):
            # Was a bare ``except``: narrowed to the two errors a missing
            # ``fget`` or a missing "return" key can actually raise.
            pass
    except KeyError:
        pass
    if return_type is None:
        return_type = ""
    else:
        try:
            # Fully qualify non-builtin annotated types.
            return_type = (
                f"{return_type.__name__}"
                if return_type.__module__ == "builtins"
                else f"{return_type.__module__}.{return_type.__name__}"
            )
        except AttributeError:
            return_type = str(return_type)
    try:
        if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
            lines.append(f"{'#'*(header_level+1)} Returns\n")
            if return_type != "" and len(doc["Returns"]) == 1:
                # One documented return plus an annotation: the annotation wins.
                name, typ, desc = doc["Returns"][0]
                if typ != "":
                    lines.append(f"- `{name}`: ``{return_type}``")
                else:
                    lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
                if desc != "":
                    lines.append(f" {' '.join(desc)}\n\n")
            elif return_type != "":
                lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
            else:
                # Docstring-only returns: one bullet per entry.
                for name, typ, desc in doc["Returns"]:
                    if ":" in name:
                        name, typ = name.split(":")
                    if typ != "":
                        line = f"- `{name}`: {mangle_types(typ)}"
                    else:
                        line = f"- {mangle_types(name)}"
                    line += "\n\n"
                    lines.append(line)
                    lines.append(f" {' '.join(desc)}\n\n")
    except Exception as e:
        # Malformed Returns metadata yields a best-effort (possibly empty)
        # section rather than an error.
        pass
    return lines
def summary(doc):
    """
    Generate markdown for summary section.

    Parameters
    ----------
    doc : dict
        Output from numpydoc

    Returns
    -------
    list of str
        Markdown strings
    """
    out = []
    for key in ("Summary", "Extended Summary"):
        if key in doc and len(doc[key]) > 0:
            out.append(fix_footnotes(" ".join(doc[key])))
            out.append("\n")
    return out
def params_section(thing, doc, header_level):
    """
    Generate markdown for Parameters section.

    Parameters
    ----------
    thing : function
        Function to produce parameters from
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for parameters section
    """
    # The unused ``lines`` local the original built here has been dropped;
    # the rendering is fully delegated to type_list.
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
def escape(string):
    """
    Escape underscores in markdown.

    Parameters
    ----------
    string : str
        String to escape

    Returns
    -------
    str
        The string, with `_`s escaped with backslashes
    """
    return "\\_".join(string.split("_"))
def get_source_link(thing, source_location):
    """
    Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for
    source_location : str
        GitHub url of the source code

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found
    """
    try:
        lineno = get_line(thing)
        try:
            owner_module = inspect.getmodule(thing)
            # The assert is deliberate control flow: a None module is
            # converted to AssertionError and handled below.
            assert owner_module is not None
        except (TypeError, AssertionError):
            # Properties have no module of their own; use the getter's.
            owner_module = inspect.getmodule(thing.fget)
        # Rebuild the repo-relative file path from the dotted module name.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Best effort: anything without resolvable source simply gets no link.
        pass
    return ""
def get_signature(name, thing):
    """
    Get the signature for a function or class, formatted nicely if possible.

    Parameters
    ----------
    name : str
        Name of the thing, used as the first part of the signature
    thing : class or function
        Thing to get the signature of
    """
    if inspect.ismodule(thing):
        return ""
    if isinstance(thing, property):
        func_sig = name
    else:
        try:
            sig = inspect.signature(thing)
        except TypeError:
            # Fall back to a property getter's signature.
            sig = inspect.signature(thing.fget)
        except ValueError:
            return ""
        func_sig = f"{name}{sig}"
    try:
        # Pretty-print with black when it can parse the signature.
        func_sig = black.format_str(
            func_sig, mode=black.FileMode(line_length=80)
        ).strip()
    except (ValueError, TypeError):
        pass
    return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string
    default : any
        Default value (if any) of the type

    Returns
    -------
    str
        String version of the type annotation
    """
    try:
        qualified = (
            typ.__name__
            if typ.__module__ == "builtins"
            else f"{typ.__module__}.{typ.__name__}"
        )
        type_string = f"`{qualified}`"
    except AttributeError:
        # Not a class (e.g. a string annotation): show it verbatim.
        type_string = f"`{str(typ)}`"
    if default is None:
        return f"{type_string}, default ``None``"
    if default == inspect._empty:
        return type_string
    return f"{type_string}, default ``{default}``"
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown heading line prepended to the list.

    Returns
    -------
    list of str
        Markdown formatted type list; empty if nothing was rendered.
    """

    lines = []
    docced = set()  # parameter names already handled from the docstring
    lines.append(header)
    try:
        # First pass: every parameter the docstring mentions.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation
            # Names without annotations share one bullet, typed from the
            # docstring's type field.
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f" {' '.join(description)}\n\n")
        # Second pass: parameters not yet covered.
        # NOTE(review): the first pass adds *every* name to ``docced``, so
        # this pass appears unreachable — confirm intent before relying on it.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        print(e)
    return lines if len(lines) > 1 else []
def attributes_section(thing, doc, header_level):
    """
    Generate an attributes section for classes.

    Prefers type annotations, if they are present.

    Parameters
    ----------
    thing : class
        Class to document
    doc : dict
        Numpydoc output
    header_level : int
        Number of `#`s to use for header.
        NOTE(review): currently unused — the heading level is fixed at
        ``###`` below; confirm whether that is intentional.

    Returns
    -------
    list of str
        Markdown formatted attribute list
    """
    # Get Attributes
    if not inspect.isclass(thing):
        return []
    # Properties are rendered as linked bullets; everything else goes
    # through the shared type_list renderer.
    props, class_doc = _split_props(thing, doc["Attributes"])
    tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
    if len(tl) == 0 and len(props) > 0:
        tl.append("\n### Attributes\n\n")
    for prop in props:
        tl.append(f"- [`{prop}`](#{prop})\n\n")
    return tl
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # BUG FIX: previously read ``inspect.getdoc(thing)``; ``thing`` is
        # undefined in this scope, so the NameError was silently swallowed
        # and enum docstrings never appeared in the output.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # Enums without a parseable docstring simply get no summary.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code
    """
    # Enums get their own, simpler renderer.
    if type(thing) is enum.EnumMeta:
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Things without a parseable docstring get header/signature/link only.
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module

    Parameters
    ----------
    module_name : str
        Dotted name of the module (last segment becomes the page title).
    module : module
        The imported module object.
    output_dir : str
        Root directory markdown is written beneath.
    source_location : str
        Base repository URL used for source links.
    leaf : bool
        True when *module* is a plain file (``<mod>.md``); False for a
        package (``<pkg>/index.md``).

    Returns
    -------
    tuple of (pathlib.Path, str)
        Absolute output path and the rendered markdown.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): ``alias_funcs`` (re-exported classes) is computed but
    # never used below.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    # NOTE(review): ``module_path`` is also unused.
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        # Only methods defined in this class's own file, plus properties.
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
# Click CLI wrapper around make_api_doc. The "source-location" argument is
# passed to the function as ``source_location``.
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
    make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Build markdown API docs for every submodule of *module_name*.

    Parameters
    ----------
    module_name : str
        Importable name of the package to document.
    output_dir : str
        Directory the markdown tree is written into.
    source_location : str
        Base URL of the repository, used for source links.

    Returns
    -------
    list of tuple
        ``(file, rebuild)`` pairs, where ``rebuild`` regenerates the doc
        page for that source file.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):
        # BUG FIX: bind the loop variables as defaults. Python closures are
        # late-binding, so without this every ``do_doc`` stored in ``files``
        # would rebuild only the *last* module seen in the loop.
        def do_doc(module_name=module_name, module=module, leaf=leaf):
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
attributes_section
|
python
|
def attributes_section(thing, doc, header_level):
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
|
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L635-L666
|
[
"def type_list(signature, doc, header):\n \"\"\"\n Construct a list of types, preferring type annotations to\n docstrings if they are available.\n\n Parameters\n ----------\n signature : Signature\n Signature of thing\n doc : list of tuple\n Numpydoc's type list section\n\n Returns\n -------\n list of str\n Markdown formatted type list\n \"\"\"\n\n lines = []\n docced = set()\n lines.append(header)\n try:\n for names, types, description in doc:\n names, types = _get_names(names, types)\n unannotated = []\n for name in names:\n docced.add(name)\n try:\n typ = signature.parameters[name].annotation\n if typ == inspect._empty:\n raise AttributeError\n default = signature.parameters[name].default\n type_string = string_annotation(typ, default)\n lines.append(f\"- `{name}`: {type_string}\")\n lines.append(\"\\n\\n\")\n except (AttributeError, KeyError):\n unannotated.append(name) # No annotation\n\n if len(unannotated) > 0:\n lines.append(\"- \")\n lines.append(\", \".join(f\"`{name}`\" for name in unannotated))\n if types != \"\" and len(unannotated) > 0:\n lines.append(f\": {mangle_types(types)}\")\n lines.append(\"\\n\\n\")\n lines.append(f\" {' '.join(description)}\\n\\n\")\n for names, types, description in doc:\n names, types = _get_names(names, types)\n for name in names:\n if name not in docced:\n try:\n typ = signature.parameters[name].annotation\n default = signature.parameters[name].default\n type_string = string_annotation(typ, default)\n lines.append(f\"- `{name}`: {type_string}\")\n lines.append(\"\\n\\n\")\n except (AttributeError, KeyError):\n lines.append(f\"- `{name}`\")\n lines.append(\"\\n\\n\")\n except Exception as e:\n print(e)\n return lines if len(lines) > 1 else []\n",
"def _split_props(thing, doc):\n \"\"\"\n Separate properties from other kinds of member.\n \"\"\"\n props = inspect.getmembers(thing, lambda o: isinstance(o, property))\n ps = []\n docs = [\n (*_get_names(names, types), names, types, desc) for names, types, desc in doc\n ]\n for prop_name, prop in props:\n in_doc = [d for d in enumerate(docs) if prop_name in d[0]]\n for d in in_doc:\n docs.remove(d)\n ps.append(prop_name)\n if len(docs) > 0:\n _, _, names, types, descs = zip(*docs)\n return ps, zip(names, types, descs)\n return ps, []\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
    """
    Return the line number at which *thing* is defined in its source file.

    Parameters
    ----------
    thing : function, class, module
        Object to locate; plain properties are resolved via their getter.

    Returns
    -------
    int
        1-based line number in the source file.
    """
    try:
        source_info = inspect.getsourcelines(thing)
    except TypeError:
        # Properties are not directly introspectable; fall back to the getter.
        source_info = inspect.getsourcelines(thing.fget)
    return source_info[1]
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
    """
    Collect importable module files under *module*'s package directory.

    Walks the package's file tree, importing every module file found
    (``__init__.py`` maps to the package itself), and pairs each imported
    module with its path relative to the package's parent directory.

    Parameters
    ----------
    module : module
        Package whose directory tree is walked.
    hide : list of str
        Module basenames to skip.
        NOTE(review): mutable default argument — safe only because it is
        never mutated here.

    Returns
    -------
    list of tuple
        ``(module, relative_path)`` pairs, sorted with ``__init__.py``
        first (see ``_sort_modules``).
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent
    for root, dirs, files in os.walk(module_file):
        # Relative to the package *parent* so the dotted import name can be
        # rebuilt directly from the path parts.
        module_path = pathlib.Path(root).relative_to(module_file.parent)
        if not module_path.parts[-1].startswith("_"):
            try:
                for file in files:
                    # "" for __init__.py imports the package directory itself.
                    module_name = (
                        "" if "__init__.py" == file else inspect.getmodulename(file)
                    )
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join((module_path / module_name).parts)
                        )
                        modules.add((submodule, module_path / file))
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
    """
    Import every (sub)package and module file under *module*'s source tree.

    Temporarily chdirs into the package's parent directory so that relative
    walk paths map directly onto dotted module names.

    Parameters
    ----------
    module : module
        Root package to walk.
    hide : list of str
        Module basenames to skip.
        NOTE(review): mutable default argument — never mutated here.

    Returns
    -------
    set of tuple
        ``(dotted_name, module, is_leaf, path)`` — ``is_leaf`` is False for
        package directories and True for module files; ``path`` is relative
        for packages but absolute for module files.
    """
    modules = set()
    module_file = pathlib.Path(module.__file__).parent.parent
    dir_was = pathlib.Path().absolute()
    # Walk from inside the package parent so path parts == module name parts.
    os.chdir(module_file)
    for root, dirs, files in os.walk(module.__name__):
        module_path = pathlib.Path(root)
        if not module_path.parts[-1].startswith("_"):
            try:
                # NOTE(review): rebinds the ``module`` parameter each
                # iteration; subsequent uses refer to the current package.
                module = importlib.import_module(".".join(module_path.parts))
                if not module.__name__.startswith("_"):
                    modules.add((module.__name__, module, False, module_path))
                for file in files:
                    module_name = inspect.getmodulename(file)
                    if module_name is not None and module_name not in hide:
                        submodule = importlib.import_module(
                            ".".join(
                                (module_path / inspect.getmodulename(file)).parts
                            )
                        )
                        if not module.__name__.startswith(
                            "_"
                        ) and not submodule.__name__.startswith("_"):
                            modules.add(
                                (
                                    submodule.__name__,
                                    submodule,
                                    True,
                                    module_path.absolute() / file,
                                )
                            )
            except ModuleNotFoundError:
                print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
    # Restore the working directory changed above.
    os.chdir(dir_was)
    return modules
def get_classes(module):
    """
    Return ``(name, class)`` pairs for public classes defined in *module*.

    Excludes names with a leading underscore, classes merely imported from
    other modules, and enums (those are handled by ``get_enums``).
    """
    found = set()
    for member_name, member in inspect.getmembers(module, inspect.isclass):
        if member_name.startswith("_"):
            continue
        if member.__module__ != module.__name__:
            continue
        if type(member) is enum.EnumMeta:
            continue
        found.add((member_name, member))
    return found
def get_enums(module):
    """Return ``(name, enum_class)`` pairs for public enums defined in *module*."""
    return {
        (member_name, member)
        for member_name, member in inspect.getmembers(module, inspect.isclass)
        if not member_name.startswith("_")
        and member.__module__ == module.__name__
        and type(member) is enum.EnumMeta
    }
def get_funcs(module):
    """Return ``(name, function)`` pairs for public functions defined in *module*."""
    return {
        (member_name, member)
        for member_name, member in inspect.getmembers(module, inspect.isfunction)
        if not member_name.startswith("_") and member.__module__ == module.__name__
    }
def get_available_funcs(module):
    """
    Return ``(name, function)`` pairs for public functions visible in
    *module* that belong to the same top-level package (including ones
    re-exported from submodules).
    """
    shared_root = module.__name__.split(".")[0]
    members = inspect.getmembers(module, inspect.isfunction)
    return {
        (member_name, member)
        for member_name, member in members
        if not member_name.startswith("_")
        and member.__module__.split(".")[0] == shared_root
    }
def get_available_classes(module):
    """
    Return ``(name, class)`` pairs for public classes visible in *module*
    that belong to the same top-level package (including ones re-exported
    from submodules).
    """
    shared_root = module.__name__.split(".")[0]
    members = inspect.getmembers(module, inspect.isclass)
    return {
        (member_name, member)
        for member_name, member in members
        if not member_name.startswith("_")
        and member.__module__.split(".")[0] == shared_root
    }
def deffed_here(thing, holder):
    """Return True when *thing* comes from the same source file as *holder*."""
    thing_file = inspect.getfile(thing)
    holder_file = inspect.getfile(holder)
    return thing_file == holder_file
def fix_footnotes(s):
    """Convert reST footnote references (``[1]_``) to markdown (``[^1]``)."""
    # Raw string avoids the invalid "\[" escape warning in the pattern.
    return re.subn(r"\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
    """
    Convert a numpydoc type string into backtick-quoted markdown.

    Splits text such as ``"int or str, default 1"`` into its component
    types, wraps each in double backticks, and re-joins them.  Curly-brace
    option sets (``{...}``) and subscripted generics (``List[int]``) are
    extracted before the comma split so internal commas do not break them.

    Parameters
    ----------
    types : str
        Raw type text from a numpydoc field.

    Returns
    -------
    str
        Comma-separated, backtick-quoted markdown version of the types.
    """
    default = re.findall("default .+", types)
    mangled = []
    try:
        if len(default):
            # Quote the default value and strip it from the type text so it
            # is not treated as a type name below.
            default = re.sub("default (.+)", r"default ``\1``", default[0])
            mangled.append(default)
            types = re.sub("default .+", "", types)
        # Pull out "{...}" choice sets and "Name[...]" generics first; both
        # may contain commas that must not be split on.
        curlied = re.findall("{.+}", types)
        no_curls = re.subn("{.+},?", "", types)[0]
        annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
        no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
        ts = [t.strip() for t in no_curls.split(",")]
        ts = [t.split(" or ") for t in ts]
        # Flatten and drop empties left behind by the substitutions.
        ts = [item for sublist in ts for item in sublist if item != ""]
        types = ts + curlied + annotated
        for ix, typ in enumerate(types):
            # Quote each part of "X of Y" separately so the "of" stays plain.
            ts = [f"``{t}``" for t in typ.split(" of ")]
            mangled.append(" of ".join(ts))
    except Exception as e:
        # print(e)
        # print(default)
        # print(types)
        raise e
    # The default was appended first; reversing renders it last (and the
    # types in reverse extraction order).
    output = reversed(mangled)
    return ", ".join(output)
def mangle_examples(examples):
    """
    Convert a numpydoc Examples section into markdown.

    Runs of ``>>>`` interpreter lines are wrapped in fenced ``python`` code
    blocks (with the ``>>> `` prompts stripped); everything else passes
    through as prose.

    Parameters
    ----------
    examples : list of str
        Raw lines of the Examples section.

    Returns
    -------
    list of str
        Markdown lines.
    """
    # Track whether the previous/current line is inside an interpreter run,
    # so fences are opened and closed exactly at the transitions.
    was_in_python = False
    in_python = False
    lines = []
    for line in examples:
        if line.startswith(">>>"):
            in_python = True
        if line == "":
            # A blank line terminates the current interpreter session.
            in_python = False
        if not in_python and was_in_python:
            lines.append("\n```\n")
        elif not in_python:
            lines.append(f"{line} ")
        elif in_python and not was_in_python:
            lines.append("\n```python\n")
            lines.append(re.sub(">>> ", "", line) + "\n")
        else:
            lines.append(re.sub(">>> ", "", line) + "\n")
        was_in_python = in_python
    if was_in_python:
        # Close a code fence left open at the end of the section.
        lines.append("\n```")
    lines.append("\n\n")
    return lines
def notes_section(doc):
    """Render the numpydoc "Notes" section as a markdown admonition."""
    if not doc.get("Notes"):
        return []
    return ["!!! note\n", f" {' '.join(doc['Notes'])}\n\n"]
def refs_section(doc):
    """
    Generate a References section.

    Parameters
    ----------
    doc : dict
        Dictionary produced by numpydoc

    Returns
    -------
    list of str
        Markdown for references section, one footnote definition
        (``[^n]: body``) per reference.
    """
    lines = []
    if "References" in doc and len(doc["References"]) > 0:
        for ref in doc["References"]:
            # Raw string avoids the invalid "\[" escape in the pattern.
            ref_num = re.findall(r"\[([0-9]+)\]", ref)[0]
            # Drop the ".. [n]" prefix (the first two space-separated tokens).
            ref_body = " ".join(ref.split(" ")[2:])
            lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
    return lines
def examples_section(doc, header_level):
    """
    Generate markdown for Examples section.

    Parameters
    ----------
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for examples section
    """
    lines = []
    if "Examples" in doc and len(doc["Examples"]) > 0:
        lines.append(f"{'#'*(header_level+1)} Examples \n")
        # The joined-string intermediate in the original was never used;
        # mangle_examples consumes the raw line list directly.
        lines += mangle_examples(doc["Examples"])
    return lines
def returns_section(thing, doc, header_level):
    """
    Generate markdown for Returns section.

    Prefers the function's ``return`` annotation over the docstring's
    Returns entries when both are present.

    Parameters
    ----------
    thing : function
        Function to produce returns for
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the Returns section
    """
    lines = []
    return_type = None
    try:
        return_type = thing.__annotations__["return"]
    except AttributeError:
        # Possibly a property; try its getter's annotations.
        try:
            return_type = thing.fget.__annotations__["return"]
        except:
            pass
    except KeyError:
        # No "return" annotation.
        pass
    if return_type is None:
        return_type = ""
    else:
        # print(f"{thing} has annotated return type {return_type}")
        try:
            # Fully qualify non-builtin annotation types.
            return_type = (
                f"{return_type.__name__}"
                if return_type.__module__ == "builtins"
                else f"{return_type.__module__}.{return_type.__name__}"
            )
        except AttributeError:
            # typing constructs etc. have no __name__; use their str form.
            return_type = str(return_type)
    # print(return_type)
    try:
        if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
            lines.append(f"{'#'*(header_level+1)} Returns\n")
            if return_type != "" and len(doc["Returns"]) == 1:
                # Single docstring entry + annotation: annotation wins for
                # the type, docstring supplies name and description.
                name, typ, desc = doc["Returns"][0]
                if typ != "":
                    lines.append(f"- `{name}`: ``{return_type}``")
                else:
                    lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
                if desc != "":
                    lines.append(f" {' '.join(desc)}\n\n")
            elif return_type != "":
                # Annotation only (no usable docstring entry).
                lines.append(f"- ``{return_type}``")
                lines.append("\n\n")
            else:
                # Docstring entries only; recover "name: type" misuse.
                for name, typ, desc in doc["Returns"]:
                    if ":" in name:
                        name, typ = name.split(":")
                    if typ != "":
                        line = f"- `{name}`: {mangle_types(typ)}"
                    else:
                        line = f"- {mangle_types(name)}"
                    line += "\n\n"
                    lines.append(line)
                    lines.append(f" {' '.join(desc)}\n\n")
    except Exception as e:
        # NOTE(review): broad swallow — malformed Returns sections are
        # silently dropped (possibly mid-render).
        # print(e)
        # print(doc)
        pass
    return lines
def summary(doc):
    """
    Generate markdown for the summary sections.

    Parameters
    ----------
    doc : dict
        Output from numpydoc

    Returns
    -------
    list of str
        Markdown strings for the Summary and Extended Summary sections,
        with reST footnotes converted to markdown.
    """
    lines = []
    for section in ("Summary", "Extended Summary"):
        if section in doc and len(doc[section]) > 0:
            lines.append(fix_footnotes(" ".join(doc[section])))
            lines.append("\n")
    return lines
def params_section(thing, doc, header_level):
    """
    Generate markdown for Parameters section.

    Parameters
    ----------
    thing : function
        Function to produce parameters from
    doc : dict
        Dict from numpydoc
    header_level : int
        Number of `#`s to use for header

    Returns
    -------
    list of str
        Markdown for the Parameters section
    """
    # The original initialized an unused ``lines`` list; type_list builds
    # the whole output.
    return type_list(
        inspect.signature(thing),
        doc["Parameters"],
        "#" * (header_level + 1) + " Parameters\n\n",
    )
def escape(string):
    """
    Backslash-escape markdown underscores.

    Parameters
    ----------
    string : str
        String to escape

    Returns
    -------
    str
        The string with every ``_`` preceded by a backslash.
    """
    return "\\_".join(string.split("_"))
def get_source_link(thing, source_location):
    """
    Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for
    source_location : str
        GitHub url of the source code

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found
    """
    try:
        lineno = get_line(thing)
        try:
            owner_module = inspect.getmodule(thing)
            assert owner_module is not None
        except (TypeError, AssertionError):
            # Properties: resolve the module through the getter instead.
            owner_module = inspect.getmodule(thing.fget)

        # Rebuild the repo-relative file path from the dotted module name.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Best effort: anything without introspectable source simply gets
        # no link.
        # print("Failed to find source file.")
        # print(e)
        # print(lineno)
        # print(thing)
        # print(owner_module)
        # print(thing_file)
        # print(source_location)
        pass
    return ""
def get_signature(name, thing):
    """
    Get the signature for a function or class, formatted nicely if possible.

    Parameters
    ----------
    name : str
        Name of the thing, used as the first part of the signature
    thing : class or function
        Thing to get the signature of

    Returns
    -------
    str
        Fenced ``python`` code block containing the signature; "" for
        modules and for objects exposing no signature.
    """
    if inspect.ismodule(thing):
        return ""
    if isinstance(thing, property):
        # Properties are accessed bare, so the "signature" is just the name.
        func_sig = name
    else:
        try:
            sig = inspect.signature(thing)
        except TypeError:
            # Property-like descriptor: fall back to its getter.
            sig = inspect.signature(thing.fget)
        except ValueError:
            # Builtins / C extensions may expose no signature at all.
            return ""
        func_sig = f"{name}{sig}"
    try:
        # Reformat with black for consistent line wrapping; silently keep
        # the raw form if it does not parse as code.
        mode = black.FileMode(line_length=80)
        func_sig = black.format_str(func_sig, mode=mode).strip()
    except (ValueError, TypeError):
        pass
    return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string
    default : any
        Default value (if any) of the type

    Returns
    -------
    str
        Backtick-quoted type name (qualified for non-builtins), with the
        default value appended when one exists.
    """
    try:
        qualified = (
            typ.__name__
            if typ.__module__ == "builtins"
            else f"{typ.__module__}.{typ.__name__}"
        )
        type_string = f"`{qualified}`"
    except AttributeError:
        # typing constructs etc. lack __name__/__module__.
        type_string = f"`{str(typ)}`"
    if default is None:
        return f"{type_string}, default ``None``"
    if default == inspect._empty:
        return type_string
    return f"{type_string}, default ``{default}``"
def type_list(signature, doc, header):
    """
    Construct a list of types, preferring type annotations to
    docstrings if they are available.

    Parameters
    ----------
    signature : Signature
        Signature of thing
    doc : list of tuple
        Numpydoc's type list section
    header : str
        Markdown header line prepended to the output.

    Returns
    -------
    list of str
        Markdown formatted type list, or ``[]`` when nothing beyond the
        header was produced.
    """
    lines = []
    docced = set()
    lines.append(header)
    try:
        for names, types, description in doc:
            names, types = _get_names(names, types)
            unannotated = []
            for name in names:
                docced.add(name)
                try:
                    typ = signature.parameters[name].annotation
                    if typ == inspect._empty:
                        raise AttributeError
                    default = signature.parameters[name].default
                    type_string = string_annotation(typ, default)
                    lines.append(f"- `{name}`: {type_string}")
                    lines.append("\n\n")
                except (AttributeError, KeyError):
                    unannotated.append(name)  # No annotation

            # Names without annotations fall back to the docstring's type.
            if len(unannotated) > 0:
                lines.append("- ")
                lines.append(", ".join(f"`{name}`" for name in unannotated))
            if types != "" and len(unannotated) > 0:
                lines.append(f": {mangle_types(types)}")
            lines.append("\n\n")
            lines.append(f" {' '.join(description)}\n\n")
        # NOTE(review): every name is added to ``docced`` in the first pass,
        # so this second pass looks unreachable unless the first aborted
        # part-way — confirm intent.
        for names, types, description in doc:
            names, types = _get_names(names, types)
            for name in names:
                if name not in docced:
                    try:
                        typ = signature.parameters[name].annotation
                        default = signature.parameters[name].default
                        type_string = string_annotation(typ, default)
                        lines.append(f"- `{name}`: {type_string}")
                        lines.append("\n\n")
                    except (AttributeError, KeyError):
                        lines.append(f"- `{name}`")
                        lines.append("\n\n")
    except Exception as e:
        print(e)
    # Only the header present means nothing was documented.
    return lines if len(lines) > 1 else []
def _split_props(thing, doc):
    """
    Separate properties from other kinds of member.

    Parameters
    ----------
    thing : class
        Class whose properties should be split out.
    doc : list of tuple
        Numpydoc attribute entries as ``(names, types, description)``.

    Returns
    -------
    tuple
        ``(property_names, remaining_doc)`` where ``remaining_doc``
        iterates over the attribute entries that are not properties
        (empty list if none remain).
    """
    props = inspect.getmembers(thing, lambda o: isinstance(o, property))
    ps = []
    docs = [
        (*_get_names(names, types), names, types, desc) for names, types, desc in doc
    ]
    for prop_name, prop in props:
        # Drop doc entries that describe this property.  The original code
        # iterated ``enumerate(docs)`` and tested ``prop_name in d[0]``,
        # which checked membership against the enumeration index (an int)
        # and raised TypeError whenever a class had both properties and
        # documented attributes.
        in_doc = [d for d in docs if prop_name in d[0]]
        for d in in_doc:
            docs.remove(d)
        ps.append(prop_name)
    if len(docs) > 0:
        _, _, names, types, descs = zip(*docs)
        return ps, zip(names, types, descs)
    return ps, []
def enum_doc(name, enum, header_level, source_location):
    """
    Generate markdown for an enum

    Parameters
    ----------
    name : str
        Name of the thing being documented
    enum : EnumMeta
        Enum to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, source link, summary (when a docstring
        exists) and the member list.
    """
    lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
    lines.append(f"```python\n{name}\n```\n")
    lines.append(get_source_link(enum, source_location))
    try:
        # The original referenced an undefined name ``thing`` here, so the
        # summary was silently skipped for every enum; parse the docstring
        # of the enum that was actually passed in.
        doc = NumpyDocString(inspect.getdoc(enum))._parsed_data
        lines += summary(doc)
    except Exception:
        # Enums without a docstring (getdoc -> None) cannot be parsed;
        # fall through and document members only.
        pass
    lines.append(f"{'#'*(header_level + 1)} Members\n\n")
    lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
    return lines
def to_doc(name, thing, header_level, source_location):
    """
    Generate markdown for a class or function

    Parameters
    ----------
    name : str
        Name of the thing being documented
    thing : class or function
        Class or function to document
    header_level : int
        Heading level
    source_location : str
        URL of repo containing source code

    Returns
    -------
    list of str
        Markdown lines: header, signature, source link, then any numpydoc
        sections that could be parsed from the docstring.
    """
    if type(thing) is enum.EnumMeta:
        # Enums get a dedicated renderer (member list instead of methods).
        return enum_doc(name, thing, header_level, source_location)
    if inspect.isclass(thing):
        header = f"{'#'*header_level} Class **{name}**\n\n"
    else:
        header = f"{'#'*header_level} {name}\n\n"
    lines = [
        header,
        get_signature(name, thing),
        get_source_link(thing, source_location),
    ]
    try:
        doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
        lines += summary(doc)
        lines += attributes_section(thing, doc, header_level)
        lines += params_section(thing, doc, header_level)
        lines += returns_section(thing, doc, header_level)
        lines += examples_section(doc, header_level)
        lines += notes_section(doc)
        lines += refs_section(doc)
    except Exception as e:
        # Things without a (parseable) docstring still get the header,
        # signature and source link.
        # print(f"No docstring for {name}, src {source_location}: {e}")
        pass
    return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
    """
    Document a module

    Parameters
    ----------
    module_name : str
        Dotted module name; only its last component is used as page title.
    module : module
        Imported module object to document.
    output_dir : str
        Root directory of the generated markdown tree.
    source_location : str
        Base repository URL used for source links.
    leaf : bool
        True for a plain module (written as ``<name>.md``); False for a
        package (written as ``<name>/index.md``).

    Returns
    -------
    tuple
        ``(absolute_doc_path, markdown_text)``.
    """
    path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
    available_classes = get_available_classes(module)
    deffed_classes = get_classes(module)
    deffed_funcs = get_funcs(module)
    deffed_enums = get_enums(module)
    # NOTE(review): alias_funcs is computed but never used below.
    alias_funcs = available_classes - deffed_classes
    if leaf:
        doc_path = path.with_suffix(".md")
    else:
        doc_path = path / "index.md"
    doc_path.parent.mkdir(parents=True, exist_ok=True)
    # NOTE(review): module_path is computed but never used below.
    module_path = "/".join(module.__name__.split("."))
    doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
    module_doc = module.__doc__
    # Module overview documentation
    if module_doc is not None:
        doc += to_doc(module.__name__, module, 1, source_location)
    else:
        doc.append(f"# {module.__name__}\n\n")
    doc.append("\n\n")
    for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
        doc += to_doc(cls_name, cls, 2, source_location)
        # Only document methods defined in this class's own source file.
        class_methods = [
            x
            for x in inspect.getmembers(cls, inspect.isfunction)
            if (not x[0].startswith("_")) and deffed_here(x[1], cls)
        ]
        class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
        if len(class_methods) > 0:
            doc.append("### Methods \n\n")
            for method_name, method in class_methods:
                doc += to_doc(method_name, method, 4, source_location)
    for fname, func in sorted(deffed_funcs):
        doc += to_doc(fname, func, 2, source_location)
    return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
    # Thin click wrapper around make_api_doc (no docstring added on purpose:
    # click would surface it as --help text).
    make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
    """
    Build markdown documentation for every module of a package.

    Parameters
    ----------
    module_name : str
        Importable name of the root package.
    output_dir : str
        Directory the markdown tree is written into.
    source_location : str
        Base URL of the repository, used for source links.

    Returns
    -------
    list of tuple
        ``(file, rebuild)`` pairs; calling ``rebuild()`` regenerates the
        documentation for that source file.
    """
    module = importlib.import_module(module_name)
    output_dir = pathlib.Path(output_dir).absolute()
    files = []
    for module_name, module, leaf, file in get_all_modules_from_files(module):

        def do_doc(module_name=module_name, module=module, leaf=leaf):
            # Bind the loop variables as defaults: a plain closure is
            # late-bound, so every callback stored in ``files`` would have
            # rebuilt only the *last* module of the loop.
            doc_path, doc = doc_module(
                module_name, module, output_dir, source_location, leaf
            )
            with open(doc_path.absolute(), "w") as doc_file:
                doc_file.write(doc)

        do_doc()
        files.append((file, do_doc))
        print(f"Built documentation for {file.absolute()}")
    return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
enum_doc
|
python
|
def enum_doc(name, enum, header_level, source_location):
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
|
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L669-L695
|
[
"def summary(doc):\n \"\"\"\n Generate markdown for summary section.\n\n Parameters\n ----------\n doc : dict\n Output from numpydoc\n\n Returns\n -------\n list of str\n Markdown strings\n \"\"\"\n lines = []\n if \"Summary\" in doc and len(doc[\"Summary\"]) > 0:\n lines.append(fix_footnotes(\" \".join(doc[\"Summary\"])))\n lines.append(\"\\n\")\n if \"Extended Summary\" in doc and len(doc[\"Extended Summary\"]) > 0:\n lines.append(fix_footnotes(\" \".join(doc[\"Extended Summary\"])))\n lines.append(\"\\n\")\n return lines\n",
"def get_source_link(thing, source_location):\n \"\"\"\n Get a link to the line number a module/class/function is defined at.\n\n Parameters\n ----------\n thing : function or class\n Thing to get the link for\n source_location : str\n GitHub url of the source code\n\n Returns\n -------\n str\n String with link to the file & line number, or empty string if it\n couldn't be found\n \"\"\"\n try:\n lineno = get_line(thing)\n try:\n owner_module = inspect.getmodule(thing)\n assert owner_module is not None\n except (TypeError, AssertionError):\n owner_module = inspect.getmodule(thing.fget)\n\n thing_file = \"/\".join(owner_module.__name__.split(\".\"))\n if owner_module.__file__.endswith(\"__init__.py\"):\n thing_file += \"/__init__.py\"\n else:\n thing_file += \".py\"\n return (\n f\"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})\"\n + \"\\n\\n\"\n )\n except Exception as e:\n # print(\"Failed to find source file.\")\n # print(e)\n # print(lineno)\n # print(thing)\n # print(owner_module)\n # print(thing_file)\n # print(source_location)\n pass\n return \"\"\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
    """Convert reST footnote references (``[1]_``) to markdown (``[^1]``)."""
    # Raw string avoids the invalid "\[" escape warning in the pattern.
    return re.subn(r"\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
to_doc
|
python
|
def to_doc(name, thing, header_level, source_location):
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
|
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L698-L738
|
[
"def summary(doc):\n \"\"\"\n Generate markdown for summary section.\n\n Parameters\n ----------\n doc : dict\n Output from numpydoc\n\n Returns\n -------\n list of str\n Markdown strings\n \"\"\"\n lines = []\n if \"Summary\" in doc and len(doc[\"Summary\"]) > 0:\n lines.append(fix_footnotes(\" \".join(doc[\"Summary\"])))\n lines.append(\"\\n\")\n if \"Extended Summary\" in doc and len(doc[\"Extended Summary\"]) > 0:\n lines.append(fix_footnotes(\" \".join(doc[\"Extended Summary\"])))\n lines.append(\"\\n\")\n return lines\n",
"def notes_section(doc):\n lines = []\n if \"Notes\" in doc and len(doc[\"Notes\"]) > 0:\n lines.append(\"!!! note\\n\")\n lines.append(f\" {' '.join(doc['Notes'])}\\n\\n\")\n return lines\n",
"def refs_section(doc):\n \"\"\"\n Generate a References section.\n\n Parameters\n ----------\n doc : dict\n Dictionary produced by numpydoc\n\n Returns\n -------\n list of str\n Markdown for references section\n \"\"\"\n lines = []\n if \"References\" in doc and len(doc[\"References\"]) > 0:\n # print(\"Found refs\")\n for ref in doc[\"References\"]:\n # print(ref)\n ref_num = re.findall(\"\\[([0-9]+)\\]\", ref)[0]\n # print(ref_num)\n ref_body = \" \".join(ref.split(\" \")[2:])\n # print(f\"[^{ref_num}] {ref_body}\" + \"\\n\")\n lines.append(f\"[^{ref_num}]: {ref_body}\" + \"\\n\\n\")\n # print(lines)\n return lines\n",
"def examples_section(doc, header_level):\n \"\"\"\n Generate markdown for Examples section.\n\n Parameters\n ----------\n doc : dict\n Dict from numpydoc\n header_level : int\n Number of `#`s to use for header\n\n Returns\n -------\n list of str\n Markdown for examples section\n \"\"\"\n lines = []\n if \"Examples\" in doc and len(doc[\"Examples\"]) > 0:\n lines.append(f\"{'#'*(header_level+1)} Examples \\n\")\n egs = \"\\n\".join(doc[\"Examples\"])\n lines += mangle_examples(doc[\"Examples\"])\n return lines\n",
"def returns_section(thing, doc, header_level):\n \"\"\"\n Generate markdown for Returns section.\n\n Parameters\n ----------\n thing : function\n Function to produce returns for\n doc : dict\n Dict from numpydoc\n header_level : int\n Number of `#`s to use for header\n\n Returns\n -------\n list of str\n Markdown for examples section\n \"\"\"\n lines = []\n return_type = None\n try:\n return_type = thing.__annotations__[\"return\"]\n except AttributeError:\n try:\n return_type = thing.fget.__annotations__[\"return\"]\n except:\n pass\n except KeyError:\n pass\n if return_type is None:\n return_type = \"\"\n else:\n # print(f\"{thing} has annotated return type {return_type}\")\n try:\n return_type = (\n f\"{return_type.__name__}\"\n if return_type.__module__ == \"builtins\"\n else f\"{return_type.__module__}.{return_type.__name__}\"\n )\n except AttributeError:\n return_type = str(return_type)\n # print(return_type)\n\n try:\n if \"Returns\" in doc and len(doc[\"Returns\"]) > 0 or return_type != \"\":\n lines.append(f\"{'#'*(header_level+1)} Returns\\n\")\n if return_type != \"\" and len(doc[\"Returns\"]) == 1:\n name, typ, desc = doc[\"Returns\"][0]\n if typ != \"\":\n lines.append(f\"- `{name}`: ``{return_type}``\")\n else:\n lines.append(f\"- ``{return_type}``\")\n lines.append(\"\\n\\n\")\n if desc != \"\":\n lines.append(f\" {' '.join(desc)}\\n\\n\")\n elif return_type != \"\":\n lines.append(f\"- ``{return_type}``\")\n lines.append(\"\\n\\n\")\n else:\n for name, typ, desc in doc[\"Returns\"]:\n if \":\" in name:\n name, typ = name.split(\":\")\n\n if typ != \"\":\n line = f\"- `{name}`: {mangle_types(typ)}\"\n else:\n line = f\"- {mangle_types(name)}\"\n line += \"\\n\\n\"\n lines.append(line)\n lines.append(f\" {' '.join(desc)}\\n\\n\")\n except Exception as e:\n # print(e)\n # print(doc)\n pass\n return lines\n",
"def params_section(thing, doc, header_level):\n \"\"\"\n Generate markdown for Parameters section.\n\n Parameters\n ----------\n thing : functuon\n Function to produce parameters from\n doc : dict\n Dict from numpydoc\n header_level : int\n Number of `#`s to use for header\n\n Returns\n -------\n list of str\n Markdown for examples section\n \"\"\"\n lines = []\n\n class_doc = doc[\"Parameters\"]\n return type_list(\n inspect.signature(thing),\n class_doc,\n \"#\" * (header_level + 1) + \" Parameters\\n\\n\",\n )\n",
"def get_source_link(thing, source_location):\n \"\"\"\n Get a link to the line number a module/class/function is defined at.\n\n Parameters\n ----------\n thing : function or class\n Thing to get the link for\n source_location : str\n GitHub url of the source code\n\n Returns\n -------\n str\n String with link to the file & line number, or empty string if it\n couldn't be found\n \"\"\"\n try:\n lineno = get_line(thing)\n try:\n owner_module = inspect.getmodule(thing)\n assert owner_module is not None\n except (TypeError, AssertionError):\n owner_module = inspect.getmodule(thing.fget)\n\n thing_file = \"/\".join(owner_module.__name__.split(\".\"))\n if owner_module.__file__.endswith(\"__init__.py\"):\n thing_file += \"/__init__.py\"\n else:\n thing_file += \".py\"\n return (\n f\"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})\"\n + \"\\n\\n\"\n )\n except Exception as e:\n # print(\"Failed to find source file.\")\n # print(e)\n # print(lineno)\n # print(thing)\n # print(owner_module)\n # print(thing_file)\n # print(source_location)\n pass\n return \"\"\n",
"def get_signature(name, thing):\n \"\"\"\n Get the signature for a function or class, formatted nicely if possible.\n\n Parameters\n ----------\n name : str\n Name of the thing, used as the first part of the signature\n thing : class or function\n Thing to get the signature of\n \"\"\"\n if inspect.ismodule(thing):\n return \"\"\n if isinstance(thing, property):\n func_sig = name\n else:\n try:\n sig = inspect.signature(thing)\n except TypeError:\n sig = inspect.signature(thing.fget)\n except ValueError:\n return \"\"\n func_sig = f\"{name}{sig}\"\n try:\n mode = black.FileMode(line_length=80)\n func_sig = black.format_str(func_sig, mode=mode).strip()\n except (ValueError, TypeError):\n pass\n return f\"```python\\n{func_sig}\\n```\\n\"\n",
"def attributes_section(thing, doc, header_level):\n \"\"\"\n Generate an attributes section for classes.\n\n Prefers type annotations, if they are present.\n\n Parameters\n ----------\n thing : class\n Class to document\n doc : dict\n Numpydoc output\n header_level : int\n Number of `#`s to use for header\n\n Returns\n -------\n list of str\n Markdown formatted attribute list\n \"\"\"\n # Get Attributes\n\n if not inspect.isclass(thing):\n return []\n\n props, class_doc = _split_props(thing, doc[\"Attributes\"])\n tl = type_list(inspect.signature(thing), class_doc, \"\\n### Attributes\\n\\n\")\n if len(tl) == 0 and len(props) > 0:\n tl.append(\"\\n### Attributes\\n\\n\")\n for prop in props:\n tl.append(f\"- [`{prop}`](#{prop})\\n\\n\")\n return tl\n",
"def enum_doc(name, enum, header_level, source_location):\n \"\"\"\n Generate markdown for an enum\n\n Parameters\n ----------\n name : str\n Name of the thing being documented\n enum : EnumMeta\n Enum to document\n header_level : int\n Heading level\n source_location : str\n URL of repo containing source code\n \"\"\"\n\n lines = [f\"{'#'*header_level} Enum **{name}**\\n\\n\"]\n lines.append(f\"```python\\n{name}\\n```\\n\")\n lines.append(get_source_link(enum, source_location))\n try:\n doc = NumpyDocString(inspect.getdoc(thing))._parsed_data\n lines += summary(doc)\n except:\n pass\n lines.append(f\"{'#'*(header_level + 1)} Members\\n\\n\")\n lines += [f\"- `{str(v).split('.').pop()}`: `{v.value}` \\n\\n\" for v in enum]\n return lines\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def doc_module(module_name, module, output_dir, source_location, leaf):
"""
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
"""
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/mkapi.py
|
doc_module
|
python
|
def doc_module(module_name, module, output_dir, source_location, leaf):
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc)
|
Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/mkapi.py#L741-L789
|
[
"def get_classes(module):\n return set(\n [\n x\n for x in inspect.getmembers(module, inspect.isclass)\n if (not x[0].startswith(\"_\"))\n and x[1].__module__ == module.__name__\n and not type(x[1]) is enum.EnumMeta\n ]\n )\n",
"def get_enums(module):\n return set(\n [\n x\n for x in inspect.getmembers(module, inspect.isclass)\n if (not x[0].startswith(\"_\"))\n and x[1].__module__ == module.__name__\n and type(x[1]) is enum.EnumMeta\n ]\n )\n",
"def get_funcs(module):\n return set(\n [\n x\n for x in inspect.getmembers(module, inspect.isfunction)\n if (not x[0].startswith(\"_\")) and x[1].__module__ == module.__name__\n ]\n )\n",
"def get_available_classes(module):\n shared_root = module.__name__.split(\".\")[0]\n return set(\n [\n x\n for x in inspect.getmembers(module, inspect.isclass)\n if (not x[0].startswith(\"_\"))\n and x[1].__module__.split(\".\")[0] == shared_root\n ]\n )\n",
"def to_doc(name, thing, header_level, source_location):\n \"\"\"\n Generate markdown for a class or function\n\n Parameters\n ----------\n name : str\n Name of the thing being documented\n thing : class or function\n Class or function to document\n header_level : int\n Heading level\n source_location : str\n URL of repo containing source code\n \"\"\"\n\n if type(thing) is enum.EnumMeta:\n return enum_doc(name, thing, header_level, source_location)\n if inspect.isclass(thing):\n header = f\"{'#'*header_level} Class **{name}**\\n\\n\"\n else:\n header = f\"{'#'*header_level} {name}\\n\\n\"\n lines = [\n header,\n get_signature(name, thing),\n get_source_link(thing, source_location),\n ]\n\n try:\n doc = NumpyDocString(inspect.getdoc(thing))._parsed_data\n lines += summary(doc)\n lines += attributes_section(thing, doc, header_level)\n lines += params_section(thing, doc, header_level)\n lines += returns_section(thing, doc, header_level)\n lines += examples_section(doc, header_level)\n lines += notes_section(doc)\n lines += refs_section(doc)\n except Exception as e:\n # print(f\"No docstring for {name}, src {source_location}: {e}\")\n pass\n return lines\n"
] |
import inspect, os, pathlib, importlib, black, re, click, enum
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from functools import cmp_to_key
def get_line(thing):
"""
Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file
"""
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e
def _sort_modules(mods):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare))
def get_submodule_files(module, hide=["_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent
for root, dirs, files in os.walk(module_file):
module_path = pathlib.Path(root).relative_to(module_file.parent)
if not module_path.parts[-1].startswith("_"):
try:
for file in files:
module_name = (
"" if "__init__.py" == file else inspect.getmodulename(file)
)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join((module_path / module_name).parts)
)
modules.add((submodule, module_path / file))
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
return _sort_modules(modules)
def get_all_modules_from_files(module, hide=["__init__", "_version"]):
modules = set()
module_file = pathlib.Path(module.__file__).parent.parent
dir_was = pathlib.Path().absolute()
os.chdir(module_file)
for root, dirs, files in os.walk(module.__name__):
module_path = pathlib.Path(root)
if not module_path.parts[-1].startswith("_"):
try:
module = importlib.import_module(".".join(module_path.parts))
if not module.__name__.startswith("_"):
modules.add((module.__name__, module, False, module_path))
for file in files:
module_name = inspect.getmodulename(file)
if module_name is not None and module_name not in hide:
submodule = importlib.import_module(
".".join(
(module_path / inspect.getmodulename(file)).parts
)
)
if not module.__name__.startswith(
"_"
) and not submodule.__name__.startswith("_"):
modules.add(
(
submodule.__name__,
submodule,
True,
module_path.absolute() / file,
)
)
except ModuleNotFoundError:
print(f"Skipping {'.'.join(module_path.parts)} - not a module.")
os.chdir(dir_was)
return modules
def get_classes(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and not type(x[1]) is enum.EnumMeta
]
)
def get_enums(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__ == module.__name__
and type(x[1]) is enum.EnumMeta
]
)
def get_funcs(module):
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_")) and x[1].__module__ == module.__name__
]
)
def get_available_funcs(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isfunction)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def get_available_classes(module):
shared_root = module.__name__.split(".")[0]
return set(
[
x
for x in inspect.getmembers(module, inspect.isclass)
if (not x[0].startswith("_"))
and x[1].__module__.split(".")[0] == shared_root
]
)
def deffed_here(thing, holder):
return inspect.getfile(thing) == inspect.getfile(holder)
def fix_footnotes(s):
return re.subn("\[([0-9]+)\]_", r"[^\1]", s)[0]
def mangle_types(types):
default = re.findall("default .+", types)
mangled = []
try:
if len(default):
default = re.sub("default (.+)", r"default ``\1``", default[0])
mangled.append(default)
types = re.sub("default .+", "", types)
curlied = re.findall("{.+}", types)
no_curls = re.subn("{.+},?", "", types)[0]
annotated = re.findall("[a-zA-Z]+\[.+\]", no_curls)
no_curls = re.subn("[a-zA-Z]+\[.+\],?", "", no_curls)[0]
ts = [t.strip() for t in no_curls.split(",")]
ts = [t.split(" or ") for t in ts]
ts = [item for sublist in ts for item in sublist if item != ""]
types = ts + curlied + annotated
for ix, typ in enumerate(types):
ts = [f"``{t}``" for t in typ.split(" of ")]
mangled.append(" of ".join(ts))
except Exception as e:
# print(e)
# print(default)
# print(types)
raise e
output = reversed(mangled)
return ", ".join(output)
def mangle_examples(examples):
was_in_python = False
in_python = False
lines = []
for line in examples:
if line.startswith(">>>"):
in_python = True
if line == "":
in_python = False
if not in_python and was_in_python:
lines.append("\n```\n")
elif not in_python:
lines.append(f"{line} ")
elif in_python and not was_in_python:
lines.append("\n```python\n")
lines.append(re.sub(">>> ", "", line) + "\n")
else:
lines.append(re.sub(">>> ", "", line) + "\n")
was_in_python = in_python
if was_in_python:
lines.append("\n```")
lines.append("\n\n")
return lines
def notes_section(doc):
lines = []
if "Notes" in doc and len(doc["Notes"]) > 0:
lines.append("!!! note\n")
lines.append(f" {' '.join(doc['Notes'])}\n\n")
return lines
def refs_section(doc):
"""
Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section
"""
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines
def examples_section(doc, header_level):
"""
Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines
def returns_section(thing, doc, header_level):
"""
Generate markdown for Returns section.
Parameters
----------
thing : function
Function to produce returns for
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
return_type = None
try:
return_type = thing.__annotations__["return"]
except AttributeError:
try:
return_type = thing.fget.__annotations__["return"]
except:
pass
except KeyError:
pass
if return_type is None:
return_type = ""
else:
# print(f"{thing} has annotated return type {return_type}")
try:
return_type = (
f"{return_type.__name__}"
if return_type.__module__ == "builtins"
else f"{return_type.__module__}.{return_type.__name__}"
)
except AttributeError:
return_type = str(return_type)
# print(return_type)
try:
if "Returns" in doc and len(doc["Returns"]) > 0 or return_type != "":
lines.append(f"{'#'*(header_level+1)} Returns\n")
if return_type != "" and len(doc["Returns"]) == 1:
name, typ, desc = doc["Returns"][0]
if typ != "":
lines.append(f"- `{name}`: ``{return_type}``")
else:
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
if desc != "":
lines.append(f" {' '.join(desc)}\n\n")
elif return_type != "":
lines.append(f"- ``{return_type}``")
lines.append("\n\n")
else:
for name, typ, desc in doc["Returns"]:
if ":" in name:
name, typ = name.split(":")
if typ != "":
line = f"- `{name}`: {mangle_types(typ)}"
else:
line = f"- {mangle_types(name)}"
line += "\n\n"
lines.append(line)
lines.append(f" {' '.join(desc)}\n\n")
except Exception as e:
# print(e)
# print(doc)
pass
return lines
def summary(doc):
"""
Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings
"""
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines
def params_section(thing, doc, header_level):
"""
Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section
"""
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
)
def escape(string):
"""
Escape underscores in markdown.
Parameters
----------
string : str
String to escape
Returns
-------
str
The string, with `_`s escaped with backslashes
"""
return string.replace("_", "\\_")
def get_source_link(thing, source_location):
"""
Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found
"""
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return ""
def get_signature(name, thing):
"""
Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of
"""
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n"
def _get_names(names, types):
"""
Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used.
"""
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types
def string_annotation(typ, default):
"""
Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation
"""
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string
def type_list(signature, doc, header):
"""
Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list
"""
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else []
def _split_props(thing, doc):
"""
Separate properties from other kinds of member.
"""
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, []
def attributes_section(thing, doc, header_level):
"""
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
"""
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl
def enum_doc(name, enum, header_level, source_location):
"""
Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines
def to_doc(name, thing, header_level, source_location):
"""
Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code
"""
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines
@click.command()
@click.argument("module_name")
@click.argument("output_dir")
@click.argument("source-location")
def cli(module_name, output_dir, source_location):
make_api_doc(module_name, output_dir, source_location)
def make_api_doc(module_name, output_dir, source_location):
module = importlib.import_module(module_name)
output_dir = pathlib.Path(output_dir).absolute()
files = []
for module_name, module, leaf, file in get_all_modules_from_files(module):
# print(module_name)
def do_doc():
doc_path, doc = doc_module(
module_name, module, output_dir, source_location, leaf
)
with open(doc_path.absolute(), "w") as doc_file:
doc_file.write(doc)
do_doc()
files.append((file, do_doc))
print(f"Built documentation for {file.absolute()}")
return files
if __name__ == "__main__":
cli()
|
greenape/mktheapidocs
|
mktheapidocs/plugin.py
|
PyDocFile._get_stem
|
python
|
def _get_stem(self):
filename = os.path.basename(self.src_path)
stem, ext = os.path.splitext(filename)
return "index" if stem in ("index", "README", "__init__") else stem
|
Return the name of the file without it's extension.
|
train
|
https://github.com/greenape/mktheapidocs/blob/a45e8b43ddd80ed360fe1e98d4f73dc11c4e7bf7/mktheapidocs/plugin.py#L21-L25
| null |
class PyDocFile(mkdocs.structure.files.File):
def __init__(self, path, src_dir, dest_dir, use_directory_urls, parent):
self.parent = parent
super().__init__(path, src_dir, dest_dir, use_directory_urls)
self.abs_src_path = self.parent
def is_documentation_page(self):
return True
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
render_form
|
python
|
def render_form(form, exclude=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field
Args:
form (form): Django Form
exclude (string): exclude fields by name, separated by commas
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
if hasattr(form, "Meta") and hasattr(form.Meta, "layout"):
return render_layout_form(form, getattr(form.Meta, "layout"), **kwargs)
if exclude:
exclude = exclude.split(",")
for field in exclude:
form.fields.pop(field)
return mark_safe("".join([
render_field(field, **kwargs) for field in form
]))
|
Render an entire form with Semantic UI wrappers for each field
Args:
form (form): Django Form
exclude (string): exclude fields by name, separated by commas
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L153-L174
| null |
from django import template
from django.utils.html import format_html, escape
from django.utils.safestring import mark_safe
from ..wrappers import *
from ..utils import pad
from ..fields import FIELDS
register = template.Library()
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
@register.simple_tag
def render_field(field, **kwargs):
"""Render field in a Semantic UI wrapper
Args:
field (BoundField): Form field
**kwargs (dict): Keyword arguments to add onto field
Returns:
string: HTML code for field
"""
if field:
return Field(field, **kwargs).render()
@register.simple_tag()
def render_form(form, exclude=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field
Args:
form (form): Django Form
exclude (string): exclude fields by name, separated by commas
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
if hasattr(form, "Meta") and hasattr(form.Meta, "layout"):
return render_layout_form(form, getattr(form.Meta, "layout"), **kwargs)
if exclude:
exclude = exclude.split(",")
for field in exclude:
form.fields.pop(field)
return mark_safe("".join([
render_field(field, **kwargs) for field in form
]))
@register.simple_tag()
def render_layout_form(form, layout=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field with
a layout provided in the template or in the form class
Args:
form (form): Django Form
layout (tuple): layout design
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
def make_component(type_, *args):
"""Loop through tuples to make field wrappers for fields.
"""
if type_ == "Text":
return "".join(args)
elif type_ == "Field":
result = ""
for c in args:
if isinstance(c, tuple):
result += make_component(*c)
elif isinstance(c, str):
result += render_field(form.__getitem__(c), **kwargs)
return result
else:
if len(args) < 2:
return ""
result = "".join([make_component(*c) for c in args])
if type_:
return "<div class=\"%s\">%s</div>" % (type_.lower(), result)
else:
return result
return mark_safe("".join([make_component(*component) for component in layout]))
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
render_layout_form
|
python
|
def render_layout_form(form, layout=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field with
a layout provided in the template or in the form class
Args:
form (form): Django Form
layout (tuple): layout design
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
def make_component(type_, *args):
"""Loop through tuples to make field wrappers for fields.
"""
if type_ == "Text":
return "".join(args)
elif type_ == "Field":
result = ""
for c in args:
if isinstance(c, tuple):
result += make_component(*c)
elif isinstance(c, str):
result += render_field(form.__getitem__(c), **kwargs)
return result
else:
if len(args) < 2:
return ""
result = "".join([make_component(*c) for c in args])
if type_:
return "<div class=\"%s\">%s</div>" % (type_.lower(), result)
else:
return result
return mark_safe("".join([make_component(*component) for component in layout]))
|
Render an entire form with Semantic UI wrappers for each field with
a layout provided in the template or in the form class
Args:
form (form): Django Form
layout (tuple): layout design
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L178-L214
| null |
from django import template
from django.utils.html import format_html, escape
from django.utils.safestring import mark_safe
from ..wrappers import *
from ..utils import pad
from ..fields import FIELDS
register = template.Library()
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
@register.simple_tag
def render_field(field, **kwargs):
"""Render field in a Semantic UI wrapper
Args:
field (BoundField): Form field
**kwargs (dict): Keyword arguments to add onto field
Returns:
string: HTML code for field
"""
if field:
return Field(field, **kwargs).render()
@register.simple_tag()
def render_form(form, exclude=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field
Args:
form (form): Django Form
exclude (string): exclude fields by name, separated by commas
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
if hasattr(form, "Meta") and hasattr(form.Meta, "layout"):
return render_layout_form(form, getattr(form.Meta, "layout"), **kwargs)
if exclude:
exclude = exclude.split(",")
for field in exclude:
form.fields.pop(field)
return mark_safe("".join([
render_field(field, **kwargs) for field in form
]))
@register.simple_tag()
def render_layout_form(form, layout=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field with
a layout provided in the template or in the form class
Args:
form (form): Django Form
layout (tuple): layout design
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
def make_component(type_, *args):
"""Loop through tuples to make field wrappers for fields.
"""
if type_ == "Text":
return "".join(args)
elif type_ == "Field":
result = ""
for c in args:
if isinstance(c, tuple):
result += make_component(*c)
elif isinstance(c, str):
result += render_field(form.__getitem__(c), **kwargs)
return result
else:
if len(args) < 2:
return ""
result = "".join([make_component(*c) for c in args])
if type_:
return "<div class=\"%s\">%s</div>" % (type_.lower(), result)
else:
return result
return mark_safe("".join([make_component(*component) for component in layout]))
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.set_input
|
python
|
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
|
Returns form input field of Field.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L36-L40
| null |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.set_label
|
python
|
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
|
Set label markup.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L43-L51
| null |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.set_help
|
python
|
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
|
Set help text markup.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L54-L60
| null |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.set_errors
|
python
|
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
|
Set errors markup.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L63-L72
| null |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.set_icon
|
python
|
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
|
Wrap current field with icon wrapper.
This setter must be the last setter called.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L75-L90
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n"
] |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.set_classes
|
python
|
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
|
Set field properties and custom classes.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L93-L112
| null |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
semanticuiforms/templatetags/semanticui.py
|
Field.render
|
python
|
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
Render field as HTML.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/templatetags/semanticui.py#L115-L133
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n",
"def set_input(self):\n\t\"\"\"Returns form input field of Field.\n\t\"\"\"\n\tname = self.attrs.get(\"_override\", self.widget.__class__.__name__)\n\tself.values[\"field\"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))\n"
] |
class Field():
"""
Semantic UI Form Field.
"""
def __init__(self, field, **kwargs):
"""Initializer for Field class.
Args:
field (BoundField): Form field
**kwargs (dict): Field attributes
"""
# Kwargs will always be additional attributes
self.attrs = kwargs
self.attrs.update(field.field.widget.attrs)
# Field
self.field = field
self.widget = field.field.widget
# Defaults
self.values = {"class": [], "label": "", "help": "", "errors": ""}
def set_input(self):
"""Returns form input field of Field.
"""
name = self.attrs.get("_override", self.widget.__class__.__name__)
self.values["field"] = str(FIELDS.get(name, FIELDS.get(None))(self.field, self.attrs))
def set_label(self):
"""Set label markup.
"""
if not self.field.label or self.attrs.get("_no_label"):
return
self.values["label"] = format_html(
LABEL_TEMPLATE, self.field.html_name, mark_safe(self.field.label)
)
def set_help(self):
"""Set help text markup.
"""
if not (self.field.help_text and self.attrs.get("_help")):
return
self.values["help"] = HELP_TEMPLATE.format(self.field.help_text)
def set_errors(self):
"""Set errors markup.
"""
if not self.field.errors or self.attrs.get("_no_errors"):
return
self.values["class"].append("error")
for error in self.field.errors:
self.values["errors"] += ERROR_WRAPPER % {"message": error}
def set_icon(self):
"""Wrap current field with icon wrapper.
This setter must be the last setter called.
"""
if not self.attrs.get("_icon"):
return
if "Date" in self.field.field.__class__.__name__:
return
self.values["field"] = INPUT_WRAPPER % {
"field": self.values["field"],
"help": self.values["help"],
"style": "%sicon " % escape(pad(self.attrs.get("_align", ""))),
"icon": format_html(ICON_TEMPLATE, self.attrs.get("_icon")),
}
def set_classes(self):
"""Set field properties and custom classes.
"""
# Custom field classes on field wrapper
if self.attrs.get("_field_class"):
self.values["class"].append(escape(self.attrs.get("_field_class")))
# Inline class
if self.attrs.get("_inline"):
self.values["class"].append("inline")
# Disabled class
if self.field.field.disabled:
self.values["class"].append("disabled")
# Required class
if self.field.field.required and not self.attrs.get("_no_required"):
self.values["class"].append("required")
elif self.attrs.get("_required") and not self.field.field.required:
self.values["class"].append("required")
def render(self):
"""Render field as HTML.
"""
self.widget.attrs = {
k: v for k, v in self.attrs.items() if k[0] != "_"
}
self.set_input()
if not self.attrs.get("_no_wrapper"):
self.set_label()
self.set_help()
self.set_errors()
self.set_classes()
self.set_icon() # Must be the bottom-most setter
self.values["class"] = pad(" ".join(self.values["class"])).lstrip()
result = mark_safe(FIELD_WRAPPER % self.values)
self.widget.attrs = self.attrs # Re-assign variables
return result
|
thetarkus/django-semanticui-forms
|
example/app/apps.py
|
ExampleAppConfig.ready
|
python
|
def ready(self):
from .models import Friend
# Requires migrations, not necessary
try:
Friend.objects.get_or_create(first_name="Michael", last_name="1", age=22)
Friend.objects.get_or_create(first_name="Joe", last_name="2", age=21)
Friend.objects.get_or_create(first_name="Bill", last_name="3", age=20)
except:
pass
|
Create test friends for displaying.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/example/app/apps.py#L12-L24
| null |
class ExampleAppConfig(AppConfig):
"""
App config for the Example App.
"""
name = "app"
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_booleanfield
|
python
|
def render_booleanfield(field, attrs):
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
|
Render BooleanField with label next to instead of above.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L33-L47
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_choicefield(field, attrs, choices=None):
"""
Render ChoiceField as 'div' dropdown rather than select for more
customization.
"""
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
def render_iconchoicefield(field, attrs):
"""
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
"""
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
def render_multiplechoicefield(field, attrs, choices=None):
"""
MultipleChoiceField uses its own field, but also uses a queryset.
"""
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
def render_datefield(field, attrs, style="date"):
"""
DateField that uses wrappers.CALENDAR_WRAPPER.
"""
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
def render_filefield(field, attrs):
"""
Render a typical File Field.
"""
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_choicefield
|
python
|
def render_choicefield(field, attrs, choices=None):
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
|
Render ChoiceField as 'div' dropdown rather than select for more
customization.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L50-L71
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n",
"def get_choices(field):\n\t\"\"\"\n\tFind choices of a field, whether it has choices or has a queryset.\n\n\tArgs:\n\t\tfield (BoundField): Django form boundfield\n\n\tReturns:\n\t\tlist: List of choices\n\t\"\"\"\t\n\tempty_label = getattr(field.field, \"empty_label\", False)\n\tneeds_empty_value = False\n\tchoices = []\n\n\t# Data is the choices\n\tif hasattr(field.field, \"_choices\"):\n\t\tchoices = field.field._choices\n\n\t# Data is a queryset\n\telif hasattr(field.field, \"_queryset\"):\n\t\tqueryset = field.field._queryset\n\t\tfield_name = getattr(field.field, \"to_field_name\") or \"pk\"\n\t\tchoices += ((getattr(obj, field_name), str(obj)) for obj in queryset)\n\n\t# Determine if an empty value is needed\n\tif choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):\n\t\tneeds_empty_value = True\n\n\t\t# Delete empty option\n\t\tif not choices[0][0]:\n\t\t\tdel choices[0]\n\n\t# Remove dashed empty choice\n\tif empty_label == BLANK_CHOICE_DASH[0][1]:\n\t\tempty_label = None\n\n\t# Add custom empty value\n\tif empty_label or not field.field.required:\n\t\tif needs_empty_value:\n\t\t\tchoices.insert(0, (\"\", empty_label or BLANK_CHOICE_DASH[0][1]))\n\n\treturn choices",
"def get_placeholder_text():\n\t\"\"\"\n\tReturn default or developer specified placeholder text.\n\t\"\"\"\n\treturn getattr(settings, \"SUI_PLACEHOLDER_TEXT\", \"Select\")\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_booleanfield(field, attrs):
"""
Render BooleanField with label next to instead of above.
"""
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
def render_iconchoicefield(field, attrs):
"""
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
"""
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
def render_multiplechoicefield(field, attrs, choices=None):
"""
MultipleChoiceField uses its own field, but also uses a queryset.
"""
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
def render_datefield(field, attrs, style="date"):
"""
DateField that uses wrappers.CALENDAR_WRAPPER.
"""
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
def render_filefield(field, attrs):
"""
Render a typical File Field.
"""
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_iconchoicefield
|
python
|
def render_iconchoicefield(field, attrs):
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
|
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L74-L96
|
[
"def render_choicefield(field, attrs, choices=None):\n\t\"\"\"\n\tRender ChoiceField as 'div' dropdown rather than select for more\n\tcustomization.\n\t\"\"\"\n\t# Allow custom choice list, but if no custom choice list then wrap all\n\t# choices into the `wrappers.CHOICE_TEMPLATE`\n\tif not choices:\n\t\tchoices = format_html_join(\"\", wrappers.CHOICE_TEMPLATE, get_choices(field))\n\n\t# Accessing the widget attrs directly saves them for a new use after\n\t# a POST request\n\tfield.field.widget.attrs[\"value\"] = field.value() or attrs.get(\"value\", \"\")\n\n\treturn wrappers.DROPDOWN_WRAPPER % {\n\t\t\"name\": field.html_name,\n\t\t\"attrs\": pad(flatatt(field.field.widget.attrs)),\n\t\t\"placeholder\": attrs.get(\"placeholder\") or get_placeholder_text(),\n\t\t\"style\": pad(attrs.get(\"_style\", \"\")),\n\t\t\"icon\": format_html(wrappers.ICON_TEMPLATE, attrs.get(\"_dropdown_icon\") or \"dropdown\"),\n\t\t\"choices\": choices\n\t}\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_booleanfield(field, attrs):
"""
Render BooleanField with label next to instead of above.
"""
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
def render_choicefield(field, attrs, choices=None):
"""
Render ChoiceField as 'div' dropdown rather than select for more
customization.
"""
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
def render_multiplechoicefield(field, attrs, choices=None):
"""
MultipleChoiceField uses its own field, but also uses a queryset.
"""
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
def render_datefield(field, attrs, style="date"):
"""
DateField that uses wrappers.CALENDAR_WRAPPER.
"""
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
def render_filefield(field, attrs):
"""
Render a typical File Field.
"""
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_countryfield
|
python
|
def render_countryfield(field, attrs):
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
|
Render a custom ChoiceField specific for CountryFields.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L99-L109
|
[
"def render_choicefield(field, attrs, choices=None):\n\t\"\"\"\n\tRender ChoiceField as 'div' dropdown rather than select for more\n\tcustomization.\n\t\"\"\"\n\t# Allow custom choice list, but if no custom choice list then wrap all\n\t# choices into the `wrappers.CHOICE_TEMPLATE`\n\tif not choices:\n\t\tchoices = format_html_join(\"\", wrappers.CHOICE_TEMPLATE, get_choices(field))\n\n\t# Accessing the widget attrs directly saves them for a new use after\n\t# a POST request\n\tfield.field.widget.attrs[\"value\"] = field.value() or attrs.get(\"value\", \"\")\n\n\treturn wrappers.DROPDOWN_WRAPPER % {\n\t\t\"name\": field.html_name,\n\t\t\"attrs\": pad(flatatt(field.field.widget.attrs)),\n\t\t\"placeholder\": attrs.get(\"placeholder\") or get_placeholder_text(),\n\t\t\"style\": pad(attrs.get(\"_style\", \"\")),\n\t\t\"icon\": format_html(wrappers.ICON_TEMPLATE, attrs.get(\"_dropdown_icon\") or \"dropdown\"),\n\t\t\"choices\": choices\n\t}\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_booleanfield(field, attrs):
"""
Render BooleanField with label next to instead of above.
"""
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
def render_choicefield(field, attrs, choices=None):
"""
Render ChoiceField as 'div' dropdown rather than select for more
customization.
"""
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
def render_iconchoicefield(field, attrs):
"""
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
"""
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
def render_multiplechoicefield(field, attrs, choices=None):
"""
MultipleChoiceField uses its own field, but also uses a queryset.
"""
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
def render_datefield(field, attrs, style="date"):
"""
DateField that uses wrappers.CALENDAR_WRAPPER.
"""
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
def render_filefield(field, attrs):
"""
Render a typical File Field.
"""
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_multiplechoicefield
|
python
|
def render_multiplechoicefield(field, attrs, choices=None):
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
|
MultipleChoiceField uses its own field, but also uses a queryset.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L112-L124
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n",
"def get_choices(field):\n\t\"\"\"\n\tFind choices of a field, whether it has choices or has a queryset.\n\n\tArgs:\n\t\tfield (BoundField): Django form boundfield\n\n\tReturns:\n\t\tlist: List of choices\n\t\"\"\"\t\n\tempty_label = getattr(field.field, \"empty_label\", False)\n\tneeds_empty_value = False\n\tchoices = []\n\n\t# Data is the choices\n\tif hasattr(field.field, \"_choices\"):\n\t\tchoices = field.field._choices\n\n\t# Data is a queryset\n\telif hasattr(field.field, \"_queryset\"):\n\t\tqueryset = field.field._queryset\n\t\tfield_name = getattr(field.field, \"to_field_name\") or \"pk\"\n\t\tchoices += ((getattr(obj, field_name), str(obj)) for obj in queryset)\n\n\t# Determine if an empty value is needed\n\tif choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):\n\t\tneeds_empty_value = True\n\n\t\t# Delete empty option\n\t\tif not choices[0][0]:\n\t\t\tdel choices[0]\n\n\t# Remove dashed empty choice\n\tif empty_label == BLANK_CHOICE_DASH[0][1]:\n\t\tempty_label = None\n\n\t# Add custom empty value\n\tif empty_label or not field.field.required:\n\t\tif needs_empty_value:\n\t\t\tchoices.insert(0, (\"\", empty_label or BLANK_CHOICE_DASH[0][1]))\n\n\treturn choices",
"def get_placeholder_text():\n\t\"\"\"\n\tReturn default or developer specified placeholder text.\n\t\"\"\"\n\treturn getattr(settings, \"SUI_PLACEHOLDER_TEXT\", \"Select\")\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_booleanfield(field, attrs):
"""
Render BooleanField with label next to instead of above.
"""
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
def render_choicefield(field, attrs, choices=None):
"""
Render ChoiceField as 'div' dropdown rather than select for more
customization.
"""
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
def render_iconchoicefield(field, attrs):
"""
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
"""
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
def render_datefield(field, attrs, style="date"):
"""
DateField that uses wrappers.CALENDAR_WRAPPER.
"""
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
def render_filefield(field, attrs):
"""
Render a typical File Field.
"""
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_datefield
|
python
|
def render_datefield(field, attrs, style="date"):
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
|
DateField that uses wrappers.CALENDAR_WRAPPER.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L127-L136
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_booleanfield(field, attrs):
"""
Render BooleanField with label next to instead of above.
"""
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
def render_choicefield(field, attrs, choices=None):
"""
Render ChoiceField as 'div' dropdown rather than select for more
customization.
"""
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
def render_iconchoicefield(field, attrs):
"""
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
"""
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
def render_multiplechoicefield(field, attrs, choices=None):
"""
MultipleChoiceField uses its own field, but also uses a queryset.
"""
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
def render_filefield(field, attrs):
"""
Render a typical File Field.
"""
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/fields.py
|
render_filefield
|
python
|
def render_filefield(field, attrs):
field.field.widget.attrs["style"] = "display:none"
if not "_no_label" in attrs:
attrs["_no_label"] = True
return wrappers.FILE_WRAPPER % {
"field": field,
"id": "id_" + field.name,
"style": pad(attrs.get("_style", "")),
"text": escape(attrs.get("_text", "Select File")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon", "file outline"))
}
|
Render a typical File Field.
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/fields.py#L153-L168
|
[
"def pad(value):\n\t\"\"\"\n\tAdd one space padding around value if value is valid.\n\n\tArgs:\n\t\tvalue (string): Value\n\n\tReturns:\n\t\tstring: Value with padding if value was valid else one space\n\t\"\"\"\n\treturn \" %s \" % value if value else \" \"\n"
] |
from django.utils.html import format_html, format_html_join, escape
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from . import wrappers
from .utils import pad, get_choices, get_placeholder_text
def render_charfield(field, attrs):
"""
Render the generic CharField.
"""
return field
def render_hiddenfield(field, attrs):
"""
Return input as a hidden field.
"""
if not "_no_wrapper" in attrs:
attrs["_no_wrapper"] = 1
return field
def render_nullbooleanfield(field, attrs):
"""
Render NullBooleanField as dropdown. ("Unknown", "Yes", "No")
"""
field.field.widget.attrs["class"] = "ui dropdown"
return field
def render_booleanfield(field, attrs):
"""
Render BooleanField with label next to instead of above.
"""
attrs.setdefault("_no_label", True) # No normal label for booleanfields
attrs.setdefault("_inline", True) # Checkbox should be inline
field.field.widget.attrs["style"] = "display:hidden" # Hidden field
return wrappers.CHECKBOX_WRAPPER % {
"style": pad(attrs.get("_style", "")),
"field": field,
"label": format_html(
wrappers.LABEL_TEMPLATE, field.html_name, mark_safe(field.label)
)
}
def render_choicefield(field, attrs, choices=None):
"""
Render ChoiceField as 'div' dropdown rather than select for more
customization.
"""
# Allow custom choice list, but if no custom choice list then wrap all
# choices into the `wrappers.CHOICE_TEMPLATE`
if not choices:
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
# Accessing the widget attrs directly saves them for a new use after
# a POST request
field.field.widget.attrs["value"] = field.value() or attrs.get("value", "")
return wrappers.DROPDOWN_WRAPPER % {
"name": field.html_name,
"attrs": pad(flatatt(field.field.widget.attrs)),
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
"choices": choices
}
def render_iconchoicefield(field, attrs):
"""
Render a ChoiceField with icon support; where the value is split by a pipe
(|): first element being the value, last element is the icon.
"""
choices = ""
# Loop over every choice to manipulate
for choice in field.field._choices:
value = choice[1].split("|") # Value|Icon
# Each choice is formatted with the choice value being split with
# the "|" as the delimeter. First element is the value, the second
# is the icon to be used.
choices += format_html(
wrappers.ICON_CHOICE_TEMPLATE,
choice[0], # Key
mark_safe(wrappers.ICON_TEMPLATE.format(value[-1])), # Icon
value[0] # Value
)
# Render a dropdown field
return render_choicefield(field, attrs, choices)
def render_countryfield(field, attrs):
"""
Render a custom ChoiceField specific for CountryFields.
"""
choices = ((k, k.lower(), v)
for k, v in field.field._choices[1:])
# Render a `ChoiceField` with all countries
return render_choicefield(
field, attrs, format_html_join("", wrappers.COUNTRY_TEMPLATE, choices)
)
def render_multiplechoicefield(field, attrs, choices=None):
"""
MultipleChoiceField uses its own field, but also uses a queryset.
"""
choices = format_html_join("", wrappers.CHOICE_TEMPLATE, get_choices(field))
return wrappers.MULTIPLE_DROPDOWN_WRAPPER % {
"name": field.html_name,
"field": field,
"choices": choices,
"placeholder": attrs.get("placeholder") or get_placeholder_text(),
"style": pad(attrs.get("_style", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_dropdown_icon") or "dropdown"),
}
def render_datefield(field, attrs, style="date"):
"""
DateField that uses wrappers.CALENDAR_WRAPPER.
"""
return wrappers.CALENDAR_WRAPPER % {
"field": field,
"style": pad(style),
"align": pad(attrs.get("_align", "")),
"icon": format_html(wrappers.ICON_TEMPLATE, attrs.get("_icon")),
}
def render_timefield(field, attrs):
"""
DateField with 'time' style.
"""
return render_datefield(field, attrs, "time")
def render_datetimefield(field, attrs):
"""
DateField with 'datetime' style.
"""
return render_datefield(field, attrs, "datetime")
FIELDS = {
# Generic Fields
None: render_charfield,
# Character Fields
"TextInput": render_charfield,
# Hidden Fields
"HiddenInput": render_hiddenfield,
"MultipleHiddenInput": render_hiddenfield,
# Boolean Fields
"CheckboxInput": render_booleanfield,
"NullBooleanSelect": render_nullbooleanfield,
# Choice Fields
"Select": render_choicefield,
"IconSelect": render_iconchoicefield,
"SelectMultiple": render_multiplechoicefield,
"CountrySelect": render_countryfield,
# Date/Time Fields
"TimeInput": render_timefield,
"DateInput": render_datefield,
"DateTimeInput": render_datetimefield,
# File Fields
"FileInput": render_filefield,
}
|
thetarkus/django-semanticui-forms
|
semanticuiforms/utils.py
|
get_choices
|
python
|
def get_choices(field):
"""
Find choices of a field, whether it has choices or has a queryset.
Args:
field (BoundField): Django form boundfield
Returns:
list: List of choices
"""
empty_label = getattr(field.field, "empty_label", False)
needs_empty_value = False
choices = []
# Data is the choices
if hasattr(field.field, "_choices"):
choices = field.field._choices
# Data is a queryset
elif hasattr(field.field, "_queryset"):
queryset = field.field._queryset
field_name = getattr(field.field, "to_field_name") or "pk"
choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)
# Determine if an empty value is needed
if choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):
needs_empty_value = True
# Delete empty option
if not choices[0][0]:
del choices[0]
# Remove dashed empty choice
if empty_label == BLANK_CHOICE_DASH[0][1]:
empty_label = None
# Add custom empty value
if empty_label or not field.field.required:
if needs_empty_value:
choices.insert(0, ("", empty_label or BLANK_CHOICE_DASH[0][1]))
return choices
|
Find choices of a field, whether it has choices or has a queryset.
Args:
field (BoundField): Django form boundfield
Returns:
list: List of choices
|
train
|
https://github.com/thetarkus/django-semanticui-forms/blob/9664c6f01621568c3fa39b36439178586649eafe/semanticuiforms/utils.py#L25-L66
| null |
from django.db.models.fields import BLANK_CHOICE_DASH
from django.conf import settings
def pad(value):
"""
Add one space padding around value if value is valid.
Args:
value (string): Value
Returns:
string: Value with padding if value was valid else one space
"""
return " %s " % value if value else " "
def get_placeholder_text():
"""
Return default or developer specified placeholder text.
"""
return getattr(settings, "SUI_PLACEHOLDER_TEXT", "Select")
|
astropy/pyregion
|
pyregion/wcs_converter.py
|
_generate_arg_types
|
python
|
def _generate_arg_types(coordlist_length, shape_name):
from .ds9_region_parser import ds9_shape_defs
from .ds9_attr_parser import ds9_shape_in_comment_defs
if shape_name in ds9_shape_defs:
shape_def = ds9_shape_defs[shape_name]
else:
shape_def = ds9_shape_in_comment_defs[shape_name]
initial_arg_types = shape_def.args_list
arg_repeats = shape_def.args_repeat
if arg_repeats is None:
return initial_arg_types
# repeat args between n1 and n2
n1, n2 = arg_repeats
arg_types = list(initial_arg_types[:n1])
num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)
arg_types.extend((num_of_repeats - n1) //
(n2 - n1) * initial_arg_types[n1:n2])
arg_types.extend(initial_arg_types[n2:])
return arg_types
|
Find coordinate types based on shape name and coordlist length
This function returns a list of coordinate types based on which
coordinates can be repeated for a given type of shap
Parameters
----------
coordlist_length : int
The number of coordinates or arguments used to define the shape.
shape_name : str
One of the names in `pyregion.ds9_shape_defs`.
Returns
-------
arg_types : list
A list of objects from `pyregion.region_numbers` with a length equal to
coordlist_length.
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/wcs_converter.py#L13-L55
| null |
import copy
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area, proj_plane_pixel_scales
import numpy as np
from .wcs_helper import _estimate_angle
from .region_numbers import CoordOdd, Distance, Angle
from .parser_helper import Shape, CoordCommand
from .region_numbers import SimpleNumber, SimpleInteger
def convert_to_imagecoord(shape, header):
"""Convert the coordlist of `shape` to image coordinates
Parameters
----------
shape : `pyregion.parser_helper.Shape`
The `Shape` to convert coordinates
header : `~astropy.io.fits.Header`
Specifies what WCS transformations to use.
Returns
-------
new_coordlist : list
A list of image coordinates defining the shape.
"""
arg_types = _generate_arg_types(len(shape.coord_list), shape.name)
new_coordlist = []
is_even_distance = True
coord_list_iter = iter(zip(shape.coord_list, arg_types))
new_wcs = WCS(header)
pixel_scales = proj_plane_pixel_scales(new_wcs)
for coordinate, coordinate_type in coord_list_iter:
if coordinate_type == CoordOdd:
even_coordinate = next(coord_list_iter)[0]
old_coordinate = SkyCoord(coordinate, even_coordinate,
frame=shape.coord_format, unit='degree',
obstime='J2000')
new_coordlist.extend(
np.asscalar(x)
for x in old_coordinate.to_pixel(new_wcs, origin=1)
)
elif coordinate_type == Distance:
if arg_types[-1] == Angle:
degree_per_pixel = pixel_scales[0 if is_even_distance else 1]
is_even_distance = not is_even_distance
else:
degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))
new_coordlist.append(coordinate / degree_per_pixel)
elif coordinate_type == Angle:
new_angle = _estimate_angle(coordinate,
shape.coord_format,
header)
new_coordlist.append(new_angle)
else:
new_coordlist.append(coordinate)
return new_coordlist
def convert_physical_to_imagecoord(shape, header):
arg_types = _generate_arg_types(len(shape.coord_list), shape.name)
new_coordlist = []
coord_list_iter = iter(zip(shape.coord_list, arg_types))
from .physical_coordinate import PhysicalCoordinate
pc = PhysicalCoordinate(header)
for coordinate, coordinate_type in coord_list_iter:
if coordinate_type == CoordOdd:
even_coordinate = next(coord_list_iter)[0]
xy0 = pc.to_image(coordinate, even_coordinate)
new_coordlist.extend(xy0)
elif coordinate_type == Distance:
new_coordlist.append(pc.to_image_distance(coordinate))
else:
new_coordlist.append(coordinate)
return new_coordlist
def check_wcs_and_convert(args, all_dms=False):
is_wcs = False
value_list = []
for a in args:
if isinstance(a, SimpleNumber) or isinstance(a, SimpleInteger) \
or all_dms:
value_list.append(a.v)
else:
value_list.append(a.degree)
is_wcs = True
return is_wcs, value_list
def check_wcs(l):
default_coord = "physical"
for l1, c1 in l:
if isinstance(l1, CoordCommand):
default_coord = l1.text.lower()
continue
if isinstance(l1, Shape):
if default_coord == "galactic":
is_wcs, coord_list = check_wcs_and_convert(l1.params,
all_dms=True)
else:
is_wcs, coord_list = check_wcs_and_convert(l1.params)
if is_wcs and (default_coord == "physical"): # ciao format
coord_format = "fk5"
else:
coord_format = default_coord
l1n = copy.copy(l1)
l1n.coord_list = coord_list
l1n.coord_format = coord_format
yield l1n, c1
else:
yield l1, c1
|
astropy/pyregion
|
pyregion/wcs_converter.py
|
convert_to_imagecoord
|
python
|
def convert_to_imagecoord(shape, header):
arg_types = _generate_arg_types(len(shape.coord_list), shape.name)
new_coordlist = []
is_even_distance = True
coord_list_iter = iter(zip(shape.coord_list, arg_types))
new_wcs = WCS(header)
pixel_scales = proj_plane_pixel_scales(new_wcs)
for coordinate, coordinate_type in coord_list_iter:
if coordinate_type == CoordOdd:
even_coordinate = next(coord_list_iter)[0]
old_coordinate = SkyCoord(coordinate, even_coordinate,
frame=shape.coord_format, unit='degree',
obstime='J2000')
new_coordlist.extend(
np.asscalar(x)
for x in old_coordinate.to_pixel(new_wcs, origin=1)
)
elif coordinate_type == Distance:
if arg_types[-1] == Angle:
degree_per_pixel = pixel_scales[0 if is_even_distance else 1]
is_even_distance = not is_even_distance
else:
degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))
new_coordlist.append(coordinate / degree_per_pixel)
elif coordinate_type == Angle:
new_angle = _estimate_angle(coordinate,
shape.coord_format,
header)
new_coordlist.append(new_angle)
else:
new_coordlist.append(coordinate)
return new_coordlist
|
Convert the coordlist of `shape` to image coordinates
Parameters
----------
shape : `pyregion.parser_helper.Shape`
The `Shape` to convert coordinates
header : `~astropy.io.fits.Header`
Specifies what WCS transformations to use.
Returns
-------
new_coordlist : list
A list of image coordinates defining the shape.
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/wcs_converter.py#L58-L115
|
[
"def _generate_arg_types(coordlist_length, shape_name):\n \"\"\"Find coordinate types based on shape name and coordlist length\n\n This function returns a list of coordinate types based on which\n coordinates can be repeated for a given type of shap\n\n Parameters\n ----------\n coordlist_length : int\n The number of coordinates or arguments used to define the shape.\n\n shape_name : str\n One of the names in `pyregion.ds9_shape_defs`.\n\n Returns\n -------\n arg_types : list\n A list of objects from `pyregion.region_numbers` with a length equal to\n coordlist_length.\n\n \"\"\"\n from .ds9_region_parser import ds9_shape_defs\n from .ds9_attr_parser import ds9_shape_in_comment_defs\n\n if shape_name in ds9_shape_defs:\n shape_def = ds9_shape_defs[shape_name]\n else:\n shape_def = ds9_shape_in_comment_defs[shape_name]\n\n initial_arg_types = shape_def.args_list\n arg_repeats = shape_def.args_repeat\n\n if arg_repeats is None:\n return initial_arg_types\n\n # repeat args between n1 and n2\n n1, n2 = arg_repeats\n arg_types = list(initial_arg_types[:n1])\n num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)\n arg_types.extend((num_of_repeats - n1) //\n (n2 - n1) * initial_arg_types[n1:n2])\n arg_types.extend(initial_arg_types[n2:])\n return arg_types\n",
"def _estimate_angle(angle, reg_coordinate_frame, header):\n \"\"\"Transform an angle into a different frame\n\n Parameters\n ----------\n angle : float, int\n The number of degrees, measured from the Y axis in origin's frame\n\n reg_coordinate_frame : str\n Coordinate frame in which ``angle`` is defined\n\n header : `~astropy.io.fits.Header` instance\n Header describing the image\n\n Returns\n -------\n angle : float\n The angle, measured from the Y axis in the WCS defined by ``header'`\n \"\"\"\n y_axis_rot = _calculate_rotation_angle(reg_coordinate_frame, header)\n return angle - y_axis_rot\n"
] |
import copy
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area, proj_plane_pixel_scales
import numpy as np
from .wcs_helper import _estimate_angle
from .region_numbers import CoordOdd, Distance, Angle
from .parser_helper import Shape, CoordCommand
from .region_numbers import SimpleNumber, SimpleInteger
def _generate_arg_types(coordlist_length, shape_name):
"""Find coordinate types based on shape name and coordlist length
This function returns a list of coordinate types based on which
coordinates can be repeated for a given type of shap
Parameters
----------
coordlist_length : int
The number of coordinates or arguments used to define the shape.
shape_name : str
One of the names in `pyregion.ds9_shape_defs`.
Returns
-------
arg_types : list
A list of objects from `pyregion.region_numbers` with a length equal to
coordlist_length.
"""
from .ds9_region_parser import ds9_shape_defs
from .ds9_attr_parser import ds9_shape_in_comment_defs
if shape_name in ds9_shape_defs:
shape_def = ds9_shape_defs[shape_name]
else:
shape_def = ds9_shape_in_comment_defs[shape_name]
initial_arg_types = shape_def.args_list
arg_repeats = shape_def.args_repeat
if arg_repeats is None:
return initial_arg_types
# repeat args between n1 and n2
n1, n2 = arg_repeats
arg_types = list(initial_arg_types[:n1])
num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)
arg_types.extend((num_of_repeats - n1) //
(n2 - n1) * initial_arg_types[n1:n2])
arg_types.extend(initial_arg_types[n2:])
return arg_types
def convert_physical_to_imagecoord(shape, header):
arg_types = _generate_arg_types(len(shape.coord_list), shape.name)
new_coordlist = []
coord_list_iter = iter(zip(shape.coord_list, arg_types))
from .physical_coordinate import PhysicalCoordinate
pc = PhysicalCoordinate(header)
for coordinate, coordinate_type in coord_list_iter:
if coordinate_type == CoordOdd:
even_coordinate = next(coord_list_iter)[0]
xy0 = pc.to_image(coordinate, even_coordinate)
new_coordlist.extend(xy0)
elif coordinate_type == Distance:
new_coordlist.append(pc.to_image_distance(coordinate))
else:
new_coordlist.append(coordinate)
return new_coordlist
def check_wcs_and_convert(args, all_dms=False):
is_wcs = False
value_list = []
for a in args:
if isinstance(a, SimpleNumber) or isinstance(a, SimpleInteger) \
or all_dms:
value_list.append(a.v)
else:
value_list.append(a.degree)
is_wcs = True
return is_wcs, value_list
def check_wcs(l):
default_coord = "physical"
for l1, c1 in l:
if isinstance(l1, CoordCommand):
default_coord = l1.text.lower()
continue
if isinstance(l1, Shape):
if default_coord == "galactic":
is_wcs, coord_list = check_wcs_and_convert(l1.params,
all_dms=True)
else:
is_wcs, coord_list = check_wcs_and_convert(l1.params)
if is_wcs and (default_coord == "physical"): # ciao format
coord_format = "fk5"
else:
coord_format = default_coord
l1n = copy.copy(l1)
l1n.coord_list = coord_list
l1n.coord_format = coord_format
yield l1n, c1
else:
yield l1, c1
|
astropy/pyregion
|
pyregion/region_to_filter.py
|
as_region_filter
|
python
|
def as_region_filter(shape_list, origin=1):
filter_list = []
for shape in shape_list:
if shape.name == "composite":
continue
if shape.name == "polygon":
xy = np.array(shape.coord_list) - origin
f = region_filter.Polygon(xy[::2], xy[1::2])
elif shape.name == "rotbox" or shape.name == "box":
xc, yc, w, h, rot = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
f = region_filter.Rotated(region_filter.Box(xc, yc, w, h),
rot, xc, yc)
elif shape.name == "ellipse":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
angle = shape.coord_list[-1]
maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
if len(maj_list) > 1:
w1, h1 = max(maj_list), max(min_list)
w2, h2 = min(maj_list), min(min_list)
f1 = region_filter.Ellipse(xc, yc, w1, h1) \
& ~region_filter.Ellipse(xc, yc, w2, h2)
f = region_filter.Rotated(f1, angle, xc, yc)
else:
w, h = maj_list[0], min_list[0]
f = region_filter.Rotated(region_filter.Ellipse(xc, yc, w, h),
angle, xc, yc)
elif shape.name == "annulus":
xc, yc = shape.coord_list[:2]
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
r_list = shape.coord_list[2:]
r1 = max(r_list)
r2 = min(r_list)
f = region_filter.Circle(xc, yc, r1) & ~region_filter.Circle(xc, yc, r2)
elif shape.name == "circle":
xc, yc, r = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
f = region_filter.Circle(xc, yc, r)
elif shape.name == "panda":
xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)
f = f1 & region_filter.AngleRange(xc, yc, a1, a2)
elif shape.name == "pie":
xc, yc, r1, r2, a1, a2 = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)
f = f1 & region_filter.AngleRange(xc, yc, a1, a2)
elif shape.name == "epanda":
xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
f1 = region_filter.Ellipse(xc, yc, r21, r22) & ~region_filter.Ellipse(xc, yc, r11, r12)
f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)
f = region_filter.Rotated(f2, angle, xc, yc)
# f = f2 & region_filter.AngleRange(xc, yc, a1, a2)
elif shape.name == "bpanda":
xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
# -1 for change origin to 0,0
xc, yc = xc - origin, yc - origin
f1 = region_filter.Box(xc, yc, r21, r22) & ~region_filter.Box(xc, yc, r11, r12)
f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)
f = region_filter.Rotated(f2, angle, xc, yc)
# f = f2 & region_filter.AngleRange(xc, yc, a1, a2)
else:
warnings.warn("'as_region_filter' does not know how to convert {0}"
" to a region filter.".format(shape.name))
continue
if shape.exclude:
filter_list = [region_filter.RegionOrList(*filter_list) & ~f]
else:
filter_list.append(f)
return region_filter.RegionOrList(*filter_list)
|
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the python convetion
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, use origin=0.
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/region_to_filter.py#L6-L117
| null |
import numpy as np
import pyregion._region_filter as region_filter
import warnings
|
astropy/pyregion
|
pyregion/core.py
|
parse
|
python
|
def parse(region_string):
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list)
|
Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L223-L242
|
[
"def check_wcs(l):\n default_coord = \"physical\"\n\n for l1, c1 in l:\n if isinstance(l1, CoordCommand):\n default_coord = l1.text.lower()\n continue\n if isinstance(l1, Shape):\n if default_coord == \"galactic\":\n is_wcs, coord_list = check_wcs_and_convert(l1.params,\n all_dms=True)\n else:\n is_wcs, coord_list = check_wcs_and_convert(l1.params)\n\n if is_wcs and (default_coord == \"physical\"): # ciao format\n coord_format = \"fk5\"\n else:\n coord_format = default_coord\n\n l1n = copy.copy(l1)\n\n l1n.coord_list = coord_list\n l1n.coord_format = coord_format\n\n yield l1n, c1\n else:\n yield l1, c1\n",
"def parse(self, s):\n\n for l in s.split(\"\\n\"):\n try:\n s, c, continued = self.parseLine(l)\n except ParseException:\n warnings.warn(\"Failed to parse : \" + l)\n self.flush()\n continue\n\n if len(s) > 1:\n for s1 in s[:-1]:\n yield s1, None\n\n s[-1].comment = c\n s[-1].continued = continued\n yield s[-1], c\n elif len(s) == 1:\n s[-1].comment = c\n s[-1].continued = continued\n yield s[-1], c\n elif c:\n yield None, c\n\n self.flush()\n",
"def convert_attr(self, l):\n global_attr = [], {}\n\n parser = Ds9AttrParser()\n\n for l1, c1 in l:\n if isinstance(l1, Global):\n for kv in parser.parse_default(l1.text):\n if len(kv) == 1:\n global_attr[0].append(kv[0])\n elif len(kv) == 2:\n if kv[0] == 'tag':\n global_attr[1].setdefault(kv[0], set()).add(kv[1])\n else:\n global_attr[1][kv[0]] = kv[1]\n\n elif isinstance(l1, Shape):\n if c1:\n attr_list = parser.parse_default(c1)\n attr0, attr1 = get_attr(attr_list, global_attr)\n else:\n attr0, attr1 = global_attr\n l1n = copy.copy(l1)\n l1n.attr = attr0, attr1\n yield l1n, c1\n\n elif not l1 and c1:\n shape, attr_list = parser.parse_check_shape(c1)\n if shape:\n shape.attr = get_attr(attr_list, global_attr)\n yield shape, c1\n else:\n yield l1, c1\n",
"def filter_shape2(sss):\n r = [s1 for s1 in sss if isinstance(s1[0], Shape)]\n return zip(*r)\n"
] |
from itertools import cycle
from .ds9_region_parser import RegionParser
from .wcs_converter import check_wcs as _check_wcs
_builtin_open = open
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.

    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings, one per shape, or ``None`` when no
        comments are available.
    """

    def __init__(self, shape_list, comment_list=None):
        # When comments are supplied they must be parallel to the shapes
        # (exactly one comment string per shape).
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)

    def __getitem__(self, key):
        # Slicing returns a new ShapeList.
        # NOTE(review): the sliced result is built without a comment list,
        # so per-shape comments are dropped by slicing — confirm intended.
        if isinstance(key, slice):
            return ShapeList(list.__getitem__(self, key))
        else:
            return list.__getitem__(self, key)

    def __getslice__(self, i, j):
        # Python 2 legacy slicing hook; simply delegates to __getitem__.
        return self[max(0, i):max(0, j):]

    def check_imagecoord(self):
        """Are all shapes in image coordinates?

        Returns ``True`` if yes, and ``False`` if not.
        """
        # Any single shape with a non-"image" coord_format makes the
        # whole list non-image.
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True

    def as_imagecoord(self, header):
        """New shape list in image coordinates.

        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header

        Returns
        -------
        shape_list : `ShapeList`
            New shape list, with coordinates of the each shape
            converted to the image coordinate using the given header
            information.
        """
        comment_list = self._comment_list
        # With no stored comments, pair every shape with ``None`` so the
        # zip below still works.
        if comment_list is None:
            comment_list = cycle([None])

        r = RegionParser.sky_to_image(zip(self, comment_list),
                                      header)
        shape_list, comment_list = zip(*list(r))
        return ShapeList(shape_list, comment_list=comment_list)

    def get_mpl_patches_texts(self, properties_func=None,
                              text_offset=5.0,
                              origin=1):
        """Build matplotlib patches and text artists for these shapes.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convetion is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        """
        # Imported lazily so matplotlib is only required when plotting.
        from .mpl_helper import as_mpl_artists
        patches, txts = as_mpl_artists(self, properties_func,
                                       text_offset,
                                       origin=origin)

        return patches, txts

    def get_filter(self, header=None, origin=1):
        """Get filter.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convetion is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.

        Parameters
        ----------
        header : `astropy.io.fits.Header`
            FITS header; required unless all shapes are already in
            image coordinates.
        origin : {0, 1}
            Pixel coordinate origin

        Returns
        -------
        filter : filter object from `pyregion.region_to_filter`
            Filter object that can compute pixel masks for the regions.
        """
        from .region_to_filter import as_region_filter

        # Without a header we can only proceed if every shape is
        # already expressed in image coordinates.
        if header is None:
            if not self.check_imagecoord():
                raise RuntimeError("the region has non-image coordinate. header is required.")
            reg_in_imagecoord = self
        else:
            reg_in_imagecoord = self.as_imagecoord(header)

        region_filter = as_region_filter(reg_in_imagecoord, origin=origin)

        return region_filter

    def get_mask(self, hdu=None, header=None, shape=None):
        """Create a 2-d mask.

        Parameters
        ----------
        hdu : `astropy.io.fits.ImageHDU`
            FITS image HDU; its header/data shape are used as defaults
            for ``header`` and ``shape`` when those are not given.
        header : `~astropy.io.fits.Header`
            FITS header
        shape : tuple
            Image shape

        Returns
        -------
        mask : `numpy.array`
            Boolean mask

        Examples
        --------
        get_mask(hdu=f[0])
        get_mask(shape=(10,10))
        get_mask(header=f[0].header, shape=(10,10))
        """
        # Fall back to the HDU for whichever of header/shape was omitted.
        if hdu and header is None:
            header = hdu.header
        if hdu and shape is None:
            shape = hdu.data.shape

        region_filter = self.get_filter(header=header)
        mask = region_filter.mask(shape)

        return mask

    def write(self, outfile):
        """Write this shape list to a region file.

        Parameters
        ----------
        outfile : str
            File name
        """
        # An empty list still produces a (truncated/empty) output file,
        # with a warning, rather than raising.
        if len(self) < 1:
            print("WARNING: The region list is empty. The region file "
                  "'{:s}' will be empty.".format(outfile))
            try:
                outf = _builtin_open(outfile, 'w')
                outf.close()
                return
            except IOError as e:
                cmsg = "Unable to create region file '{:s}'.".format(outfile)
                # Append our context message to the original IOError.
                if e.args:
                    e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
                else:
                    e.args = (cmsg,)
                raise e

        # Coordinate format of the first shape becomes the file-wide
        # default (written on the second line).  NOTE(review): prev_cs is
        # never updated inside the loop, so every shape whose format
        # differs from the FIRST shape's gets an inline "<format>; "
        # prefix — confirm this matches the intended DS9 semantics.
        prev_cs = self[0].coord_format

        outf = None
        try:
            outf = _builtin_open(outfile, 'w')

            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])

            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))

            for shape in self:
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''

                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)

                outf.write("{0}\n".format(shape_str))

        except IOError as e:
            cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
            if e.args:
                e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
            else:
                e.args = (cmsg,)
            raise e
        finally:
            if outf:
                outf.close()
def open(fname):
    """Open, read and parse a DS9 region file.

    Parameters
    ----------
    fname : str
        Filename

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    # This function shadows the builtin ``open``; use the saved alias.
    fh = _builtin_open(fname)
    try:
        region_string = fh.read()
    finally:
        fh.close()
    return parse(region_string)
def read_region(s):
    """Parse a DS9 region string into a shape list.

    Parameters
    ----------
    s : str
        Region string

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    parser = RegionParser()
    # Parse -> attach attributes -> validate coordinate systems.
    parsed = parser.parse(s)
    with_attrs = parser.convert_attr(parsed)
    wcs_checked = _check_wcs(with_attrs)
    return ShapeList(parser.filter_shape(wcs_checked))
def read_region_as_imagecoord(s, header):
    """Parse a DS9 region string, converting shapes to image coordinates.

    Parameters
    ----------
    s : str
        Region string
    header : `~astropy.io.fits.Header`
        FITS header used for the sky-to-image conversion

    Returns
    -------
    shapes : `~pyregion.ShapeList`
        List of `~pyregion.Shape`
    """
    parser = RegionParser()
    # Parse -> attach attributes -> validate WCS -> convert to image coords.
    parsed = parser.parse(s)
    with_attrs = parser.convert_attr(parsed)
    wcs_checked = _check_wcs(with_attrs)
    in_image = parser.sky_to_image(wcs_checked, header)
    return ShapeList(parser.filter_shape(in_image))
def get_mask(region, hdu, origin=1):
    """Compute a boolean mask for ``region`` over the image in ``hdu``.

    Parameters
    ----------
    region : `~pyregion.ShapeList`
        List of `~pyregion.Shape`, in image coordinates
    hdu : `~astropy.io.fits.ImageHDU`
        FITS image HDU whose data array defines the mask shape
    origin : float
        Pixel coordinate origin passed through to the region filter

    Returns
    -------
    mask : `~numpy.array`
        Boolean mask

    Examples
    --------
    >>> from astropy.io import fits
    >>> import pyregion
    >>> hdu = fits.open("test.fits")[0]
    >>> with open("test01.reg") as fh:
    ...     reg = pyregion.read_region_as_imagecoord(fh.read(), hdu.header)
    >>> mask = pyregion.get_mask(reg, hdu)
    """
    # Imported lazily to avoid a hard import cycle at module load time.
    from pyregion.region_to_filter import as_region_filter

    region_filter = as_region_filter(region, origin=origin)
    return region_filter.mask(hdu.data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.