code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import relations
class Relation:
    """
    Base Relation class
    """

    SAME = None  # whether the child field may share the child's own id name

    @classmethod
    def relative_field(cls, model, relative):
        """
        Returns the name of the relative field, based on the relative name
        """

        # First try the standard <model>_id naming convention

        by_convention = f"{model.NAME}_id"

        if by_convention in relative._fields:
            return by_convention

        # Next try <model>_<id field> in case the id field has a different name

        id_name = model._field_name(model.ID)
        by_id_name = f"{model.NAME}_{id_name}"

        if by_id_name in relative._fields:
            return by_id_name

        # Finally try the bare id field name, as long as it wouldn't collide
        # with the relative's own id field (unless SAME allows that)

        if id_name in relative._fields:
            if cls.SAME or id_name != relative._field_name(relative.ID):
                return id_name

        raise relations.ModelError(model, f"cannot determine field for {model.NAME} in {relative.NAME}")
class OneTo(Relation):
    """
    Class for one to * relationships
    """

    Parent = None        # Model having one record
    parent_field = None  # The id field of the parent to connect to the child
    parent_child = None  # The name of the attribute on the parent model to reference children
    Child = None         # Model having many records
    child_field = None   # The id field in the child to connect to the parent field
    child_parent = None  # The name of the attribute on the child to reference the parent

    def __init__(self, Parent, Child, parent_child=None, child_parent=None, parent_field=None, child_field=None):

        self.Parent = Parent
        self.Child = Child

        # Shell identities let us derive defaults without instantiating the models

        parent_identity = self.Parent.thy()
        child_identity = self.Child.thy()

        # Attribute names default to the other side's model name

        self.parent_child = child_identity.NAME if parent_child is None else parent_child
        self.child_parent = parent_identity.NAME if child_parent is None else child_parent

        # Fields default to the parent's id and the derived relative field

        self.parent_field = parent_identity._field_name(parent_identity._id if parent_field is None else parent_field)

        if child_field is None:
            self.child_field = self.relative_field(parent_identity, child_identity)
        else:
            self.child_field = child_identity._field_name(child_field)

        # Register this relation on both models

        self.Parent._child(self)
        self.Child._parent(self)
class OneToMany(OneTo):
    """
    Class for one to many relationships
    """

    MODE = "many"  # a parent references many children
    SAME = False   # child field must differ from the child's own id field
class OneToOne(OneTo):
    """
    Class for one to one relationships
    """

    MODE = "one"  # a parent references a single child
    SAME = True   # child field may share the child's own id field name
import os
import glob
import json
import datetime
import relations
class MigrationsError(Exception):
    """
    General migrations error

    Raised when a migration cannot be computed, e.g. renamed indexes
    whose field lists differ.
    """
class Migrations:
    """
    Class for handling Migrations changes
    """

    def __init__(self, directory="ddl"):

        self.directory = directory  # directory holding definition/migration JSON files

    def current(self):
        """
        Get the previous snapshot
        """

        # No snapshot on disk yet means an empty definition

        if not os.path.exists(f"{self.directory}/definition.json"):
            return {}

        with open(f"{self.directory}/definition.json", "r") as current_file:
            return json.load(current_file)

    @staticmethod
    def define(models):
        """
        Get definitions, keyed by model name
        """

        return {model["name"]: model for model in [model.thy().define() for model in models]}

    @staticmethod
    def rename(name, adds, removes, renames):
        """
        Interactively determine which add/remove pairs were actually renames

        Matched pairs are stored in renames and popped from adds/removes in place.
        """

        if not adds or not removes:
            return

        print(f"Please indicate if any {name} were renamed:")

        for remove in removes:

            # Only offer adds not already claimed as a rename target

            remaining = [add for add in adds if add not in renames.values()]

            if not remaining:
                break

            for index, add in enumerate(remaining):
                print(f"[{index + 1}] {add}")

            rename = int(input(f"Which was {remove} renamed to? (return if none)") or '0')

            # 0 (or out of range) means no rename for this removal

            if 0 < rename <= len(remaining):
                renames[remove] = remaining[rename - 1]

        # Renames are neither additions nor removals, so drop them from both

        for remove, add in renames.items():
            removes.pop(removes.index(remove))
            adds.pop(adds.index(add))

    @staticmethod
    def lookup(name, fields):
        """
        Looks up a field definition by name, None if absent
        """

        for field in fields:
            if name == field['name']:
                return field

        return None

    @staticmethod
    def field(current, define):
        """
        Find the differences in a field
        """

        migration = {}

        # Any attribute that differs gets the new (possibly None) value

        for attr in set(current.keys()).union(define.keys()):
            if current.get(attr) != define.get(attr):
                migration[attr] = define.get(attr)

        return migration

    @classmethod
    def fields(cls, model, current, define):
        """
        Find the differences in fields
        """

        migration = {}

        current_names = [field["name"] for field in current]
        define_names = [field["name"] for field in define]

        add = [name for name in define_names if name not in current_names]
        remove = [name for name in current_names if name not in define_names]
        rename = {}

        cls.rename(f"{model} fields", add, remove, rename)

        if add:
            migration["add"] = [field for field in define if field["name"] in add]

        if remove:
            migration["remove"] = remove

        change = {}

        # Compare each surviving field against its (possibly renamed) definition

        for current_field in current:
            define_field = cls.lookup(rename.get(current_field['name'], current_field['name']), define)
            if define_field is not None and current_field != define_field:
                change[current_field['name']] = cls.field(current_field, define_field)

        if change:
            migration["change"] = change

        return migration

    @classmethod
    def indexes(cls, model, kind, current, define):
        """
        Find the differences in indexes
        """

        migration = {}

        add = [name for name in sorted(define.keys()) if name not in current]
        remove = [name for name in sorted(current.keys()) if name not in define]
        rename = {}

        cls.rename(f"{model} {kind}", add, remove, rename)

        if add:
            migration["add"] = {name: define[name] for name in add}

        if remove:
            migration["remove"] = remove

        if rename:

            # A rename may only keep the same fields; anything else is ambiguous

            for current_name, define_name in rename.items():
                if current[current_name] != define[define_name]:
                    raise MigrationsError(f"{model} {kind} {current_name} and {define_name} must have same fields to rename")

            migration["rename"] = rename

        return migration

    @classmethod
    def model(cls, current, define):
        """
        Find the differences in a model
        """

        # Label used in prompts/errors, showing both names if renamed

        model = f"{current['name']}/{define['name']}" if current['name'] != define['name'] else current['name']

        migration = {}

        # Simple attributes compare directly; structured ones are handled below

        for attr in set(["name", "title", "id"] + list(current.keys()) + list(define.keys())):
            if attr not in ["fields", "index", "unique"]:
                if current.get(attr) != define.get(attr):
                    migration[attr] = define.get(attr)

        if current["fields"] != define["fields"]:
            migration["fields"] = cls.fields(model, current["fields"], define["fields"])

        for attr in ["unique", "index"]:
            kind = "unique indexes" if attr == "unique" else "indexes"
            if current.get(attr) != define.get(attr):
                migration[attr] = cls.indexes(model, kind, current.get(attr, {}), define.get(attr, {}))

        return migration

    @classmethod
    def models(cls, current, define):
        """
        Find the differences in models
        """

        migration = {}

        add = [name for name in sorted(define.keys()) if name not in current]
        remove = [name for name in sorted(current.keys()) if name not in define]
        rename = {}

        cls.rename("models", add, remove, rename)

        if add:
            migration["add"] = {name: define[name] for name in add}

        if remove:
            migration["remove"] = {name: current[name] for name in remove}

        change = {}

        for name in current:
            define_model = define.get(rename.get(name, name))
            if define_model is not None and current[name] != define_model:
                change[name] = {
                    "definition": current[name],
                    "migration": cls.model(current[name], define_model)
                }

        if change:
            migration["change"] = change

        return migration

    def generate(self, models):
        """
        Updates a Migrations change

        Returns False if nothing changed, True once the new snapshot is written.
        """

        current = self.current()
        define = self.define(models)

        if current:

            if current == define:
                return False

            migration = self.models(current, define)

            # Archive the old definition and write the migration alongside it

            stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')

            os.rename(f"{self.directory}/definition.json", f"{self.directory}/definition-{stamp}.json")

            with open(f"{self.directory}/migration-{stamp}.json", "w") as migration_file:
                json.dump(migration, migration_file, indent=4, sort_keys=True)

        with open(f"{self.directory}/definition.json", "w") as current_file:
            json.dump(define, current_file, indent=4, sort_keys=True)

        return True

    def convert(self, name):
        """
        Convert general definitions and migrations to source specific ones based on a source name
        """

        source = relations.source(name)
        source_path = f"{self.directory}/{source.name}/{source.KIND}"

        os.makedirs(source_path, exist_ok=True)

        for file_path in glob.glob(f"{self.directory}/*.json"):
            file_name = file_path.split("/")[-1]
            if file_name.startswith("definition"):
                source.definition(file_path, source_path)
            elif file_name.startswith("migration"):
                source.migration(file_path, source_path)

    def list(self, name):
        """
        List out the migration pairs for a source
        """

        source = relations.source(name)
        source_path = f"{self.directory}/{source.name}/{source.KIND}"

        return source.list(source_path)

    def load(self, name, file_name):
        """
        Load a file from a source
        """

        source = relations.source(name)

        return source.load(f"{self.directory}/{source.name}/{source.KIND}/{file_name}")

    def apply(self, name):
        """
        Applies source definitions and migrations based on a source name
        """

        source = relations.source(name)
        source_path = f"{self.directory}/{source.name}/{source.KIND}"

        return source.migrate(source_path)
import overscore
class RecordError(Exception):
    """
    Record Error exception which captures the record with the issue
    """

    def __init__(self, record, message):

        self.record = record    # the Record the problem occurred on
        self.message = message  # human readable description

        super().__init__(message)
class Record:
    """
    Stores record for a Model
    """

    _order = None   # Access in order
    _names = None   # Access by name
    _action = None  # What to do with this record

    def __init__(self):
        """
        Initialize _names and _order
        """

        self._order = []
        self._names = {}

    def insert(self, index, field):
        """
        Inserts a field at a specific location
        """

        self._order.insert(index, field)
        self._names[field.name] = field

    def append(self, field):
        """
        Adds a field to the end
        """

        self.insert(len(self._order), field)

    def __setattr__(self, name, value):
        """
        Use to set field values directly
        """

        # Plain field name (private attributes skip this path)

        if name[0] != '_' and name in (self._names or []):
            self._names[name].value = value
            return

        # Double-underscore path, e.g. data__a__0: apply into a structured field

        apply = overscore.parse(name)
        if apply[0] in (self._names or []):
            self._names[apply[0]].apply(apply[1:], value)
            return

        # Otherwise a regular attribute

        self.__dict__[name] = value

    def __getattr__(self, name):
        """
        Use to get field values directly
        """

        if name in (self._names or []):
            return self._names[name].value

        # Double-underscore path: access within a structured field

        access = overscore.parse(name)
        if access[0] in (self._names or []):
            return self._names[access[0]].access(access[1:])

        raise AttributeError(f"'{self}' object has no attribute '{name}'")

    def __len__(self):
        """
        Use for number of fields in the record
        """

        return len(self._order)

    def __iter__(self):
        """
        Use the _order of record to return the names
        """

        for field in self._order:
            yield field.name

    def keys(self):
        """
        Implements keys for converting to dict
        """

        return iter(self)

    def __contains__(self, key):
        """
        Checks numerically or by name
        """

        # NOTE(review): a negative int also passes this bounds check — confirm intended

        if isinstance(key, int) and key < len(self._order):
            return True

        if key in self._names:
            return True

        return False

    def __setitem__(self, key, value):
        """
        Sets numerically or by name
        """

        if isinstance(key, int):
            if key < len(self._order):
                self._order[key].value = value
                return

        if key in self._names:
            self._names[key].value = value
            return

        # Double-underscore path into a structured field

        apply = overscore.parse(key)
        if apply[0] in (self._names or []):
            self._names[apply[0]].apply(apply[1:], value)
            return

        raise RecordError(self, f"unknown field '{key}'")

    def __getitem__(self, key):
        """
        Access numerically or by name
        """

        if isinstance(key, int):
            if key < len(self._order):
                return self._order[key].value

        if key in self._names:
            return self._names[key].value

        # Double-underscore path into a structured field

        access = overscore.parse(key)
        if access[0] in (self._names or []):
            return self._names[access[0]].access(access[1:])

        raise RecordError(self, f"unknown field '{key}'")

    def define(self):
        """
        Gets all the definitions for fields
        """

        return [field.define() for field in self._order]

    def filter(self, criterion, value):
        """
        Sets criterion on a field
        """

        if isinstance(criterion, int):
            if criterion < len(self._order):
                return self._order[criterion].filter(value)
        else:
            # name__operator form, e.g. age__gt

            if '__' in criterion:
                name, operator = criterion.split("__", 1)
                if name in self._names:
                    return self._names[name].filter(value, operator)

            if criterion in self._names:
                return self._names[criterion].filter(value)

        raise RecordError(self, f"unknown criterion '{criterion}'")

    def export(self):
        """
        Exports value by name
        """

        return {field.name: field.export() for field in self._order}

    def create(self, values):
        """
        Writes values for create
        """

        # Injected fields write into another field's stored structure,
        # so that field must be written first

        inject = []

        for field in self._order:
            if field.inject:
                inject.append(field)
            else:
                field.create(values)

        for field in inject:
            field.create(values[self._names[field.inject.split('__')[0]].store])

        return values

    def retrieve(self, values):
        """
        Sees if a record satisfies criteria in a dict
        """

        for field in self._order:
            if not field.retrieve(values):
                return False

        return True

    def like(self, values, titles, like, parents):
        """
        Sees if a record matches a like value
        """

        for field in titles:
            # titles entries may be double-underscore paths into a field
            field = overscore.parse(field)
            if self._names[field[0]].like(values, like, parents, field[1:]):
                return True

        return False

    def read(self, values):
        """
        Loads the value from storage
        """

        for field in self._order:
            if field.inject:
                # Injected fields read out of their target field's stored structure
                field.read(values[self._names[field.inject.split('__')[0]].store])
            else:
                field.read(values)

    def update(self, values):
        """
        Writes values for update
        """

        inject = []

        for field in self._order:
            if field.inject:
                inject.append(field)
            else:
                field.update(values)

        for field in inject:
            # Only touch the target structure when the injected value changed
            if field.delta() or field.refresh:
                store = field.inject.split('__')[0]
                if self._names[store].store not in values:
                    self._names[store].write(values)
                field.update(values[self._names[store].store])

        return values

    def mass(self, values):
        """
        Writes values for mass update
        """

        inject = []

        for field in self._order:
            if field.inject:
                inject.append(field)
            else:
                field.mass(values)

        for field in inject:
            # Only touch the target structure when the injected value changed
            if field.changed or field.refresh:
                store = field.inject.split('__')[0]
                if self._names[store].store not in values:
                    self._names[store].write(values)
                field.mass(values[self._names[field.inject.split('__')[0]].store])

        return values
import relations
class Source:
    """
    Base Abstraction for Source
    """

    name = None  # registered name of this source
    KIND = None  # kind of source, set by subclasses

    def __new__(cls, *args, **kwargs):
        """
        Register this source
        """

        self = object.__new__(cls)

        # NOTE(review): args[0] is evaluated even when name arrives as a keyword,
        # so calling with only keyword arguments raises IndexError — confirm intended
        self.name = kwargs.get("name", args[0])

        for key in kwargs:
            setattr(self, key, kwargs[key])

        relations.register(self)

        return self

    def ensure_attribute(self, item, attribute, default=None): # pylint: disable=no-self-use
        """
        ensure the item has the attribute
        """

        if hasattr(item, attribute):
            return

        setattr(item, attribute, default)

    def field_init(self, field):
        """
        init the field
        """

    def record_init(self, record):
        """
        init the record
        """

        for field in record._order:
            self.field_init(field)

    def init(self, model):
        """
        init the model
        """

        self.record_init(model._fields)

    def field_define(self, field, *args, **kwargs):
        """
        define the field
        """

    def record_define(self, record, *args, **kwargs):
        """
        define the record
        """

        for field in record:
            self.field_define(field, *args, **kwargs)

    def define(self, model):
        """
        define the model
        """

    def field_add(self, migration, *args, **kwargs):
        """
        add the field
        """

    def field_remove(self, definition, *args, **kwargs):
        """
        remove the field
        """

    def field_change(self, definition, migration, *args, **kwargs):
        """
        change the field
        """

    def record_change(self, definition, migration, *args, **kwargs):
        """
        change the record, applying field adds, removes and changes
        """

        for add in migration.get('add', []):
            self.field_add(add, *args, **kwargs)

        for remove in migration.get('remove', []):
            self.field_remove(relations.Migrations.lookup(remove, definition), *args, **kwargs)

        for field in definition:
            if field["name"] in migration.get("change", {}):
                self.field_change(field, migration["change"][field['name']], *args, **kwargs)

    def model_add(self, migration):
        """
        add the model
        """

    def model_remove(self, definition):
        """
        remove the model
        """

    def model_change(self, definition, migration):
        """
        change the model
        """

    def create_field(self, field, *args, **kwargs):
        """
        create the field
        """

    def create_record(self, record, *args, **kwargs):
        """
        create the record
        """

        for field in record._order:
            self.create_field(field, *args, **kwargs)

    def create_query(self, model, *args, **kwargs):
        """
        Create query
        """

    def create(self, model, *args, **kwargs):
        """
        create the model
        """

    def retrieve_field(self, field, *args, **kwargs):
        """
        retrieve the field
        """

    def retrieve_record(self, record, *args, **kwargs):
        """
        retrieve the record
        """

        for field in record._order:
            self.retrieve_field(field, *args, **kwargs)

    def count_query(self, model, *args, **kwargs):
        """
        Count query
        """

    def count(self, model, *args, **kwargs):
        """
        count the model
        """

    def retrieve_query(self, model, *args, **kwargs):
        """
        retrieve query
        """

    def retrieve(self, model, verify=True, *args, **kwargs):
        """
        retrieve the model
        """

    def titles_query(self, model, *args, **kwargs):
        """
        titles query
        """

    def titles(self, model, *args, **kwargs):
        """
        titles of the model
        """

    def update_field(self, field, *args, **kwargs):
        """
        update the field
        """

    def update_record(self, record, *args, **kwargs):
        """
        update the record
        """

        for field in record._order:
            self.update_field(field, *args, **kwargs)

    def field_mass(self, field, *args, **kwargs):
        """
        mass the field
        """

    def record_mass(self, record, *args, **kwargs):
        """
        mass the record
        """

        for field in record._order:
            self.field_mass(field, *args, **kwargs)

    def update_query(self, model, *args, **kwargs):
        """
        update query
        """

    def update(self, model, *args, **kwargs):
        """
        update the model
        """

    def delete_field(self, field, *args, **kwargs):
        """
        delete the field
        """

    def delete_record(self, record, *args, **kwargs):
        """
        delete the record
        """

        for field in record._order:
            self.delete_field(field, *args, **kwargs)

    def delete_query(self, model, *args, **kwargs):
        """
        delete query
        """

    def delete(self, model, *args, **kwargs):
        """
        delete the model
        """

    def definition(self, file_path, source_path):
        """
        Convert a general definition file to a source specific file
        """

    def migration(self, file_path, source_path):
        """
        Convert a general migration file to a source specific file
        """

    def execute(self, commands):
        """
        Execute a command or commands
        """

    def list(self, source_path):
        """
        List the migration pairs in reverse order for verification
        """

    def load(self, file_path):
        """
        Load a file into the database
        """

    def migrate(self, source_path):
        """
        Apply pending definitions and migrations
        """
import copy
import functools
import overscore
import relations
class ModelError(Exception):
    """
    Generic model Error for easier tracing
    """

    def __init__(self, model, message):

        self.model = model      # model the error pertains to
        self.message = message  # description of what went wrong

        super().__init__(message)

    def __str__(self):
        """
        Prefix the message with the model's name for context
        """

        return f"{self.model.NAME}: {self.message}"
class ModelIdentity:
    """
    Intermediate static type class for constructing model information without a full model
    """

    SOURCE = None    # Data source
    TITLE = None     # Title of the Model
    NAME = None      # Name of the Model
    ID = 0           # Ref of id field (assumes first field)
    TITLES = None    # Fields that make up the titles of the model
    LIST = None      # Default fields to list
    UNIQUE = None    # Unique indexes
    INDEX = None     # Regular indexes
    ORDER = None     # Default sort order
    CHUNK = 100      # Default chunk
    PARENTS = None   # Parent relationships (many/one to one)
    CHILDREN = None  # Child relationships (one to many/one)
    SISTERS = None   # Sister relationships (many to many)
    BROTHERS = None  # Brother relationships (many to many)

    _fields = None   # Base record to create other records with
    _id = None       # Name of id field
    _titles = None   # Actual fields of the titles
    _list = None     # Actual fields to list
    _unique = None   # Actual unique indexes
    _index = None    # Actual indexes
    _order = None    # Default sort order

    # Derived attributes (_id, _unique, _index) included in define()
    DEFINE = [
        "id",
        "unique",
        "index"
    ]

    # Class attributes excluded from define()
    UNDEFINE = [
        "ID",
        "TITLES",
        "LIST",
        "UNIQUE",
        "INDEX",
        "ORDER",
        "CHUNK",
        "PARENTS",
        "CHILDREN",
        "SISTERS",
        "BROTHERS"
    ]

    @staticmethod
    def underscore(name):
        """
        Turns camel case to underscored
        """

        underscored = []

        # previous tracks whether the prior letter was upper case,
        # seeded True to suppress a leading underscore
        previous = True

        for letter in name:

            lowered = letter.lower()

            # Start a new word at each upper case letter (except the first)
            if not previous and lowered != letter:
                underscored.append('_')

            underscored.append(lowered)

            previous = (lowered != letter)

        return ''.join(underscored)

    @classmethod
    def thy(cls, self=None):
        """
        Base identity to be known without instantiating the class
        """

        # If self wasn't sent, we're just providing a shell of an instance

        if self is None:
            self = ModelIdentity()
            self.__dict__.update(cls.__dict__)

        # Use TITLE, NAME if set, else use class name

        setattr(self, 'TITLE', cls.TITLE or cls.__name__)
        setattr(self, 'NAME', cls.NAME or cls.underscore(self.TITLE))

        # Derive all the fields from lowercase class attributes

        fields = relations.Record()

        for name, attribute in cls.__dict__.items():

            if name.startswith('_') or name != name.lower():
                continue # pragma: no cover

            # A bare type means a field of that kind
            if attribute in [bool, int, float, str, set, list, dict]:
                field = relations.Field(attribute)
            # A callable becomes the default; its return type is the kind
            elif callable(attribute):
                field = relations.Field(type(attribute()), default=attribute)
            # A set means a field whose values are limited to those options
            elif isinstance(attribute, set):
                field = relations.Field(set, options=sorted(attribute))
            # A tuple of strings also means options, preserving order
            elif isinstance(attribute, tuple) and attribute and isinstance(attribute[0], str):
                field = relations.Field(set, options=list(attribute))
            # A list means options with the first value as the default
            elif isinstance(attribute, list):
                field = relations.Field(type(attribute[0]), default=attribute[0], options=attribute)
            # Other tuples/dicts are positional/keyword Field arguments
            elif isinstance(attribute, tuple):
                field = relations.Field(*attribute)
            elif isinstance(attribute, dict):
                field = relations.Field(**attribute)
            elif isinstance(attribute, relations.Field):
                field = attribute
            else:
                continue # pragma: no cover

            field.name = name
            fields.append(field)

        setattr(self, '_fields', fields)

        # Determine the _id field name

        if cls.ID is not None:
            setattr(self, '_id', self._field_name(cls.ID))

        # Figure out the titles: default to leading int fields up to and
        # including the first str field (skipping the id)

        titles = self.TITLES

        if not titles:

            titles = []

            for field in self._fields._order:

                if self._id == field.name:
                    continue

                if field.kind in (int, str):
                    titles.append(field.name)

                # A title str field must not be None unless told otherwise
                if field.kind == str and field._none is None:
                    field.none = False

                if field.kind == str:
                    break

        if isinstance(titles, str):
            titles = [titles]

        self._titles = titles

        # Make sure all the titles check out

        for field in self._titles:
            if field.split('__', 1)[0] not in self._fields:
                raise ModelError(self, f"cannot find field {field} from titles")

        # Figure out the list, defaulting to id plus titles

        if self.LIST:
            self._list = self.LIST
        else:
            self._list = list(self._titles)
            if self._id and self._id not in self._list:
                self._list.insert(0, self._id)

        if isinstance(self._list, str):
            self._list = [self._list]

        # Make sure all the list checks out

        for field in self._list:
            if field.split('__', 1)[0] not in self._fields:
                raise ModelError(self, f"cannot find field {field} from list")

        # Figure out unique indexes, defaulting to the titles;
        # a str or list becomes a single index named by joining the fields

        unique = self.UNIQUE

        if unique is None:
            unique = self._titles
        elif not unique:
            unique = {}

        if isinstance(unique, str):
            unique = [unique]

        if isinstance(unique, list):
            unique = {
                "-".join(unique): unique
            }

        if isinstance(unique, dict):
            self._unique = unique

        # Make sure all the unique indexes check out

        for unique in self._unique:
            for field in self._unique[unique]:
                if field.split('__')[0] not in self._fields:
                    raise ModelError(self, f"cannot find field {field} from unique {unique}")

        # Regular indexes normalize the same way

        index = self.INDEX or {}

        if isinstance(index, str):
            index = [index]

        if isinstance(index, list):
            index = {
                "-".join(index): index
            }

        if isinstance(index, dict):
            self._index = index

        # Make sure all the indexes check out

        for index in self._index:
            for field in self._index[index]:
                if field.split('__')[0] not in self._fields:
                    raise ModelError(self, f"cannot find field {field} from index {index}")

        # Make sure all inject fields reference actual fields that are lists or dicts

        for field, inject in [(field, field.inject.split('__')[0]) for field in self._fields._order if field.inject]:

            if inject not in self._fields:
                raise relations.FieldError(field, f"cannot find field {inject} from inject {field.inject}")

            # NOTE(review): this raise passes self (the identity) where the one
            # above passes the field — confirm which is intended
            if self._fields._names[inject].kind not in [list, dict]:
                raise relations.FieldError(self, f"field {inject} not list or dict from inject {field.inject}")

        # Determine default sort order (if desired); a single unique index
        # doubles as the default ordering

        if self.ORDER:
            self._order = self._ordering(self.ORDER)
        elif self.ORDER is None and len(self._unique) == 1:
            self._order = self._ordering(list(self._unique.values())[0])
        else:
            self._order = []

        # Initialize relation models

        self.PARENTS = cls.PARENTS or {}
        self.CHILDREN = cls.CHILDREN or {}
        self.SISTERS = cls.SISTERS or {}
        self.BROTHERS = cls.BROTHERS or {}

        # Have the the source do whatever it needs to

        self.SOURCE = cls.SOURCE
        if relations.source(self.SOURCE) is not None:
            relations.source(self.SOURCE).init(self)

        return self

    def _field_name(self, field):
        """
        Returns the name of the field, whether index or name
        """

        if field not in self._fields:
            raise ModelError(self, f"cannot find field {field} in {self.NAME}")

        if isinstance(field, str):
            return field

        return self._fields._order[field].name

    def _ordering(self, order):
        """
        Creates a prefix sorting list, including field checks
        """

        if isinstance(order, str):
            order = [order]

        ordering = []

        for sort in order:

            # Strip an explicit +/- prefix to validate the field itself

            field = sort[1:] if sort[0] in ['-', '+'] else sort

            if field.split('__')[0] not in self._fields:
                raise ModelError(self, f"unknown sort field {field}")

            # Unprefixed fields default to ascending

            ordering.append(sort if sort != field else f"+{sort}")

        return ordering

    def _ancestor(self, field):
        """
        Looks up a parent relation for a field, None if not related
        """

        for relation in self.PARENTS.values():
            if field == relation.child_field:
                return relation

        return None

    def define(self):
        """
        define the identity
        """

        definition = {
            "fields": self._fields.define(),
        }

        # Derived attributes (DEFINE) come from their underscore versions

        for attr in self.DEFINE:
            if getattr(self, f"_{attr}") is not None:
                definition[attr] = getattr(self, f"_{attr}")

        # Any other ALL CAPS attribute not excluded is included lowercased

        for attr in self.__dict__:
            if attr[0] != '_' and attr == attr.upper() and attr not in self.UNDEFINE and getattr(self, attr) is not None:
                definition[attr.lower()] = getattr(self, attr)

        return definition

    def migrate(self, previous, definition=None):
        """
        migrates from a previous identity
        """

        if definition is None:
            definition = self.define()

        return relations.Migrations.model(previous, definition)
class Model(ModelIdentity):
    """
    Main model class
    """

    _record = None    # The current loaded single record (from get/create)
    _models = None    # The current loaded multiple models (from list/create)

    _parents = None   # Parent models
    _children = None  # Children models
    _sisters = None   # Sister models
    _brothers = None  # Brother models

    _role = None      # Whether we're a model, parent or child
    _mode = None      # Whether we're dealing with one or many

    _bulk = None      # Whether we're bulk inserting
    _chunk = None     # Default chunk size
    _size = None      # When to auto insert

    _like = None      # Current fuzzy match
    _sort = None      # What to sort by
    _limit = None     # If we're limiting, how much
    _offset = None    # If we're limiting, where to start

    _action = None    # Overall action of this model
    _related = None   # Which fields will be set automatically

    overflow = False  # Whether our overflow limit was reached
@staticmethod
def _extract(kwargs, name, default=None):
"""
Grabs and remove a value from kwargs so we can chain properly
"""
if name in kwargs:
value = kwargs[name]
del kwargs[name]
return value
return default
    def __init__(self, *args, **kwargs):
        """
        Creation is implied but we want to set stuff and call create implicitly
        """

        # Know thyself

        self.thy(self)

        # Initialize relation models

        self._parents = {}
        self._children = {}
        self._sisters = {}
        self._brothers = {}
        self._related = {}

        # Making things explicit, we're going to derive a lot of defaults from
        # context of what the user sent in

        # If a child's been sent in, we're a parent and we're retrieving as one

        _read = self._extract(kwargs, '_read')
        _child = self._extract(kwargs, '_child')
        _parent = self._extract(kwargs, '_parent')

        # Now just assume things were sent explicitly and we might override them
        # later because the logic here is pretty hairy

        self._role = "model"
        self._action = self._extract(kwargs, '_action', "create")
        self._chunk = self._extract(kwargs, '_chunk', self.CHUNK)

        # If we're being created from reading from a source

        if _read is not None:

            self._mode = "one"
            self._action = "update"
            self._record = self._build(self._action, _read=_read)

        # If we're being created as a parent

        elif _child is not None:

            self._related = _child
            self._role = "parent"
            self._mode = "one"
            self._action = "retrieve"
            self._record = self._build(self._action, _defaults=False)
            self.filter(*args, **kwargs)

        # If we're being created as a child

        elif _parent is not None:

            self._related = _parent
            self._role = "child"
            self._mode = self._extract(kwargs, '_mode')

            # A parent without an id yet means the child is being created too

            self._action = "retrieve" if list(self._related.values())[0] is not None else "create"

            if self._action == "retrieve":
                self._record = self._build(self._action, _defaults=False)
                self.filter(*args, **kwargs)

        # If we're being created as a search

        elif self._action == "retrieve":

            self._mode = self._extract(kwargs, '_mode')
            self._record = self._build(self._action, _defaults=False)
            self.filter(*args, **kwargs)

        # If we're just being straight up created

        elif self._action == "create":

            self._bulk = self._extract(kwargs, '_bulk', False)
            self._size = self._extract(kwargs, '_size', self._chunk)

            # A list as the first arg (or bulk) implies many records

            mode = "many" if self._bulk or (args and isinstance(args[0], list)) else "one"

            self._mode = self._extract(kwargs, '_mode', mode)
            self._related = self._extract(kwargs, '_related', {})

            if self._mode == "many":

                self._models = []

                # Each entry may be positional (list) or keyword (dict) args

                if args:
                    for each in args[0]:
                        eargs = each if isinstance(each, list) else []
                        ekwargs = each if isinstance(each, dict) else {}
                        self._models.append(self.__class__(*eargs, **ekwargs))

            else:

                self._record = self._build(self._action, *args, **kwargs)
    def __setattr__(self, name, value):
        """
        Use to set field values directly
        """

        # Only lowercase, non-private names whose base is an actual field are
        # intercepted; object.__getattribute__ avoids recursing through our own hooks

        if name[0] != '_' and name == name.lower() and name.split('__')[0] in (object.__getattribute__(self, '_fields') or []):

            self._ensure()

            # A one-mode child proxies to its single model

            if self._role == "child" and self._mode == "one":
                if self._models:
                    setattr(self._models[0], name, value)
                else:
                    raise ModelError(self, "no record")

            elif self._mode == "one":
                self._record[name] = value
                # Plain field sets propagate to related models; paths don't
                if '__' not in name:
                    self._propagate(name, value)

            # Many mode sets the value on every model

            else:
                if self._models:
                    for model in self._models:
                        setattr(model, name, value)
                else:
                    raise ModelError(self, "no records")

        else:

            object.__setattr__(self, name, value)
    def __getattr__(self, name):
        """
        Used to get relation models directly
        """

        # A parent/child attribute returns the related model (one mode only)

        if name in self.PARENTS or name in self.CHILDREN:

            self._ensure()

            if self._mode == "one":
                return self._relate(name)

        # Without a double-underscore path there's nothing else to resolve

        if '__' not in name:
            raise AttributeError(f"'{self}' object has no attribute '{name}'")

        # Walk the parsed path down through nested values

        current = self

        path = overscore.parse(name)

        for place in path:
            current = current[place]

        return current
    def __getattribute__(self, name):
        """
        Use to get field values directly
        """

        # Only lowercase, non-private names whose base is an actual field are
        # intercepted; object.__getattribute__ avoids recursing through this hook

        if name[0] != '_' and name == name.lower() and name.split('__')[0] in (object.__getattribute__(self, '_fields') or []):

            self._ensure()

            # A one-mode child proxies to its single model

            if self._role == "child" and self._mode == "one":
                if self._models:
                    return getattr(self._models[0], name)
                raise ModelError(self, "no record")

            if self._mode == "one":
                return self._record[name]

            # Many mode returns the value from every model

            if self._models is None:
                raise ModelError(self, "no records")

            return [getattr(model, name) for model in self._models]

        return object.__getattribute__(self, name)
def __len__(self):
    """
    Number of fields (one mode) or records (many mode)
    """
    # A retrieving many model counts in the source rather than fetching
    if self._action == "retrieve" and self._mode == "many":
        return self.count()
    self._ensure()
    if self._role == "child" and self._mode == "one":
        if self._models:
            return len(self._models[0])
        return 0
    if self._mode == "one":
        return len(self._record)
    return len(self._models)

def __iter__(self):
    """
    Iterates the record's fields (one mode) or the models (many mode)
    """
    self._ensure()
    if self._role == "child" and self._mode == "one":
        if self._models:
            return iter(self._models[0])
        return iter([])
    if self._mode == "one":
        return iter(self._record)
    return iter(self._models)

def keys(self):
    """
    Field names of the single record, in order (dict()-compatible);
    only valid in one mode
    """
    self._ensure()
    if self._mode == "many":
        raise ModelError(self, "no keys with many")
    if self._role == "child":
        if self._models:
            return iter(self._models[0]._record._names)
        return iter([])
    return iter(self._record._names)

def __contains__(self, key):
    """
    Checks membership numerically or by field name
    """
    self._ensure()
    if self._role == "child" and self._mode == "one":
        if self._models:
            return key in self._models[0]
        return False
    if self._mode == "one":
        return key in self._record
    if self._models:
        return key in self._fields
    return False
def __setitem__(self, key, value):
    """
    Sets a field value by name or position; in many mode the value is
    applied to every model (positional keys are disallowed there).
    """
    self._ensure()
    if self._role == "child" and self._mode == "one":
        if self._models:
            self._models[0][key] = value
        else:
            raise ModelError(self, "no record")
    elif self._mode == "one":
        self._record[key] = value
        # Keep cached parent/child relations in sync with the new value
        self._propagate(key, value)
    else:
        if isinstance(key, int):
            raise ModelError(self, "no override")
        if self._models:
            for model in self._models:
                model[key] = value
        else:
            raise ModelError(self, "no records")

def __getitem__(self, key):
    """
    Gets by name or position; in many mode an int key returns a model
    while a field name returns a list of values across models.
    """
    # Double-underscore names are nested paths, delegated to __getattr__
    if isinstance(key, str) and '__' in key:
        return getattr(self, key)
    self._ensure()
    if self._role == "child" and self._mode == "one":
        if self._models:
            return self._models[0][key]
        raise ModelError(self, "no record")
    if self._mode == "one":
        if key in self.PARENTS or key in self.CHILDREN:
            return self._relate(key)
        return self._record[key]
    if self._models is None:
        raise ModelError(self, "no records")
    if isinstance(key, int):
        return self._models[key]
    return [model[key] for model in self._models]
@classmethod
def _parent(cls, relation):
    """
    Registers a parent relation on the class, keyed by the attribute
    name children use to reach the parent
    """
    cls.PARENTS = cls.PARENTS or {}
    cls.PARENTS[relation.child_parent] = relation

@classmethod
def _child(cls, relation):
    """
    Registers a child relation on the class, keyed by the attribute
    name the parent uses to reach its children
    """
    cls.CHILDREN = cls.CHILDREN or {}
    cls.CHILDREN[relation.parent_child] = relation

# These aren't used yet and might need to go
@classmethod
def _sister(cls, relation):
    """
    Registers a sister relation on the class
    """
    cls.SISTERS = cls.SISTERS or {}
    cls.SISTERS[relation.brother_sister] = relation

@classmethod
def _brother(cls, relation):
    """
    Registers a brother relation on the class
    """
    cls.BROTHERS = cls.BROTHERS or {}
    cls.BROTHERS[relation.sister_brother] = relation
def _relate(self, name):
    """
    Looks up (lazily creating) the related model for an attribute
    name, or returns None if the name is no relation.
    """
    if name in self.PARENTS: # pylint: disable=no-else-return
        relation = self.PARENTS[name]
        if self._parents.get(name) is None:
            if self._action == "retrieve":
                # Still filtering: hand back a criteria-collecting many model
                self._parents[name] = relation.Parent.many().limit(self._chunk)
            else:
                self._parents[name] = relation.Parent(_child={relation.parent_field: self[relation.child_field]})
        return self._parents[name]
    elif name in self.CHILDREN:
        relation = self.CHILDREN[name]
        if self._children.get(name) is None:
            if self._action == "retrieve":
                self._children[name] = relation.Child.many().limit(self._chunk)
            else:
                self._children[name] = relation.Child(
                    _parent={relation.child_field: self._record[relation.parent_field]}, _mode=relation.MODE
                )
        return self._children[name]
    return None

def _collate(self):
    """
    Folds criteria gathered on related models into this model's own
    criteria as __in filters, then discards the cached relatives.
    """
    for child_parent, relation in self.PARENTS.items():
        if self._parents.get(child_parent) is not None:
            self._record.filter(f"{relation.child_field}__in", self._parents[child_parent][relation.parent_field])
            self.overflow = self.overflow or self._parents[child_parent].overflow
            del self._parents[child_parent]
    for parent_child, relation in self.CHILDREN.items():
        if self._children.get(parent_child) is not None:
            self._record.filter(f"{relation.parent_field}__in", self._children[parent_child][relation.child_field])
            self.overflow = self.overflow or self._children[parent_child].overflow
            del self._children[parent_child]

def _propagate(self, field, value):
    """
    Reacts to a field being set: records the value for relation-managed
    fields, invalidates a cached parent whose key changed, and pushes
    the new key down onto cached children.
    """
    field_name = self._field_name(field)
    if field_name in self._related:
        self._related[field_name] = value
    for child_parent, relation in self.PARENTS.items():
        if field_name == relation.child_field:
            # The parent key changed, so any cached parent is stale
            self._parents[child_parent] = None
    for parent_child, relation in self.CHILDREN.items():
        if field_name == relation.parent_field and self._relate(parent_child):
            self._relate(parent_child)[relation.child_field] = value
def _input(self, record, *args, **kwargs):
    """
    Fills in field values on a record, positionals first (skipping
    auto and relation-managed fields), then keywords by name.
    """
    field = 0
    for value in args:
        while record._order[field].auto or record._order[field].name in self._related:
            field += 1
        record[field] = value
        field += 1
    for name, value in kwargs.items():
        record[name] = value

def _build(self, _action, *args, **kwargs):
    """
    Builds a fresh record for an action: applies defaults (unless
    _defaults is False), an optional _read source, relation-managed
    values, then caller args.
    """
    _defaults = self._extract(kwargs, '_defaults', True)
    _read = self._extract(kwargs, '_read')
    record = copy.deepcopy(self._fields)
    record._action = _action
    if _defaults:
        for field in record._order:
            if field.default is not None:
                # Defaults may be callables (eg. list, dict) or literals
                field.value = field.default() if callable(field.default) else field.default
    if _read is not None:
        record.read(_read)
    for field, value in self._related.items():
        record[field] = value
    self._input(record, *args, **kwargs)
    return record

def _ensure(self):
    """
    Executes pending retrieval criteria so there's actual data to
    work with (a staged mass update must be applied first).
    """
    if self._action == "retrieve":
        if self._record._action == "update":
            raise ModelError(self, "need to update")
        self.retrieve()

def _each(self, action=None):
    """
    Returns all models (self in one mode, the list in many mode),
    optionally filtered to those pending a specific action.
    """
    if self._record and (action is None or self._action == action):
        return [self]
    if self._models:
        return [model for model in self._models if action is None or model._action == action]
    return []
def filter(self, *args, **kwargs):
    """
    Adds retrieval criteria, routing name__path criteria to related
    models when the first segment names a relation.
    """
    for field, value in self._related.items():
        self._record.filter(field, value)
    for index, value in enumerate(args):
        self._record.filter(index, value)
    for name, value in kwargs.items():
        if name == "like":
            # like is free-text search, handled separately from fields
            self._like = value
        else:
            pieces = name.split('__', 1)
            relation = self._relate(pieces[0])
            if relation is not None:
                relation.filter(**{pieces[1]: value})
            else:
                self._record.filter(name, value)
    return self

@classmethod
def bulk(cls, size=None):
    """
    For inserting multiple records in batches without getting id's
    """
    return cls(_action="create", _mode="many", _bulk=True, _size=size or cls.CHUNK)

@classmethod
def one(cls, *args, **kwargs):
    """
    For retrieving a single record
    """
    return cls(_action="retrieve", _mode="one", *args, **kwargs)

@classmethod
def many(cls, *args, **kwargs):
    """
    For retrieving multiple records
    """
    return cls(_action="retrieve", _mode="many", *args, **kwargs)
def sort(self, *args):
    """
    Adds sorting to pending retrieval criteria, or sorts already
    retrieved models in place.

    Each arg is a field name, optionally prefixed with + (ascending,
    the default) or - (descending).
    """
    if self._mode == "one":
        raise ModelError(self, "cannot sort one")
    if not args:
        return self
    sorting = self._ordering(args)
    # If we're retrieving, just add to existing
    if self._action == "retrieve":
        self._sort = self._sort or []
        self._sort.extend(sorting)
    else:
        def compare(model1, model2):
            for sort in sorting:
                cmp = (model1[sort[1:]] > model2[sort[1:]]) - (model1[sort[1:]] < model2[sort[1:]])
                # Only decide on this key when it differs; otherwise fall
                # through to the next sort key. (Previously the first key
                # returned unconditionally, so secondary keys never broke
                # ties.)
                if cmp:
                    return cmp if sort[0] == '+' else -cmp
            return 0
        self._models = sorted(self._models, key=functools.cmp_to_key(compare))
    return self
def limit(self, limit=None, start=0, page=None, per_page=None):
    """
    Limits how many records a retrieval returns, either by
    limit/start or by page/per_page (page is 1-based and takes
    precedence for the offset).
    """
    if limit is None:
        limit = self.CHUNK
    # If we're not retrieving, there's no point in limiting
    if self._action != "retrieve":
        raise ModelError(self, "can only limit retrieve")
    self._limit = per_page if per_page is not None else limit
    self._offset = (page - 1) * self._limit if page is not None else start
    return self

def set(self, *args, **kwargs):
    """
    Sets field values on the current records, or stages a mass
    update against pending many criteria.
    """
    # If we're retrieving, make sure we're only getting one or we'll stage
    if self._action == "retrieve":
        if self._mode == "one":
            self.retrieve()
        else:
            # Mark the record so update() knows to mass update by criteria
            self._record._action = "update"
    for model in self._each():
        self._input(model._record, *args, **kwargs)
    return self
def add(self, *args, **kwargs):
    """
    Adds _count new records to create, flushing automatically when
    in bulk mode and the batch reaches _size.
    """
    self._ensure()
    _count = self._extract(kwargs, '_count', 1)
    if self._role == "child" and self._mode == "one":
        # A one-to-one child can only ever hold a single record
        if self._models or _count > 1:
            raise ModelError(self, "only one allowed")
        self._models = [
            self.__class__(_action="create", _related=self._related, *args, **kwargs)
        ]
    elif self._mode == "one":
        raise ModelError(self, "only one allowed")
    else:
        if self._models is None:
            self._models = []
        for _ in range(_count):
            self._models.append(self.__class__(_action="create", _related=self._related, *args, **kwargs))
        if self._bulk and len(self._models) >= self._size:
            self.create()
    return self

def export(self):
    """
    Exports the record (one mode) or all records (many mode) as
    plain data structures.
    """
    self._ensure()
    if self._record:
        return self._record.export()
    if self._models:
        return [model.export() for model in self._models]
    return []
@classmethod
def define(cls, *args, **kwargs):
    """
    define the model in its source (eg. generate DDL)
    """
    return relations.source(cls.SOURCE).define(cls.thy().define(), *args, **kwargs)

def create(self, *args, **kwargs):
    """
    create the model's records in the source
    """
    if self._action not in ["create", "update"]:
        raise ModelError(self, f"cannot create during {self._action}")
    return relations.source(self.SOURCE).create(self, *args, **kwargs)

def count(self, *args, **kwargs):
    """
    count the models matching the current criteria
    """
    if self._action not in ["update", "retrieve"]:
        raise ModelError(self, f"cannot count during {self._action}")
    return relations.source(self.SOURCE).count(self, *args, **kwargs)

def retrieve(self, verify=True, *args, **kwargs):
    """
    retrieve the model; with verify, a one model that finds nothing raises
    """
    if self._action != "retrieve":
        raise ModelError(self, f"cannot retrieve during {self._action}")
    return relations.source(self.SOURCE).retrieve(self, verify, *args, **kwargs)

def titles(self, *args, **kwargs):
    """
    retrieve the titles (display values) for the models
    """
    if self._action not in ["update", "retrieve"]:
        raise ModelError(self, f"cannot titles during {self._action}")
    return relations.source(self.SOURCE).titles(self, *args, **kwargs)

def update(self, *args, **kwargs):
    """
    update the model's records in the source
    """
    if self._action not in ["update", "retrieve"]:
        raise ModelError(self, f"cannot update during {self._action}")
    return relations.source(self.SOURCE).update(self, *args, **kwargs)

def delete(self, *args, **kwargs):
    """
    delete the model's records from the source
    """
    if self._action not in ["update", "retrieve"]:
        raise ModelError(self, f"cannot delete during {self._action}")
    # A single retrieving model must be fetched so there's an id to delete by
    if self._action == "retrieve" and self._mode == "one":
        self.retrieve()
    return relations.source(self.SOURCE).delete(self, *args, **kwargs)

def query(self, action=None, *args, **kwargs):
    """
    get the source query object for the model's current (or given) action
    """
    if self._action == "create":
        return relations.source(self.SOURCE).create_query(self, *args, **kwargs).bind(self)
    if self._action == "retrieve" and action == "count":
        return relations.source(self.SOURCE).count_query(self, *args, **kwargs).bind(self)
    if self._action == "retrieve" and action == "titles":
        return relations.source(self.SOURCE).titles_query(self, *args, **kwargs).bind(self)
    if action == "update" or (action is None and self._action == "update"):
        return relations.source(self.SOURCE).update_query(self, *args, **kwargs).bind(self)
    if action == "delete":
        return relations.source(self.SOURCE).delete_query(self, *args, **kwargs).bind(self)
    return relations.source(self.SOURCE).retrieve_query(self, *args, **kwargs).bind(self)
import overscore
class Titles:
    """
    Ordered container of display titles, keyed by model id.

    Keeps ids in insertion order alongside a mapping of id to a list
    of title values, plus per-column format hints (flattened across
    parent titles).
    """

    id = None      # name of the id field titles are keyed by
    fields = None  # title field names, in order
    ids = None     # ids, in insertion order
    titles = None  # id -> list of title values
    format = None  # per-column format hints
    parents = None # title field name -> parent Titles for ancestor fields

    def __init__(self, model):

        self.id = model._id
        self.fields = model._titles

        self.ids = []
        self.titles = {}
        self.format = []
        self.parents = {}

        for field in self.fields:
            ancestor = model._ancestor(field)
            if ancestor is not None:
                # Ancestor field: pull the parent's titles and adopt its formats
                parent_titles = ancestor.Parent.many(**{f"{ancestor.parent_field}__in": model[field]}).titles()
                self.parents[field] = parent_titles
                self.format.extend(parent_titles.format)
            elif field in model._fields._names and model._fields._names[field].format is not None:
                self.format.extend(model._fields._names[field].format)
            else:
                self.format.append(None)

    def __len__(self):
        """
        Number of titles stored
        """
        return len(self.ids)

    def __contains__(self, id):
        """
        Whether a title exists for this id
        """
        return id in self.ids

    def __iter__(self):
        """
        Iterate ids in insertion order
        """
        return iter(self.ids)

    def __setitem__(self, id, value):
        """
        Store a title for an id, recording order on first insert
        """
        if id not in self.ids:
            self.ids.append(id)
        self.titles[id] = value

    def __getitem__(self, id):
        """
        Title values for an id
        """
        return self.titles[id]

    def __delitem__(self, id):
        """
        Remove an id and its title
        """
        self.ids.remove(id)
        del self.titles[id]

    def add(self, model): # pylint: disable=too-many-branches
        """
        Appends the title for a model, resolving parent titles and
        dotted paths as needed.
        """
        values = []
        for name in self.fields: # pylint: disable=too-many-nested-blocks
            parent = self.parents.get(name)
            if parent is not None:
                if model[name] in parent:
                    values.extend(parent[model[name]])
                else:
                    # Parent wasn't found; pad with Nones to keep columns aligned
                    values.extend(None for _ in parent.format)
            else:
                path = overscore.parse(name)
                field = path.pop(0)
                values.extend(model._record._names[field].title(path))
        self[model[self.id]] = values
import collections
import relations_sql
import relations_postgresql
class QUERY(relations_postgresql.SQL, relations_sql.CLAUSE):
    """
    Base query clause for PostgreSQL
    """

class SELECT(relations_postgresql.SQL, relations_sql.SELECT):
    """
    SELECT query, with clauses rendered in PostgreSQL order
    """

    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_postgresql.OPTIONS),
        ("FIELDS", relations_postgresql.FIELDS),
        ("FROM", relations_postgresql.FROM),
        ("WHERE", relations_postgresql.WHERE),
        ("GROUP_BY", relations_postgresql.GROUP_BY),
        ("HAVING", relations_postgresql.HAVING),
        ("ORDER_BY", relations_postgresql.ORDER_BY),
        ("LIMIT", relations_postgresql.LIMIT)
    ])

class INSERT(relations_postgresql.SQL, relations_sql.INSERT):
    """
    INSERT query; VALUES and an embedded SELECT are the two possible
    sources of rows
    """

    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_postgresql.OPTIONS),
        ("TABLE", relations_postgresql.TABLE_NAME),
        ("COLUMNS", relations_postgresql.COLUMN_NAMES),
        ("VALUES", relations_postgresql.VALUES),
        ("SELECT", SELECT)
    ])
class LIMITED(relations_postgresql.SQL, relations_sql.LIMITED):
    """
    Base for queries that accept a LIMIT clause
    """

class UPDATE(relations_postgresql.SQL, relations_sql.UPDATE):
    """
    UPDATE query, with clauses rendered in PostgreSQL order
    """

    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_postgresql.OPTIONS),
        ("TABLE", relations_postgresql.TABLE_NAME),
        ("SET", relations_postgresql.SET),
        ("WHERE", relations_postgresql.WHERE),
        ("ORDER_BY", relations_postgresql.ORDER_BY),
        ("LIMIT", relations_postgresql.LIMIT)
    ])

class DELETE(relations_postgresql.SQL, relations_sql.DELETE):
    """
    DELETE query, with clauses rendered in PostgreSQL order
    """

    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_postgresql.OPTIONS),
        ("TABLE", relations_postgresql.TABLE_NAME),
        ("WHERE", relations_postgresql.WHERE),
        ("ORDER_BY", relations_postgresql.ORDER_BY),
        ("LIMIT", relations_postgresql.LIMIT)
    ])
import glob
import copy
import json
import psycopg2
import psycopg2.extras
import relations
import relations_sql
import relations_postgresql
class Source(relations.Source): # pylint: disable=too-many-public-methods
    """
    PostgreSQL Relations source backed by psycopg2
    """

    # SQL expression/clause classes this source builds queries with
    SQL = relations_sql.SQL
    ASC = relations_sql.ASC
    DESC = relations_sql.DESC

    LIKE = relations_postgresql.LIKE
    IN = relations_postgresql.IN
    OR = relations_postgresql.OR
    OP = relations_postgresql.OP

    AS = relations_postgresql.AS
    FIELDS = relations_postgresql.FIELDS
    TABLE = relations_postgresql.TABLE
    TABLE_NAME = relations_postgresql.TABLE_NAME

    INSERT = relations_postgresql.INSERT
    SELECT = relations_postgresql.SELECT
    UPDATE = relations_postgresql.UPDATE
    DELETE = relations_postgresql.DELETE

    KIND = "postgresql"

    database = None   # Database to use
    schema = None     # Schema to use
    connection = None # Connection
    created = False   # If we created the connection (and so must close it)

    def __init__(self, name, database, schema=None, connection=None, **kwargs):

        self.database = database
        self.schema = schema

        if connection is not None:
            self.connection = connection
        else:
            self.created = True
            # Any extra kwargs (host, user, password, etc.) pass through to psycopg2
            self.connection = psycopg2.connect(
                dbname=self.database, cursor_factory=psycopg2.extras.RealDictCursor,
                **{name: arg for name, arg in kwargs.items() if name not in ["name", "database", "schema", "connection"]}
            )

    def __del__(self):

        # Only close connections we opened ourselves
        if self.created and self.connection:
            self.connection.close()

    def execute(self, commands):
        """
        Execute one or more SQL commands and commit.

        Accepts a relations_sql.SQL object, a single string of
        ;-separated statements, or a list of statements.
        """

        if isinstance(commands, relations_sql.SQL):
            commands.generate()
            commands = commands.sql

        if not isinstance(commands, list):
            commands = commands.split(";\n")

        cursor = self.connection.cursor()

        for command in commands:
            if command.strip():
                cursor.execute(command)

        self.connection.commit()

        cursor.close()
def init(self, model):
    """
    Prepares a model for this source: ensures SCHEMA/STORE attributes
    (defaulting from the source and model name) and auto-increment on
    an integer id field.
    """

    self.record_init(model._fields)

    self.ensure_attribute(model, "SCHEMA")
    self.ensure_attribute(model, "STORE")

    if model.SCHEMA is None:
        model.SCHEMA = self.schema

    if model.STORE is None:
        model.STORE = model.NAME

    # An int id with no explicit auto setting defaults to auto increment
    if model._id is not None and model._fields._names[model._id].auto is None and model._fields._names[model._id].kind == int:
        model._fields._names[model._id].auto = True

def define(self, migration=None, definition=None):
    """
    Creates the DDL for a model from a migration and/or definition
    """

    ddl = self.TABLE(migration, definition)
    ddl.generate(indent=2)
    return ddl.sql

def create_query(self, model):
    """
    Builds the INSERT query for what's being created
    """

    fields = [field.store for field in model._fields._order if not field.auto and not field.inject]

    query = self.INSERT(self.TABLE_NAME(model.STORE, schema=model.SCHEMA), *fields)

    # Auto-id models insert one at a time so the new id can be read back
    if not model._bulk and model._id is not None and model._fields._names[model._id].auto:
        if model._mode == "many":
            raise relations.ModelError(model, "only one create query at a time")
        return copy.deepcopy(query).VALUES(**model._record.create({})).bind(model)

    for creating in model._each("create"):
        query.VALUES(**creating._record.create({}))

    return query
@staticmethod
def create_id(cursor, model, query):
    """
    Inserts a single record and sets the model's id from RETURNING
    """

    store = model._fields._names[model._id].store

    query.generate()
    cursor.execute("""%s RETURNING %s""" % (query.sql, query.quote(store)), tuple(query.args))

    model[model._id] = cursor.fetchone()[store]

def create(self, model, query=None):
    """
    Executes the create, then (non-bulk) cascades to child models and
    flips the created models to update mode; bulk mode just resets
    the batch.
    """

    cursor = self.connection.cursor()

    if not model._bulk and model._id is not None and model._fields._names[model._id].auto:
        for creating in model._each("create"):
            create_query = query or self.create_query(creating)
            self.create_id(cursor, creating, create_query)
    else:
        create_query = query or self.create_query(model)
        create_query.generate()
        cursor.execute(create_query.sql, tuple(create_query.args))

    cursor.close()

    if not model._bulk:

        for creating in model._each("create"):
            for parent_child in creating.CHILDREN:
                if creating._children.get(parent_child):
                    creating._children[parent_child].create()
            creating._action = "update"
            creating._record._action = "update"

        model._action = "update"

    else:

        model._models = []

    return model
def retrieve_field(self, field, query):
    """
    Adds a field's criteria to the query's WHERE clause
    """

    for operator, value in (field.criteria or {}).items():
        name = f"{field.store}__{operator}"
        # Operators against extracted (virtual) columns are flagged as such
        extracted = operator.rsplit("__", 1)[0] in (field.extract or {})
        query.WHERE(self.OP(name, value, EXTRACTED=extracted))

def like(self, model, query):
    """
    Adds the model's like (search) criteria to the query, matching
    against title fields and, for parent keys, the parent's titles.
    """

    if model._like is None:
        return

    titles = self.OR()

    for name in model._titles:

        path = name.split("__", 1)
        name = path.pop(0)

        field = model._fields._names[name]

        parent = False

        # If this field is a parent key, search the parent's titles and
        # match against the resulting parent ids instead of the raw field
        for relation in model.PARENTS.values():
            if field.name == relation.child_field:
                parent = relation.Parent.many(like=model._like).limit(model._chunk)
                if parent[relation.parent_field]:
                    titles(self.IN(field.store, parent[relation.parent_field]))
                    model.overflow = model.overflow or parent.overflow
                else:
                    parent = True

        if not parent:

            paths = path if path else field.titles

            if paths:
                for path in paths:
                    titles(self.LIKE(f"{field.store}__{path}", model._like, extracted=path in (field.extract or {})))
            else:
                titles(self.LIKE(field.store, model._like))

    if titles:
        query.WHERE(titles)

def sort(self, model, query):
    """
    Adds the model's sort order to the query
    """

    for field in (model._sort or model._order or []):
        query.ORDER_BY(**{field[1:]: (self.ASC if field[0] == "+" else self.DESC)})

    model._sort = None

@staticmethod
def limit(model, query):
    """
    Adds limit/offset information to the query
    """

    if model._limit is not None:
        query.LIMIT(model._limit)

    # NOTE(review): the second LIMIT call appears to supply the offset --
    # confirm against relations_sql's LIMIT clause semantics
    if model._offset:
        query.LIMIT(model._offset)
def count_query(self, model):
    """
    Builds the SELECT COUNT(*) query for the model's criteria
    """

    query = self.SELECT(self.AS("total", self.SQL("COUNT(*)"))).FROM(self.TABLE_NAME(model.STORE, schema=model.SCHEMA))

    # Fold criteria gathered on related models into the model's own
    model._collate()

    self.retrieve_record(model._record, query)
    self.like(model, query)

    return query

def retrieve_query(self, model):
    """
    Builds the full SELECT query: the count query with * fields plus
    sorting and limits
    """

    query = self.count_query(model)

    query.FIELDS = self.FIELDS("*")

    self.sort(model, query)
    self.limit(model, query)

    return query

def titles_query(self, model):
    """
    Builds the query for selecting titles (same as retrieve)
    """
    return self.retrieve_query(model)

def count(self, model, query=None):
    """
    Executes the count and returns the total
    """

    cursor = self.connection.cursor()

    if query is None:
        query = self.count_query(model)

    query.generate()
    cursor.execute(query.sql, query.args)

    total = cursor.fetchone()["total"] if cursor.rowcount else 0

    cursor.close()

    return total

@staticmethod
def values_retrieve(model, values):
    """
    Decodes JSON-stored columns back into Python values
    """

    for field in model._fields._order:
        # Non-scalar kinds are stored as JSON text and need decoding
        if isinstance(values.get(field.store), str) and field.kind not in [bool, int, float, str]:
            values[field.store] = json.loads(values[field.store])

    return values
def retrieve(self, model, verify=True, query=None):
    """
    Executes the retrieve, loading one record or many models.

    With verify, a one-mode model that matches nothing raises;
    otherwise None is returned.
    """

    cursor = self.connection.cursor()

    if query is None:
        query = self.retrieve_query(model)

    query.generate()
    cursor.execute(query.sql, tuple(query.args))

    if model._mode == "one" and cursor.rowcount > 1:
        raise relations.ModelError(model, "more than one retrieved")

    if model._mode == "one" and model._role != "child":

        if cursor.rowcount < 1:

            if verify:
                raise relations.ModelError(model, "none retrieved")

            return None

        model._record = model._build("update", _read=self.values_retrieve(model, cursor.fetchone()))

    else:

        model._models = []

        while len(model._models) < cursor.rowcount:
            model._models.append(model.__class__(_read=self.values_retrieve(model, cursor.fetchone())))

        if model._limit is not None:
            # A full page means there may be more records beyond the limit
            model.overflow = model.overflow or len(model._models) >= model._limit

        model._record = None

    model._action = "update"

    cursor.close()

    return model

def titles(self, model, query=None):
    """
    Retrieves (if needed) and builds the titles structure for a model
    """

    if model._action == "retrieve":
        self.retrieve(model, query=query)

    titles = relations.Titles(model)

    for titling in model._each():
        titles.add(titling)

    return titles
def update_field(self, field, updates, query):
    """
    Adds a non-auto field's new value to the SET clause
    """

    if field.store in updates and not field.auto:
        query.SET(**{field.store: updates[field.store]})

def update_query(self, model):
    """
    Builds the UPDATE query: mass (by criteria) for a staged
    retrieve, or for a single record by id
    """

    query = self.UPDATE(self.TABLE_NAME(model.STORE, schema=model.SCHEMA))

    if model._action == "retrieve" and model._record._action == "update":
        self.update_record(model._record, model._record.mass({}), query)
    elif model._id:
        if model._mode == "many":
            raise relations.ModelError(model, "only one update query at a time")
        self.update_record(model._record, model._record.update({}), query)
        query.WHERE(**{model._fields._names[model._id].store: model[model._id]})
    else:
        raise relations.ModelError(model, "nothing to update from")

    self.retrieve_record(model._record, query)

    return query

def update(self, model, query=None):
    """
    Executes the update(s), cascading to child models; returns the
    number of rows updated
    """

    cursor = self.connection.cursor()

    updated = 0

    # If the overall model is retrieving and the record has values set
    if model._action == "retrieve" and model._record._action == "update":

        update_query = query or self.update_query(model)
        update_query.generate()
        cursor.execute(update_query.sql, update_query.args)

        updated = cursor.rowcount

    elif model._id:

        for updating in model._each("update"):

            update_query = query or self.update_query(updating)

            if update_query.SET:
                update_query.generate()
                cursor.execute(update_query.sql, update_query.args)

            for parent_child in updating.CHILDREN:
                if updating._children.get(parent_child):
                    updating._children[parent_child].create().update()

            # NOTE(review): rowcount is added even when SET was empty and no
            # execute ran for this model -- confirm this is intended
            updated += cursor.rowcount

    else:

        raise relations.ModelError(model, "nothing to update from")

    return updated
def delete_query(self, model):
    """
    Builds the DELETE query, by criteria or by collected ids
    """

    query = self.DELETE(self.TABLE_NAME(model.STORE, schema=model.SCHEMA))

    if model._action == "retrieve":

        self.retrieve_record(model._record, query)

    elif model._id:

        ids = []
        store = model._fields._names[model._id].store

        for deleting in model._each():
            ids.append(deleting[model._id])

        query.WHERE(**{f"{store}__in": ids})

    else:

        raise relations.ModelError(model, "nothing to delete from")

    return query

def delete(self, model, query=None):
    """
    Executes the delete and returns the number of rows deleted
    """

    cursor = self.connection.cursor()

    delete_query = query or self.delete_query(model)

    delete_query.generate()
    cursor.execute(delete_query.sql, tuple(delete_query.args))

    return cursor.rowcount
def definition(self, file_path, source_path):
    """
    Converts a JSON definition file to a PostgreSQL definition (SQL) file
    """

    definitions = []

    with open(file_path, "r") as definition_file:
        definition = json.load(definition_file)

    for name in sorted(definition.keys()):
        # Only emit models that belong to this source
        if definition[name]["source"] == self.name:
            definitions.append(self.define(definition[name]))

    if definitions:
        file_name = file_path.split("/")[-1].split('.')[0]
        with open(f"{source_path}/{file_name}.sql", "w") as source_file:
            source_file.write("\n".join(definitions))

def migration(self, file_path, source_path):
    """
    Converts a JSON migration file (add/remove/change) to a source SQL file
    """

    migrations = []

    with open(file_path, "r") as migration_file:
        migration = json.load(migration_file)

    for add in sorted(migration.get('add', {}).keys()):
        if migration['add'][add]["source"] == self.name:
            migrations.append(self.define(migration['add'][add]))

    for remove in sorted(migration.get('remove', {}).keys()):
        if migration['remove'][remove]["source"] == self.name:
            migrations.append(self.define(definition=migration['remove'][remove]))

    for change in sorted(migration.get('change', {}).keys()):
        if migration['change'][change]['definition']["source"] == self.name:
            migrations.append(
                self.define(migration['change'][change]['migration'], migration['change'][change]['definition'])
            )

    if migrations:
        file_name = file_path.split("/")[-1].split('.')[0]
        with open(f"{source_path}/{file_name}.sql", "w") as source_file:
            source_file.write("\n".join(migrations))

def load(self, load_path):
    """
    Loads and executes a SQL file
    """

    with open(load_path, 'r') as load_file:
        self.execute(load_file.read().split(";\n"))

def list(self, source_path):
    """
    Lists migration files by stamp, mapping each kind to its file name
    """

    migrations = {}

    for file_path in glob.glob(f"{source_path}/*-*.sql"):

        file_name = file_path.rsplit("/", 1)[-1]
        kind, stamp = file_name.split('.')[0].split('-', 1)

        migrations.setdefault(stamp, {})
        migrations[stamp][kind] = file_name

    return migrations
def migrate(self, source_path):
    """
    Applies any pending migrations, tracking applied stamps in a
    _relations_migration table.

    Returns True if anything was loaded, False otherwise.
    """

    class Migration(relations.Model):
        """
        Model for tracking applied migrations
        """
        SOURCE = self.name
        STORE = "_relations_migration"
        UNIQUE = False

        stamp = str

    migrated = False  # fix: was unbound (UnboundLocalError) when stamps
                      # existed but no new migrations needed applying

    self.execute(Migration.define())

    stamps = Migration.many().stamp

    migration_paths = sorted(glob.glob(f"{source_path}/migration-*.sql"))

    if not stamps:

        # Fresh database: record every known stamp, then load the full definition
        migration = Migration().bulk().add("definition")

        for migration_path in migration_paths:
            stamp = migration_path.rsplit("/migration-", 1)[-1].split('.')[0]
            migration.add(stamp)

        migration.create()
        self.connection.commit()

        self.load(f"{source_path}/definition.sql")
        migrated = True

    else:

        # Existing database: apply only migrations whose stamp is unseen
        for migration_path in migration_paths:

            stamp = migration_path.rsplit("/migration-", 1)[-1].split('.')[0]

            if stamp not in stamps:

                Migration(stamp).create()
                self.connection.commit()

                self.load(migration_path)
                migrated = True

    return migrated
# relations-pymysql
DB Modeling for MySQL using the PyMySQL library
Relations overall is designed to be a simple, straight forward, flexible DIL (data interface layer).
Quite different from other DIL's, it has the singular, microservice based purpose to:
- Create models with very little code, independent of backends
- Create CRUD API with a database backend from those models with very little code
- Create microservices to use those same models but with that CRUD API as the backend
Ya, that last one is kinda new I guess.
Say we create a service, composed of microservices, which in turn is to be consumed by other services made of microservices.
You should only need to define the model once. Your conceptual structure is the same, to the DB, the API, and anything using that API. You shouldn't have to say that structure over and over. You shouldn't have to define CRUD endpoints over and over. That's so boring, tedious, and unnecessary.
Furthermore, the conceptual structure is based not on the backend you're going to use at that moment in time (scaling matters) but on the relations, how the pieces interact. If you know the structure of the data, that's all you need to interact with the data.
So with Relations, Models and Fields are defined independent of any backend, which instead is set at runtime. So the API will use a DB, everything else will use that API.
This is just the MySQL backend of models and what not.
Don't have great docs yet so I've included some of the unittests to show what's possible.
# Example
## define
```python
import relations
import relations_pymysql
# The source is a string, the backend of which is defined at runtime
class SourceModel(relations.Model):
SOURCE = "PyMySQLSource"
class Simple(SourceModel):
id = int
name = str
class Plain(SourceModel):
ID = None # This table has no primary id field
simple_id = int
name = str
# This makes Simple a parent of Plain
relations.OneToMany(Simple, Plain)
class Meta(SourceModel):
id = int
name = str
flag = bool
spend = float
people = set # JSON storage
stuff = list # JSON storage
things = dict, {"extract": "for__0____1"} # Extracts things["for"][0][-1] as a virtual column
push = str, {"inject": "stuff___1__relations.io____1"} # Injects this value into stuff[-1]["relations.io"]["1"]
def subnet_attr(values, value):
values["address"] = str(value)
min_ip = value[0]
max_ip = value[-1]
values["min_address"] = str(min_ip)
values["min_value"] = int(min_ip)
values["max_address"] = str(max_ip)
values["max_value"] = int(max_ip)
class Net(SourceModel):
id = int
ip = ipaddress.IPv4Address, { # The field type is that of a class, with the storage being JSON
"attr": {
"compressed": "address", # Storge compressed attr as address key in JSON
"__int__": "value" # Storge int() as value key in JSON
},
"init": "address", # Initilize with address from JSON
"titles": "address", # Use address from JSON as the how to list this field
"extract": {
"address": str, # Extract address as virtual column
"value": int # Extra value as virtual column
}
}
subnet = ipaddress.IPv4Network, {
"attr": subnet_attr,
"init": "address",
"titles": "address"
}
TITLES = "ip__address" # When listing, use ip["address"] as display value
INDEX = "ip__value" # Create an index on the virtual column ip __value
class Unit(SourceModel):
id = int
name = str, {"format": "fancy"}
class Test(SourceModel):
id = int
unit_id = int
name = str, {"format": "shmancy"}
class Case(SourceModel):
id = int
test_id = int
name = str
relations.OneToMany(Unit, Test)
relations.OneToOne(Test, Case)
# With this statement, all the above models now have this MySQL database as a backend
self.source = relations_pymysql.Source("PyMySQLSource", "test_source", host="localhost", user='user', password='passwd')
# Simple.thy().define() = model definition independent of source
# self.source.define(Simple.thy().define()) = model definition for source (SQL in this case)
self.assertEqual(self.source.define(Simple.thy().define()),
"""CREATE TABLE IF NOT EXISTS `test_source`.`simple` (
`id` BIGINT AUTO_INCREMENT,
`name` VARCHAR(255) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE `name` (`name`)
);
""")
# Create the database
self.source.connection.cursor().execute("CREATE DATABASE IF NOT EXISTS `test_source`")
# Create tables in database from models
self.source.execute(Unit.define())
self.source.execute(Test.define())
self.source.execute(Case.define())
self.source.execute(Meta.define())
self.source.execute(Net.define())
```
## create
```python
simple = Simple("sure")
simple.plain.add("fine")
self.source.execute(Simple.define())
self.source.execute(Plain.define())
self.source.execute(Meta.define())
simple.create()
self.assertEqual(simple.id, 1)
self.assertEqual(simple._action, "update")
self.assertEqual(simple._record._action, "update")
self.assertEqual(simple.plain[0].simple_id, 1)
self.assertEqual(simple.plain._action, "update")
self.assertEqual(simple.plain[0]._record._action, "update")
cursor = self.source.connection.cursor()
cursor.execute("SELECT * FROM test_source.simple")
self.assertEqual(cursor.fetchone(), {"id": 1, "name": "sure"})
simples = Simple.bulk().add("ya").create()
self.assertEqual(simples._models, [])
cursor.execute("SELECT * FROM test_source.simple WHERE name='ya'")
self.assertEqual(cursor.fetchone(), {"id": 2, "name": "ya"})
cursor.execute("SELECT * FROM test_source.plain")
self.assertEqual(cursor.fetchone(), {"simple_id": 1, "name": "fine"})
model = Meta("yep", True, 3.50, {"tom", "mary"}, [1, None], {"for": [{"1": "yep"}]}, "sure").create()
cursor.execute("SELECT * FROM test_source.meta")
self.assertEqual(self.source.values_retrieve(model, cursor.fetchone()), {
"id": 1,
"name": "yep",
"flag": 1,
"spend": 3.50,
"people": ["mary", "tom"],
"stuff": [1, {"relations.io": {"1": "sure"}}],
"things": {"for": [{"1": "yep"}]},
"things__for__0____1": "yep"
})
cursor.close()
```
## retrieve
```python
self.source.execute(Unit.define())
self.source.execute(Test.define())
self.source.execute(Case.define())
self.source.execute(Meta.define())
self.source.execute(Net.define())
Unit([["stuff"], ["people"]]).create()
models = Unit.one(name__in=["people", "stuff"])
self.assertRaisesRegex(relations.ModelError, "unit: more than one retrieved", models.retrieve)
model = Unit.one(name="things")
self.assertRaisesRegex(relations.ModelError, "unit: none retrieved", model.retrieve)
self.assertIsNone(model.retrieve(False))
unit = Unit.one(name="people")
self.assertEqual(unit.id, 2)
self.assertEqual(unit._action, "update")
self.assertEqual(unit._record._action, "update")
unit.test.add("things")[0].case.add("persons")
unit.update()
model = Unit.many(test__name="things")
self.assertEqual(model.id, [2])
self.assertEqual(model[0]._action, "update")
self.assertEqual(model[0]._record._action, "update")
self.assertEqual(model[0].test[0].id, 1)
self.assertEqual(model[0].test[0].case.name, "persons")
model = Unit.many(like="p")
self.assertEqual(model.name, ["people"])
model = Test.many(like="p").retrieve()
self.assertEqual(model.name, ["things"])
self.assertFalse(model.overflow)
model = Test.many(like="p", _chunk=1).retrieve()
self.assertEqual(model.name, ["things"])
self.assertTrue(model.overflow)
Meta("yep", True, 1.1, {"tom"}, [1, None], {"a": 1}).create()
model = Meta.one(name="yep")
self.assertEqual(model.flag, True)
self.assertEqual(model.spend, 1.1)
self.assertEqual(model.people, {"tom"})
self.assertEqual(model.stuff, [1, {"relations.io": {"1": None}}])
self.assertEqual(model.things, {"a": 1})
self.assertEqual(Unit.many().name, ["people", "stuff"])
self.assertEqual(Unit.many().sort("-name").name, ["stuff", "people"])
self.assertEqual(Unit.many().sort("-name").limit(1, 1).name, ["people"])
self.assertEqual(Unit.many().sort("-name").limit(0).name, [])
self.assertEqual(Unit.many(name="people").limit(1).name, ["people"])
Meta("dive", people={"tom", "mary"}, stuff=[1, 2, 3, None], things={"a": {"b": [1, 2], "c": "sure"}, "4": 5, "for": [{"1": "yep"}]}).create()
model = Meta.many(people={"tom", "mary"})
self.assertEqual(model[0].name, "dive")
model = Meta.many(stuff=[1, 2, 3, {"relations.io": {"1": None}}])
self.assertEqual(model[0].name, "dive")
model = Meta.many(things={"a": {"b": [1, 2], "c": "sure"}, "4": 5, "for": [{"1": "yep"}]})
self.assertEqual(model[0].name, "dive")
model = Meta.many(stuff__1=2)
self.assertEqual(model[0].name, "dive")
model = Meta.many(things__a__b__0=1)
self.assertEqual(model[0].name, "dive")
model = Meta.many(things__a__c__like="su")
self.assertEqual(model[0].name, "dive")
model = Meta.many(things__a__d__null=True)
self.assertEqual(model[0].name, "dive")
model = Meta.many(things____4=5)
self.assertEqual(model[0].name, "dive")
model = Meta.many(things__a__b__0__gt=1)
self.assertEqual(len(model), 0)
model = Meta.many(things__a__c__notlike="su")
self.assertEqual(len(model), 0)
model = Meta.many(things__a__d__null=False)
self.assertEqual(len(model), 0)
model = Meta.many(things____4=6)
self.assertEqual(len(model), 0)
model = Meta.many(things__a__b__has=1)
self.assertEqual(len(model), 1)
model = Meta.many(things__a__b__has=3)
self.assertEqual(len(model), 0)
model = Meta.many(things__a__b__any=[1, 3])
self.assertEqual(len(model), 1)
model = Meta.many(things__a__b__any=[4, 3])
self.assertEqual(len(model), 0)
model = Meta.many(things__a__b__all=[2, 1])
self.assertEqual(len(model), 1)
model = Meta.many(things__a__b__all=[3, 2, 1])
self.assertEqual(len(model), 0)
model = Meta.many(people__has="mary")
self.assertEqual(len(model), 1)
model = Meta.many(people__has="dick")
self.assertEqual(len(model), 0)
model = Meta.many(people__any=["mary", "dick"])
self.assertEqual(len(model), 1)
model = Meta.many(people__any=["harry", "dick"])
self.assertEqual(len(model), 0)
model = Meta.many(people__all=["mary", "tom"])
self.assertEqual(len(model), 1)
model = Meta.many(people__all=["tom", "dick", "mary"])
self.assertEqual(len(model), 0)
Net(ip="1.2.3.4", subnet="1.2.3.0/24").create()
Net().create()
model = Net.many(like='1.2.3.')
self.assertEqual(model[0].ip.compressed, "1.2.3.4")
model = Net.many(ip__address__like='1.2.3.')
self.assertEqual(model[0].ip.compressed, "1.2.3.4")
model = Net.many(ip__value__gt=int(ipaddress.IPv4Address('1.2.3.0')))
self.assertEqual(model[0].ip.compressed, "1.2.3.4")
model = Net.many(subnet__address__like='1.2.3.')
self.assertEqual(model[0].ip.compressed, "1.2.3.4")
model = Net.many(subnet__min_value=int(ipaddress.IPv4Address('1.2.3.0')))
self.assertEqual(model[0].ip.compressed, "1.2.3.4")
model = Net.many(ip__address__notlike='1.2.3.')
self.assertEqual(len(model), 0)
model = Net.many(ip__value__lt=int(ipaddress.IPv4Address('1.2.3.0')))
self.assertEqual(len(model), 0)
model = Net.many(subnet__address__notlike='1.2.3.')
self.assertEqual(len(model), 0)
model = Net.many(subnet__max_value=int(ipaddress.IPv4Address('1.2.3.0')))
self.assertEqual(len(model), 0)
```
## update
```python
self.source.execute(Unit.define())
self.source.execute(Test.define())
self.source.execute(Case.define())
self.source.execute(Meta.define())
self.source.execute(Net.define())
Unit([["people"], ["stuff"]]).create()
unit = Unit.many(id=2).set(name="things")
self.assertEqual(unit.update(), 1)
unit = Unit.one(2)
unit.name = "thing"
unit.test.add("moar")
self.assertEqual(unit.update(), 1)
self.assertEqual(unit.name, "thing")
self.assertEqual(unit.test[0].id, 1)
self.assertEqual(unit.test[0].name, "moar")
Meta("yep", True, 1.1, {"tom"}, [1, None], {"a": 1}).create()
Meta.one(name="yep").set(flag=False, people=set(), stuff=[], things={}).update()
model = Meta.one(name="yep")
self.assertEqual(model.flag, False)
self.assertEqual(model.spend, 1.1)
self.assertEqual(model.people, set())
self.assertEqual(model.stuff, [])
self.assertEqual(model.things, {})
plain = Plain.one()
self.assertRaisesRegex(relations.ModelError, "plain: nothing to update from", plain.update)
ping = Net(ip="1.2.3.4", subnet="1.2.3.0/24").create()
pong = Net(ip="5.6.7.8", subnet="5.6.7.0/24").create()
Net.many().set(subnet="9.10.11.0/24").update()
self.assertEqual(Net.one(ping.id).subnet.compressed, "9.10.11.0/24")
self.assertEqual(Net.one(pong.id).subnet.compressed, "9.10.11.0/24")
Net.one(ping.id).set(ip="13.14.15.16").update()
self.assertEqual(Net.one(ping.id).ip.compressed, "13.14.15.16")
self.assertEqual(Net.one(pong.id).ip.compressed, "5.6.7.8")
```
## delete
```python
self.source.execute(Unit.define())
self.source.execute(Test.define())
self.source.execute(Case.define())
self.source.execute(Plain.define())
unit = Unit("people")
unit.test.add("stuff").add("things")
unit.create()
self.assertEqual(Test.one(id=2).delete(), 1)
self.assertEqual(len(Test.many()), 1)
self.assertEqual(Unit.one(1).test.delete(), 1)
self.assertEqual(Unit.one(1).retrieve().delete(), 1)
self.assertEqual(len(Unit.many()), 0)
self.assertEqual(len(Test.many()), 0)
self.assertEqual(Test.many().delete(), 0)
plain = Plain(0, "nope").create()
self.assertRaisesRegex(relations.ModelError, "plain: nothing to delete from", plain.delete)
```
| /relations-pymysql-0.6.12.tar.gz/relations-pymysql-0.6.12/README.md | 0.549399 | 0.853272 | README.md | pypi |
import requests
import relations
class Source(relations.Source):
    """
    Source with a REST backend

    Every model operation (create/retrieve/update/delete) is translated into
    an HTTP call against a Relations-style REST API.
    """

    url = None      # base URL of the REST service
    session = None  # requests.Session used for all HTTP calls

    def __init__(self, name, url, session=None, **kwargs): # pylint: disable=unused-argument
        """
        Store the base url and the session, creating a session if none given.

        Extra keyword arguments (other than name/url) are set as attributes
        on the session, e.g. auth, headers, verify.
        """

        self.url = url

        if session is not None:
            self.session = session
        else:
            self.session = requests.Session()

        # NOTE(review): original indentation was lost in this chunk; this
        # assumes the kwargs are applied to whichever session is in use —
        # confirm against upstream before relying on it
        for key, arg in kwargs.items():
            if key not in ["name", "url"]:
                setattr(self.session, key, arg)

    @staticmethod
    def result(model, key, response):
        """
        Checks a response and returns the result

        Raises ModelError using the API's message on any 4xx/5xx status and
        propagates the body's overflow flag onto the model when present.
        """

        if response.status_code >= 400:
            raise relations.ModelError(model, response.json().get("message", "API Error"))

        body = response.json()

        # overflow means more records matched than were retrieved
        if "overflow" in body:
            model.overflow = model.overflow or body["overflow"]

        return body[key]

    def init(self, model):
        """
        Init the model

        Ensures SINGULAR/PLURAL/ENDPOINT naming attributes exist (deriving
        defaults from NAME) and marks the id field as auto generated.
        """

        self.record_init(model._fields)

        self.ensure_attribute(model, "SINGULAR")
        self.ensure_attribute(model, "PLURAL")
        self.ensure_attribute(model, "ENDPOINT")

        if model.SINGULAR is None:
            model.SINGULAR = model.NAME

        if model.PLURAL is None:
            model.PLURAL = f"{model.SINGULAR}s"

        if model.ENDPOINT is None:
            model.ENDPOINT = model.SINGULAR

        # the API generates the id unless the model says otherwise
        if model._id is not None and model._fields._names[model._id].auto is None:
            model._fields._names[model._id].auto = True

    def create_field(self, field, values):
        """
        Updates values with the field's that changed (auto fields are the API's job)
        """

        if not field.auto:
            values[field.name] = field.export()

    def create(self, model):
        """
        Executes the create

        POSTs all records in one request, writes the API-assigned ids back,
        then creates children and flips actions to update (unless bulk).
        """

        models = model._each("create")

        # build the list of plain records to send
        values = []

        for creating in models:
            record = {}
            self.create_record(creating._record, record)
            values.append(record)

        records = self.result(model, model.PLURAL, self.session.post(f"{self.url}/{model.ENDPOINT}", json={model.PLURAL: values}))

        for index, creating in enumerate(models):

            # copy back the auto generated id from the API's response
            if model._id is not None and model._fields._names[model._id].auto:
                creating[model._id] = records[index][model._fields._names[model._id].store]

            if not model._bulk:

                # children can only be created once the parent id is known
                for parent_child in creating.CHILDREN:
                    if creating._children.get(parent_child):
                        creating._children[parent_child].create()

                creating._action = "update"
                creating._record._action = "update"

        if model._bulk:
            model._models = []
        else:
            model._action = "update"

        return model

    def retrieve_field(self, field, criteria):
        """
        Adds critera to the filter, flattening operators to name__operator keys
        """

        for operator, value in (field.criteria or {}).items():
            # sets are sorted so the criteria serialize deterministically
            criteria[f"{field.name}__{operator}"] = sorted(value) if isinstance(value, set) else value

    def count(self, model):
        """
        Executes the retrieve as a count only request
        """

        model._collate()

        body = {"filter": {}}

        self.retrieve_record(model._record, body["filter"])

        body["count"] = True

        if model._like:
            body["filter"]["like"] = model._like

        return self.result(model, model.PLURAL, self.session.get(f"{self.url}/{model.ENDPOINT}", json=body))

    def retrieve(self, model, verify=True):
        """
        Executes the retrieve

        For mode "one": raises if multiple matched, raises (or returns None
        when verify=False) if none matched. Otherwise populates _models.
        """

        model._collate()

        body = {"filter": {}}

        self.retrieve_record(model._record, body["filter"])

        if model._like:
            body["filter"]["like"] = model._like

        if model._sort:
            body["sort"] = model._sort

        if model._limit is not None:
            body["limit"] = {"per_page": model._limit}

        if model._offset:
            body["limit"]["start"] = model._offset

        matches = self.result(model, model.PLURAL, self.session.get(f"{self.url}/{model.ENDPOINT}", json=body))

        if model._mode == "one" and len(matches) > 1:
            raise relations.ModelError(model, "more than one retrieved")

        if model._mode == "one" and model._role != "child":

            if len(matches) < 1:

                if verify:
                    raise relations.ModelError(model, "none retrieved")

                return None

            model._record = model._build("update", _read=matches[0])

        else:

            model._models = []

            for match in matches:
                model._models.append(model.__class__(_read=match))

            model._record = None

        model._action = "update"

        return model

    def titles(self, model):
        """
        Creates the titles structure, retrieving first if still pending
        """

        if model._action == "retrieve":
            self.retrieve(model)

        titles = relations.Titles(model)

        for titling in model._each():
            titles.add(titling)

        return titles

    def update_field(self, field, values):
        """
        Updates values with the field's that changed, resetting original
        """

        if not field.auto and field.delta():
            values[field.name] = field.original = field.export()

    def field_mass(self, field, values):
        """
        Mass values with the field's that changed (used for filter based updates)
        """

        if not field.auto and field.changed:
            values[field.name] = field.export()

    def update(self, model):
        """
        Executes the update

        Filter based (retrieve + set) updates PATCH the collection endpoint;
        id based updates PATCH each record's endpoint and cascade to children.
        """

        # If the overall model is retrieving and the record has values set
        updated = 0

        if model._action == "retrieve" and model._record._action == "update":

            criteria = {}
            self.retrieve_record(model._record, criteria)

            values = {}
            self.record_mass(model._record, values)

            updated += self.result(model, "updated", self.session.patch(
                f"{self.url}/{model.ENDPOINT}", json={"filter": criteria, model.PLURAL: values})
            )

        elif model._id:

            for updating in model._each("update"):

                values = {}
                self.update_record(updating._record, values)

                updated += self.result(updating, "updated", self.session.patch(
                    f"{self.url}/{model.ENDPOINT}/{updating[model._id]}", json={model.SINGULAR: values})
                )

                for parent_child in updating.CHILDREN:
                    if updating._children.get(parent_child):
                        updating._children[parent_child].create().update()

        else:

            raise relations.ModelError(model, "nothing to update from")

        return updated

    def delete(self, model):
        """
        Executes the delete

        Filter based deletes use the collated criteria; id based deletes
        collect ids into an id__in criterion and reset actions to create.
        """

        criteria = {}

        if model._action == "retrieve":

            self.retrieve_record(model._record, criteria)

        elif model._id:

            criterion = f"{model._id}__in"
            criteria[criterion] = []

            for deleting in model._each():
                criteria[criterion].append(deleting[model._id])
                deleting._action = "create"

            model._action = "create"

        else:

            raise relations.ModelError(model, "nothing to delete from")

        return self.result(model, "deleted", self.session.delete(f"{self.url}/{model.ENDPOINT}", json={"filter": criteria}))
import collections
import flask_restx
from werkzeug.utils import cached_property
class OpenApi(flask_restx.Swagger):
    """
    Override Flask RestX Swagger to emit OpenAPI 3 specs for Relations resources
    """

    @staticmethod
    def relations_value(field):
        """
        Generates an example value for a single field definition
        """

        if "default" in field:
            return field["default"]

        options = field.get("options")

        if options:
            # set kinds render as arrays, so wrap the first option in a list
            first = options[0]
            return [first] if field["kind"] == "set" else first

        # fall back to the zero value for the kind, None when unknown
        return {"str": "", "int": 0, "bool": False, "float": 0.0}.get(field["kind"])
@classmethod
def relations_example(cls, thy, readonly=False):
"""
Generates an example for a single record
"""
example = {}
for field in thy._fields:
if readonly or not field.get("readonly"):
example[field["name"]] = cls.relations_value(field)
return example
@staticmethod
def relations_schemas(thy):
"""
Generates specs from fields
"""
record = {
"type": "object",
"properties": {},
}
required = []
for field in thy._fields:
property = {
"type": field["kind"]
}
if field.get("readonly"):
property["readOnly"] = True
if field.get("required"):
required.append(field["name"])
record["properties"][field['name']] = property
if required:
record["required"] = required
singular = {
"type": "object",
"properties": {
thy.SINGULAR: {
"$ref": f"#/components/schemas/{thy._model.TITLE}"
}
}
}
plural = {
"type": "object",
"properties": {
thy.PLURAL: {
"type": "array",
"items": {
"$ref": f"#/components/schemas/{thy._model.TITLE}"
}
}
}
}
filter = {
"type": "object",
"properties": {
"filter": {
"$ref": f"#/components/schemas/{thy._model.TITLE}"
}
},
}
sort = {
"type": "object",
"properties": {
"sort": {
"type": "array",
"description": "sort by these fields, prefix with + for ascending (default), - for descending",
"default": thy._model._order
}
}
}
limit = {
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": f"limit the number of {thy.PLURAL}"
},
"limit__start": {
"type": "integer",
"description": f"limit the number of {thy.PLURAL} starting here"
},
"limit__per_page": {
"type": "integer",
"description": f"limit the number of {thy.PLURAL} by this page size (default {thy._model.CHUNK})"
},
"limit__page": {
"type": "integer",
"description": f"limit the number of {thy.PLURAL} and retrieve this page"
}
}
}
count = {
"type": "object",
"properties": {
"count": {
"type": "boolean",
"description": f"return only the count of {thy.PLURAL} found"
}
}
}
return {
thy._model.TITLE: record,
thy.SINGULAR: singular,
thy.PLURAL: plural,
f"{thy.SINGULAR}_filter": filter,
f"{thy.SINGULAR}_sort": sort,
f"{thy.SINGULAR}_limit": limit,
f"{thy.SINGULAR}_count": count
}
    @classmethod
    def relations_create_options(cls, thy):
        """
        Generates create options operation (OPTIONS on the collection path)
        """

        return {
            "tags": [thy._model.TITLE],
            "operationId": f"{thy.SINGULAR}_create_options",
            "summary": f"generates and validates fields to create one {thy.SINGULAR} or many {thy.PLURAL}",
            "description": f"To generate, send nothing. To validate, send a {thy.SINGULAR}.",
            "requestBody": {
                "content": {
                    "application/json": {
                        "schema": {
                            "$ref": f"#/components/schemas/{thy.SINGULAR}"
                        },
                        "examples": {
                            "generate": {
                                "value": {}
                            },
                            "validate": {
                                "value": {
                                    thy.SINGULAR: cls.relations_example(thy)
                                }
                            }
                        }
                    }
                }
            },
            "responses": {
                "200": {
                    "description": f"fields to create one {thy.SINGULAR} or many {thy.PLURAL} generated and validated",
                    "content": {
                        "application/json": {
                            "schema": {
                                "$ref": '#/components/schemas/Options'
                            }
                        }
                    }
                }
            }
        }
@classmethod
def relations_create_filter(cls, thy):
"""
Generates create operation
"""
return {
"tags": [thy._model.TITLE],
"operationId": f"{thy.SINGULAR}_create_search",
"summary": f"creates one {thy.SINGULAR} or many {thy.PLURAL} or a complex retrieve",
"description": f"To create one, send {thy.SINGULAR}. To create many, send {thy.PLURAL}. To retrieve send filter (sort, limit, count optional).",
"requestBody": {
"content": {
"application/json": {
"schema": {
"oneOf": [
{
"$ref": f"#/components/schemas/{thy.SINGULAR}",
},
{
"$ref": f"#/components/schemas/{thy.PLURAL}"
},
{
"oneOf": [
{
"$ref": f"#/components/schemas/{thy.SINGULAR}_filter"
}
],
"anyOf": [
{
"$ref": f"#/components/schemas/{thy.SINGULAR}_sort"
},
{
"$ref": f"#/components/schemas/{thy.SINGULAR}_limit"
},
{
"$ref": f"#/components/schemas/{thy.SINGULAR}_count"
}
]
}
]
},
"examples": {
"create one": {
"value": {
thy.SINGULAR: cls.relations_example(thy)
}
},
"create many": {
"value": {
thy.PLURAL: [cls.relations_example(thy)]
}
},
"complex retrieve": {
"value": {
"filter": cls.relations_example(thy),
"sort": thy._model._order
}
},
"limit retrieve": {
"value": {
"filter": cls.relations_example(thy),
"sort": thy._model._order,
"limit": {
"limit": thy._model.CHUNK,
"start": 0
}
}
},
"paginate retrieve": {
"value": {
"filter": cls.relations_example(thy),
"sort": thy._model._order,
"limit": {
"page": 1,
"per_page": thy._model.CHUNK
}
}
},
"count retrieve": {
"value": {
"filter": cls.relations_example(thy),
"count": True
}
}
}
}
}
},
"responses": {
"200": {
"description": f"many {thy.PLURAL} retrieved",
"content": {
"application/json": {
"schema": {
"oneOf": [
{
"$ref": f"#/components/schemas/{thy.PLURAL}"
},
{
"$ref": "#/components/schemas/Retrieved"
},
]
},
"examples": {
"list retrieve": {
"value": {
thy.PLURAL: [cls.relations_example(thy, readonly=True)],
"overflow": False,
"formats": {}
}
},
"count retrieve": {
"value": {
"count": 1
}
}
}
}
}
},
"201": {
"description": f"one {thy.SINGULAR} or many {thy.PLURAL} created",
"content": {
"application/json": {
"schema": {
"oneOf": [
{
"$ref": f"#/components/schemas/{thy.SINGULAR}",
"description": "lol"
},
{
"$ref": f"#/components/schemas/{thy.PLURAL}"
}
]
},
"examples": {
"create one": {
"value": {
thy.SINGULAR: cls.relations_example(thy, readonly=True)
}
},
"create many": {
"value": {
thy.PLURAL: [cls.relations_example(thy, readonly=True)]
}
}
}
}
}
},
"400": {
"description": "unable to create due to bad request"
}
}
}
@classmethod
def relations_retrieve_many(cls, thy):
"""
Generates reteieve many operation
"""
return {
"tags": [thy._model.TITLE],
"operationId": f"{thy.SINGULAR}_retrieve_many",
"summary": f"retrieves many {thy.PLURAL}",
"parameters": [
{
"in": "query",
"schema": {
"type": "object",
"properties": {
"dude": {
"type": "integer",
"example": 5,
"description": "lolwut"
}
},
},
"style": "form",
"explode": True,
"name": "params",
"examples": {
"retrieve": {
"value": {
**cls.relations_example(thy),
"sort": ",".join(thy._model._order)
}
},
"limit": {
"value": {
**cls.relations_example(thy),
"sort": ",".join(thy._model._order),
"limit": thy._model.CHUNK,
"limit__start": 0
}
},
"paginate": {
"value": {
**cls.relations_example(thy),
"sort": ",".join(thy._model._order),
"limit__page": 1,
"limit__per_page": thy._model.CHUNK
}
},
"count": {
"value": {
**cls.relations_example(thy),
"count": 1
}
}
}
}
],
"responses": {
"200": {
"description": f"many {thy.PLURAL} retrieved",
"content": {
"application/json": {
"schema": {
"$ref": f"#/components/schemas/{thy.PLURAL}"
},
"examples": {
"list retrieve": {
"value": {
thy.PLURAL: [cls.relations_example(thy, readonly=True)],
"overflow": False,
"formats": {}
}
},
"count retrieve": {
"value": {
"count": 1
}
}
}
}
}
}
}
}
    @classmethod
    def relations_update_many(cls, thy):
        """
        Generates update many operation (PATCH on the collection path)
        """

        return {
            "tags": [thy._model.TITLE],
            "operationId": f"{thy.SINGULAR}_update_many",
            "summary": f"updates many {thy.PLURAL}",
            "parameters": [
                {
                    "in": "query",
                    "schema": {
                        "type": "object"
                    },
                    "style": "form",
                    "explode": True,
                    "name": "params",
                    "examples": {
                        "filter through params": {
                            "value": {
                                **cls.relations_example(thy)
                            }
                        },
                        "filter through params limit": {
                            "value": {
                                **cls.relations_example(thy),
                                "sort": ",".join(thy._model._order),
                                "limit": thy._model.CHUNK,
                                "limit__start": 0
                            }
                        },
                        "filter through params paginate": {
                            "value": {
                                **cls.relations_example(thy),
                                "sort": ",".join(thy._model._order),
                                "limit__page": 1,
                                "limit__per_page": thy._model.CHUNK
                            }
                        },
                        "filter through body": {
                            "value": {}
                        }
                    }
                }
            ],
            "requestBody": {
                "content": {
                    "application/json": {
                        "schema": {
                            "anyOf": [
                                {
                                    "$ref": f"#/components/schemas/{thy.SINGULAR}"
                                },
                                {
                                    "$ref": f"#/components/schemas/{thy.SINGULAR}_filter"
                                }
                            ]
                        },
                        "examples": {
                            "filter through params": {
                                "value": {
                                    thy.PLURAL: cls.relations_example(thy)
                                }
                            },
                            "filter through body": {
                                "value": {
                                    "filter": cls.relations_example(thy),
                                    thy.PLURAL: cls.relations_example(thy)
                                }
                            },
                            "filter through body limit": {
                                "value": {
                                    "filter": cls.relations_example(thy),
                                    "sort": thy._model._order,
                                    "limit": {
                                        "limit": thy._model.CHUNK,
                                        "start": 0
                                    },
                                    thy.PLURAL: cls.relations_example(thy)
                                }
                            },
                            "filter through body paginate": {
                                "value": {
                                    "filter": cls.relations_example(thy),
                                    "sort": thy._model._order,
                                    "limit": {
                                        "page": 1,
                                        "per_page": thy._model.CHUNK
                                    },
                                    thy.PLURAL: cls.relations_example(thy)
                                }
                            },
                            "update all": {
                                "value": {
                                    "filter": {},
                                    thy.PLURAL: cls.relations_example(thy)
                                }
                            }
                        }
                    }
                }
            },
            "responses": {
                "202": {
                    "description": f"many {thy.PLURAL} updated",
                    "content": {
                        "application/json": {
                            "schema": {
                                "$ref": "#/components/schemas/Updated"
                            }
                        }
                    }
                },
                "400": {
                    "description": "unable to update due to bad request"
                }
            }
        }
@classmethod
def relations_delete_many(cls, thy):
"""
Generates delete many operation
"""
return {
"tags": [thy._model.TITLE],
"operationId": f"{thy.SINGULAR}_delete_many",
"summary": f"deletes many {thy.PLURAL}",
"parameters": [
{
"in": "query",
"schema": {
"type": "object"
},
"style": "form",
"explode": True,
"name": "params",
"examples": {
"filter through params": {
"value": {
**cls.relations_example(thy)
}
},
"filter through params limit": {
"value": {
**cls.relations_example(thy),
"sort": ",".join(thy._model._order),
"limit": thy._model.CHUNK,
"limit__start": 0
}
},
"filter through params paginate": {
"value": {
**cls.relations_example(thy),
"sort": ",".join(thy._model._order),
"limit__page": 1,
"limit__per_page": thy._model.CHUNK
}
},
"filter through body": {
"value": {}
}
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"anyOf": [
{
"$ref": f"#/components/schemas/{thy.SINGULAR}_filter"
}
]
},
"examples": {
"filter through params": {
"value": {}
},
"filter through body": {
"value": {
"filter": cls.relations_example(thy)
}
},
"filter through body limit": {
"value": {
"filter": cls.relations_example(thy),
"sort": thy._model._order,
"limit": {
"limit": thy._model.CHUNK,
"start": 0
}
}
},
"filter through body paginate": {
"value": {
"filter": cls.relations_example(thy),
"sort": thy._model._order,
"limit": {
"page": 1,
"per_page": thy._model.CHUNK
}
}
},
"delete all": {
"value": {
"filter": {}
}
}
}
}
}
},
"responses": {
"202": {
"description": f"many {thy.PLURAL} deleted",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Deleted"
}
}
}
},
"400": {
"description": "unable to updeletedate due to bad request"
}
}
}
    @classmethod
    def relations_update_options(cls, thy):
        """
        Generates update options operation (OPTIONS on the single record path)
        """

        return {
            "tags": [thy._model.TITLE],
            "operationId": f"{thy.SINGULAR}_update_options",
            "summary": f"generates and validates fields to update one {thy.SINGULAR}",
            "requestBody": {
                "content": {
                    "application/json": {
                        "schema": {
                            "$ref": f"#/components/schemas/{thy.SINGULAR}"
                        },
                        "examples": {
                            "generate": {
                                "value": {}
                            },
                            "validate": {
                                "value": {
                                    thy.SINGULAR: cls.relations_example(thy)
                                }
                            }
                        }
                    }
                }
            },
            "responses": {
                "200": {
                    "description": f"fields to update one {thy.SINGULAR} generated and validated",
                    "content": {
                        "application/json": {
                            "schema": {
                                "$ref": '#/components/schemas/Options'
                            }
                        }
                    }
                },
                "404": {
                    "description": f"{thy.SINGULAR} not found"
                }
            }
        }
    @classmethod
    def relations_retrieve_one(cls, thy):
        """
        Generates retrieve one operation (GET on the single record path)
        """

        return {
            "tags": [thy._model.TITLE],
            "operationId": f"{thy.SINGULAR}_retrieve_one",
            "summary": f"retrieves one {thy.SINGULAR}",
            "responses": {
                "200": {
                    "description": f"one {thy.SINGULAR} retrieved",
                    "content": {
                        "application/json": {
                            "schema": {
                                "$ref": f"#/components/schemas/{thy.SINGULAR}"
                            },
                            "examples": {
                                "retrieve": {
                                    "value": {
                                        thy.SINGULAR: cls.relations_example(thy, readonly=True),
                                        "overflow": False,
                                        "formats": {}
                                    }
                                }
                            }
                        }
                    }
                },
                "404": {
                    "description": f"{thy.SINGULAR} not found"
                }
            }
        }
    @classmethod
    def relations_update_one(cls, thy):
        """
        Generates update one operation (PATCH on the single record path)
        """

        return {
            "tags": [thy._model.TITLE],
            "operationId": f"{thy.SINGULAR}_update_one",
            "summary": f"updates one {thy.SINGULAR}",
            "requestBody": {
                "content": {
                    "application/json": {
                        "schema": {
                            "$ref": f"#/components/schemas/{thy.SINGULAR}"
                        },
                        "examples": {
                            "update": {
                                "value": {
                                    thy.SINGULAR: cls.relations_example(thy)
                                }
                            }
                        }
                    }
                }
            },
            "responses": {
                "202": {
                    "description": f"one {thy.SINGULAR} updated",
                    "content": {
                        "application/json": {
                            "schema": {
                                "$ref": "#/components/schemas/Updated"
                            }
                        }
                    }
                },
                "400": {
                    "description": "unable to update due to bad request"
                },
                "404": {
                    "description": f"{thy.SINGULAR} not found"
                }
            }
        }
@classmethod
def relations_delete_one(cls, thy):
"""
Generates delete one operation
"""
return {
"tags": [thy._model.TITLE],
"operationId": f"{thy.SINGULAR}_delete_one",
"summary": f"deletes one {thy.SINGULAR}",
"responses": {
"202": {
"description": f"one {thy.SINGULAR} deleted",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/"
}
}
}
},
"404": {
"description": f"{thy.SINGULAR} not found"
}
}
}
    def relations_operations(self, specs, ns, urls, thy): # pylint: disable=too-many-branches
        """
        Generates operations for all methods of a resource

        Collection paths (no path parameter) get the create/many operations;
        single record paths (containing '{') get the one-record operations.
        """

        for url in self.api.ns_urls(ns, urls):

            path = flask_restx.swagger.extract_path(url)

            # keep whatever flask_restx generated, then rebuild in a fixed method order
            methods = specs["paths"][path]
            specs["paths"][path] = collections.OrderedDict()

            for method in ["options", "post", "get", "patch", "delete"]:

                if "{" not in path:
                    if method == "options":
                        specs["paths"][path][method] = {**methods[method], **self.relations_create_options(thy)}
                    elif method == "post":
                        specs["paths"][path][method] = {**methods[method], **self.relations_create_filter(thy)}
                    elif method == "get":
                        specs["paths"][path][method] = {**methods[method], **self.relations_retrieve_many(thy)}
                    elif method == "patch":
                        specs["paths"][path][method] = {**methods[method], **self.relations_update_many(thy)}
                    elif method == "delete":
                        specs["paths"][path][method] = {**methods[method], **self.relations_delete_many(thy)}
                else:
                    if method == "options":
                        specs["paths"][path][method] = {**methods[method], **self.relations_update_options(thy)}
                    elif method == "post":
                        # there is no create on a single record path
                        continue
                    elif method == "get":
                        specs["paths"][path][method] = {**methods[method], **self.relations_retrieve_one(thy)}
                    elif method == "patch":
                        specs["paths"][path][method] = {**methods[method], **self.relations_update_one(thy)}
                    elif method == "delete":
                        specs["paths"][path][method] = {**methods[method], **self.relations_delete_one(thy)}
    def relations_resource(self, specs, ns, resource, urls):
        """
        Overrides OpenApi specs for a Relations Resource: registers its tag,
        its component schemas, and its per-path operations.
        """

        thy = resource.thy()

        specs["tags"].append({
            "name": thy._model.TITLE
        })

        specs["components"]["schemas"].update(self.relations_schemas(thy))

        self.relations_operations(specs, ns, urls, thy)
    def as_dict(self):
        """
        Overrides swagger dict to make it OpenAPI

        Converts the Swagger 2 output to OpenAPI 3, installs the shared
        component schemas, then overlays specs for every Relations resource
        (any resource exposing a thy() method).
        """

        specs = super().as_dict()

        # swap the version marker from Swagger 2 to OpenAPI 3
        del specs["swagger"]
        specs["openapi"] = "3.0.3"

        specs.setdefault("components", {})
        specs["components"].setdefault("schemas", {})

        # shared schemas referenced by the per-resource operations
        specs["components"]["schemas"].update({
            "Field": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string",
                        "description": "name of the field"
                    },
                    "value": {
                        "description": "the current value of the field"
                    },
                    "original": {
                        "description": "the original value of the field"
                    },
                    "default": {
                        "description": "the default value of the field"
                    },
                    "options": {
                        "type": "array",
                        "items": {},
                        "description": "array of options to select from"
                    },
                    "required": {
                        "type": "boolean",
                        "description": "whether the field is required"
                    },
                    "multi": {
                        "type": "boolean",
                        "description": "whether multiple options can be selected"
                    },
                    "trigger": {
                        "type": "boolean",
                        "description": "whether to reload when this field changes"
                    },
                    "readonly": {
                        "type": "boolean",
                        "description": "whether the field is readonly"
                    },
                    "validation": {
                        "description": "how to validate this field"
                    },
                    "content": {
                        "type": "object",
                        "description": "used for any other data, like titles"
                    },
                    "errors": {
                        "type": "array",
                        "description": "the original value of the field"
                    }
                }
            },
            "Options": {
                "type": "object",
                "properties": {
                    "fields": {
                        "type": "array",
                        "items": {
                            "$ref": "#/components/schemas/Field"
                        }
                    },
                    "errors": {
                        "type": "array",
                        "items": {
                            "type": "string"
                        }
                    }
                }
            },
            "Retrieved": {
                "type": "object",
                "properties": {
                    "overflow": {
                        "type": "boolean",
                        "description": "whether more could have been retrieved"
                    },
                    "format": {
                        "type": "object",
                        "description": "Formatting information for fields, like titles"
                    }
                }
            },
            "Counted": {
                "type": "object",
                "properties": {
                    "count": {
                        "type": "integer",
                        "description": "count of those retrieved"
                    }
                }
            },
            "Updated": {
                "type": "object",
                "properties": {
                    "updated": {
                        "type": "integer",
                        "description": "count of those updated"
                    }
                }
            },
            "Deleted": {
                "type": "object",
                "properties": {
                    "deleted": {
                        "type": "integer",
                        "description": "count of those deleted"
                    }
                }
            }
        })

        # overlay specs for every Relations resource in every namespace
        for ns in self.api.namespaces:
            for resource, urls, _, _ in ns.resources:
                if hasattr(resource, "thy"):
                    self.relations_resource(specs, ns, resource, urls)

        return specs
class Api(flask_restx.Api):
    """
    Override of Flask RestX Api so the schema is rendered via relations' OpenApi
    """

    @cached_property
    def __schema__(self):
        """
        The Swagger specifications/schema for this API

        :returns dict: the schema as a serializable dict
        """

        # Already rendered once, reuse the cached dict

        if self._schema:
            return self._schema

        try:
            self._schema = OpenApi(self).as_dict()
        except Exception: # pragma: no cover
            # Log the source exception for debugging purpose
            # and return an error message without caching it
            msg = "Unable to render schema"
            flask_restx.api.log.exception(msg) # This will provide a full traceback
            return {"error": msg}

        return self._schema
import relations_sql
class CRITERION(relations_sql.EXPRESSION):
    """
    CRITERION class, for comparing two values
    """

    LEFT = None    # expression class used to wrap a bare left value
    RIGHT = None   # expression class used to wrap a bare right value

    OPERAND = None # OPERAND to use as format string (if any)
    INVERT = None  # OPERAND to use as format string (if not)

    PARENTHESES = False # whether to force parentheses around the right side
    JSONPATH = False    # whether a pathed column forces JSON comparison
    REVERSE = False     # whether right is generated before left
    CAST = None         # optional format string applied to both sides

    left = None    # Left expression
    right = None   # Right expression

    def __init__(self, left=None, right=None, invert=False, jsonify=False, extracted=False, **kwargs):

        # Inverting requires an INVERT operand to format with

        if invert and self.INVERT is None:
            raise relations_sql.SQLError(self, "no invert without INVERT operand")

        # A single keyword pair may stand in for positional left/right

        if kwargs:
            left, right = list(kwargs.items())[0]

        if not isinstance(left, relations_sql.SQL):
            left = self.LEFT(left, jsonify=jsonify, extracted=extracted)

        # A column with a JSON path implies JSON comparison unless CASTing

        if self.JSONPATH and not self.CAST and isinstance(left, relations_sql.COLUMN_NAME) and left.path:
            left.jsonify = jsonify = True

        if not isinstance(right, relations_sql.SQL):
            right = self.RIGHT(right, jsonify=jsonify)

        self.left = left
        self.right = right
        self.invert = invert

    def __len__(self):

        return len(self.left) + len(self.right)

    def generate(self, indent=0, count=0, pad=' ', **kwargs):
        """
        Generate the left and right with operand in between
        """

        sql = []
        self.args = []

        current = pad * (count * indent)
        next = current + (indent * pad)
        line = "\n" if indent else ''

        # Skip parentheses only when right is a plain RIGHT value and none are forced

        left, right = ('', '') if isinstance(self.right, self.RIGHT) and not self.PARENTHESES else (f"({line}{next}", f"{line}{current})")

        # The non-value side (sql[0] reversed, sql[1] otherwise) gets the parentheses

        if self.REVERSE:
            self.express(self.right, sql, indent=indent, count=count+1, **kwargs)
            self.express(self.left, sql, indent=indent, count=count+1, **kwargs)
            sql[0] = f"{left}{sql[0]}{right}"
        else:
            self.express(self.left, sql, indent=indent, count=count+1, **kwargs)
            self.express(self.right, sql, indent=indent, count=count+1, **kwargs)
            sql[1] = f"{left}{sql[1]}{right}"

        operand = self.INVERT if self.invert else self.OPERAND

        if self.CAST:
            sql = [self.CAST % expression for expression in sql]

        self.sql = operand % tuple(sql)
class NULL(CRITERION):
    """
    For IS NULL and IS NOT NULL
    """

    OPERAND = "%s IS NULL"
    INVERT = "%s IS NOT NULL"

    JSONNULL = None # optional format string to normalize a JSON path before the NULL check

    def __len__(self):

        return 1

    def generate(self, **kwargs):

        sql = []
        self.args = []

        self.express(self.left, sql, **kwargs)

        # Pathed columns may need JSON normalizing so missing paths compare as NULL

        if isinstance(self.left, relations_sql.COLUMN_NAME) and self.left.path and self.JSONNULL is not None:
            sql[0] = self.JSONNULL % sql[0]

        # right holds a boolean: True means IS NULL, False means IS NOT NULL,
        # and invert flips that choice

        OPERAND = self.INVERT if bool(self.right.value) == bool(self.invert) else self.OPERAND

        self.sql = OPERAND % tuple(sql)
class EQ(CRITERION):
    """
    For =
    """

    OPERAND = "%s=%s"
    INVERT = "%s!=%s"

class GT(CRITERION):
    """
    For >
    """

    OPERAND = "%s>%s"

class GTE(CRITERION):
    """
    For >=
    """

    OPERAND = "%s>=%s"

class LT(CRITERION):
    """
    For <
    """

    OPERAND = "%s<%s"

class LTE(CRITERION):
    """
    For <=
    """

    OPERAND = "%s<=%s"
class LIKE(CRITERION):
    """
    For fuzzy matching
    """

    OPERAND = "%s LIKE %s"
    INVERT = "%s NOT LIKE %s"

    BEFORE = "%"
    AFTER = "%"

    def __init__(self, left=None, right=None, invert=False, jsonify=False, extracted=False, **kwargs):

        # A single keyword pair may stand in for positional left/right

        if kwargs:
            left, right = next(iter(kwargs.items()))

        # Surround the match value with the wildcard affixes unless it's absent

        pattern = right if right is None else f"{self.BEFORE}{right}{self.AFTER}"

        super().__init__(left, pattern, invert=invert, jsonify=jsonify, extracted=extracted)
class START(LIKE):
    """
    For fuzzy matching the start of a string
    """

    BEFORE = "" # no leading wildcard, so the match is anchored at the start

class END(LIKE):
    """
    For fuzzy matching the end of a string
    """

    AFTER = "" # no trailing wildcard, so the match is anchored at the end
class IN(CRITERION):
    """
    For IN
    """

    RIGHT = relations_sql.LIST
    VALUE = relations_sql.VALUE
    PARENTHESES = True

    OPERAND = "%s IN %s"
    INVERT = "%s NOT IN %s"

    def generate(self, indent=0, count=0, pad=' ', **kwargs):
        """
        Generate the left and right with operand in between
        """

        if self.right:
            super().generate(indent=indent, count=count, pad=pad, **kwargs)
        else:
            # IN () is invalid SQL, so an empty list collapses to a constant:
            # False for IN, True for NOT IN
            value = self.VALUE(self.invert)
            value.generate(indent=indent, count=count, pad=pad, **kwargs)
            self.sql = value.sql
            self.args = value.args
class CONTAINS(CRITERION):
    """
    Whether the left set contains all members of the right set
    """

    LEFT = relations_sql.COLUMN_NAME
    RIGHT = relations_sql.VALUE

    OPERAND = "CONTAINS(%s,%s)"
class LENGTHS(CRITERION):
    """
    Whether the left and right sets have the same length
    """

    LEFT = relations_sql.COLUMN_NAME
    RIGHT = relations_sql.VALUE

    OPERAND = "LENGTHS(%s,%s)"
import json
import relations_sql
class COLUMN(relations_sql.DDL):
    """
    COLUMN DDL
    """

    KINDS = {}       # maps abstract field kinds to database column types

    COLUMN_NAME = None

    STORE = None     # format string to rename a column
    KIND = None      # format string to change a column's type
    AUTO = None      # column type for auto incrementing ids
    EXTRACT = None   # format string for a column extracted from a JSON path

    SET_DEFAULT = None
    UNSET_DEFAULT = None

    SET_NONE = None   # format string to make a column NOT NULL
    UNSET_NONE = None # format string to make a column nullable

    def name(self, definition=False):
        """
        Generate a quoted name, with store as the default
        """

        # Fall back to the definition when there's no migrated store

        state = self.definition if definition or "store" not in self.migration else self.migration

        return self.quote(state['store'])

    def extract(self, kind, sql, **kwargs):
        """
        Get extract DDL
        """

        sql.append(self.KINDS.get(kind, self.KINDS["json"]))

        # A store like "parent__path" extracts a JSON path from the parent column

        name, path = self.split(self.migration["store"])

        sql.append(self.EXTRACT % (self.PATH % (self.quote(name), self.str(self.walk(path)))))

    def create(self, **kwargs):
        """
        CREATE DDL
        """

        sql = [self.name()]

        if "__" in self.migration["store"]:
            self.extract(self.migration['kind'], sql, **kwargs)
        else:

            if self.migration.get('auto'):
                sql.append(self.AUTO)
            else:
                sql.append(self.KINDS.get(self.migration['kind'], self.KINDS["json"]))

            if not self.migration.get('none'):
                sql.append("NOT NULL")

            if self.migration.get('default') is not None:

                # Scalar defaults go in as-is, anything else is stored as JSON

                if isinstance(self.migration.get('default'), (bool, int, float, str)):
                    default = self.migration.get('default')
                else:
                    default = json.dumps(self.migration.get('default'))

                quote = self.STR if isinstance(default, str) else ''

                sql.append(f"DEFAULT {quote}{default}{quote}")

        self.sql = " ".join(sql)

    def add(self, **kwargs):
        """
        ADD DDL
        """

        self.create()

        self.sql = f"ADD {self.sql}"

    def store(self, sql):
        """
        Modifies the store
        """

        # Renames from the definition's store to the migration's

        sql.append(self.STORE % (self.name(definition=True), self.name()))

    def kind(self, sql):
        """
        Modifies the kind
        """

        sql.append(self.KIND % (self.name(), self.KINDS.get(self.migration['kind'], self.KINDS["json"])))

    def default(self, sql):
        """
        Modifies the default
        """

        if self.migration.get("default") is not None:

            # Scalar defaults go in as-is, anything else is stored as JSON

            if isinstance(self.migration.get('default'), (bool, int, float, str)):
                default = self.migration.get('default')
            else:
                default = json.dumps(self.migration.get('default'))

            quote = self.STR if isinstance(default, str) else ''

            sql.append(self.SET_DEFAULT % (self.name(), f"{quote}{default}{quote}"))

        else:

            sql.append(self.UNSET_DEFAULT % self.name())

    def none(self, sql):
        """
        Modifies none
        """

        # none=True means the column becomes nullable

        if self.migration["none"]:
            sql.append(self.UNSET_NONE % self.name())
        else:
            sql.append(self.SET_NONE % self.name())

    def modify(self, indent=0, count=0, pad=' ', **kwargs):
        """
        MODIFY DDL
        """

        sql = []

        # Only the aspects present in the migration are modified

        if "store" in self.migration:
            self.store(sql)

        if "kind" in self.migration:
            self.kind(sql)

        if "default" in self.migration:
            self.default(sql)

        if "none" in self.migration:
            self.none(sql)

        current = pad * (count * indent)# pylint: disable=unused-argument

        delimitter = f",\n{current}" if indent else ','

        self.sql = delimitter.join(sql)

    def drop(self, **kwargs):
        """
        DROP DDL
        """

        self.sql = f"DROP {self.quote(self.definition['store'])}"
import relations_sql
class CRITERIA(relations_sql.LIST):
    """
    Collection of CRITERIONS
    """

    ARGS = None         # expression class used to wrap bare values

    DELIMITTER = None   # SQL text joined between expressions
    PARENTHESES = True  # whether to wrap the whole criteria in parentheses

    expressions = None

    def __init__(self, *args):

        self.expressions = []
        self(*args)

    def __call__(self, *args):
        """
        Shorthand for add
        """
        self.add(*args)

    def add(self, *args):
        """
        Add expressions
        """

        expressions = []

        # A single list argument is treated as the list of expressions itself

        if len(args) == 1 and isinstance(args[0], list):
            expressions.extend(args[0])
        else:
            expressions.extend(args)

        for expression in expressions:
            if isinstance(expression, relations_sql.SQL):
                self.expressions.append(expression)
            else:
                self.expressions.append(self.ARGS(expression))

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Concats the values
        """

        current = pad * (count * indent)
        next = current + (indent * pad)
        line = "\n" if indent else ''

        # When indenting, the delimitter's trailing space is replaced by the newline

        delimitter = f"{self.DELIMITTER.rstrip()}{line}{next}" if indent else self.DELIMITTER
        left, right = (f"({line}{next}", f"{line}{current})") if self.PARENTHESES else ('', '')

        sql = []
        self.sql = ""
        self.args = []

        if self:
            self.express(self.expressions, sql, indent=indent, count=count+1, pad=' ', **kwargs)
            self.sql = f"{left}{delimitter.join(sql)}{right}"
class AND(CRITERIA):
    """
    CRITERIA joined by AND
    """

    ARGS = relations_sql.VALUE

    DELIMITTER = ' AND '

class OR(CRITERIA):
    """
    CRITERIA joined by OR
    """

    ARGS = relations_sql.VALUE

    DELIMITTER = ' OR '
class SETS(relations_sql.CRITERION):
    """
    For comparing sets with each other
    """

    expression = None # the composed expression this criterion delegates to

    def __len__(self):

        return 1

    def generate(self, **kwargs):
        """
        Delegates generation to the composed expression
        """

        self.expression.generate(**kwargs)
        self.sql = self.expression.sql
        self.args = self.expression.args

    @staticmethod
    def ensure(value):
        """
        Ensures the value is a list
        """

        # SQL expressions, sets, lists, and None pass through untouched

        if not isinstance(value, relations_sql.SQL) and not isinstance(value, (set, list)) and value is not None:
            return [value]

        return value
class HAS(SETS):
    """
    For if the left has all the members of right
    """

    CONTAINS = relations_sql.CONTAINS

    def __init__(self, left=None, right=None, invert=False, jsonify=False, extracted=False, **kwargs):

        # A single keyword pair may stand in for positional left/right

        if kwargs:
            left, right = next(iter(kwargs.items()))

        # Delegate to CONTAINS with the right side normalized to a list

        self.expression = self.CONTAINS(left, self.ensure(right), invert=invert, jsonify=jsonify, extracted=extracted)
class ANY(SETS):
    """
    For if the left has any of the members of right
    """

    OR = OR
    LEFT = relations_sql.COLUMN_NAME
    VALUE = relations_sql.VALUE
    CONTAINS = relations_sql.CONTAINS

    def __init__(self, left=None, right=None, invert=False, jsonify=False, extracted=False, **kwargs):

        # A single keyword pair may stand in for positional left/right

        if kwargs:
            left, right = next(iter(kwargs.items()))

        if not isinstance(left, relations_sql.SQL):
            left = self.LEFT(left, jsonify=jsonify, extracted=extracted)

        # OR together a CONTAINS check per member of the right side

        members = self.ensure(right)

        self.expression = self.OR([self.CONTAINS(left, self.VALUE([member])) for member in members])
class ALL(SETS):
    """
    For if the left and right have the same members
    """

    AND = AND
    CONTAINS = relations_sql.CONTAINS
    LENGTHS = relations_sql.LENGTHS

    def __init__(self, left=None, right=None, invert=False, jsonify=False, extracted=False, **kwargs):

        # Accept a single keyword pair up front, like sibling HAS and ANY do,
        # so ensure() is applied to the right side regardless of how it was passed
        # (previously **kwargs were forwarded raw and a keyword right was never listified)

        if kwargs:
            left, right = list(kwargs.items())[0]

        right = self.ensure(right)

        # Same members means left contains all of right and both are the same size

        self.expression = self.AND(
            self.CONTAINS(left, right, invert=invert, jsonify=jsonify, extracted=extracted),
            self.LENGTHS(left, right, invert=invert, jsonify=jsonify, extracted=extracted)
        )
class OP:
    """
    Determines the criterion based on operand
    """

    NOT = relations_sql.NOT

    CRITERIONS = {
        'null': relations_sql.NULL,
        'eq': relations_sql.EQ,
        'gt': relations_sql.GT,
        'gte': relations_sql.GTE,
        'lt': relations_sql.LT,
        'lte': relations_sql.LTE,
        'like': relations_sql.LIKE,
        'start': relations_sql.START,
        'end': relations_sql.END,
        'in': relations_sql.IN,
        'has': HAS,
        'any': ANY,
        'all': ALL
    }

    def __new__(cls, *args, **kwargs):

        field = None
        value = None

        # Uppercase kwargs are modifiers, not part of the field/value pair

        invert = kwargs.pop("INVERT", False)
        jsonify = kwargs.pop("JSONIFY", False)
        extracted = kwargs.pop("EXTRACTED", False)

        if len(args) == 2:
            field, value = args
        elif len(kwargs) == 1:
            field, value = list(kwargs.items())[0]
        else:
            raise relations_sql.SQLError(cls, f"need single pair in {kwargs} or double in {args}")

        operand = "eq"

        # The operand rides on the field name, eg name__like or name__not_like

        if '__' in field:

            pieces = field.rsplit('__', 1)

            if pieces[-1] in cls.CRITERIONS:
                field, operand = pieces
            elif pieces[-1].startswith('not_'):
                operands = pieces[-1].split('not_', 1)
                if operands[-1] in cls.CRITERIONS:
                    invert = True
                    field = pieces[0]
                    operand = operands[-1]

        # Criterions without an INVERT operand get wrapped in NOT instead

        if invert and cls.CRITERIONS[operand].INVERT is None:
            return cls.NOT(cls.CRITERIONS[operand](field, value, jsonify=jsonify, extracted=extracted))

        return cls.CRITERIONS[operand](field, value, invert=invert, jsonify=jsonify, extracted=extracted)
import json
import collections.abc
import relations_sql
class EXPRESSION(relations_sql.SQL):
    """
    Base class for expressions
    """

    def __len__(self):

        return 1

    def quote(self, value):
        """
        Quote name if we have quotes
        """

        if self.QUOTE is not None:
            return f"{self.QUOTE}{value}{self.QUOTE}"

        return value

    def express(self, expression, sql, **kwargs):
        """
        Add this expression's generation to our own
        """

        # Iterables are expressed one by one, falsy expressions are skipped

        if isinstance(expression, collections.abc.Iterable):
            for each in expression:
                self.express(each, sql, **kwargs)
        elif expression:
            expression.generate(**kwargs)
            sql.append(expression.sql)
            self.args.extend(expression.args)
class VALUE(EXPRESSION):
    """
    Class for storing a value that will need to be escaped
    """

    value = None   # the value
    jsonify = None # whether this value will be used with JSON

    def __init__(self, value, jsonify=False):

        self.value = value

        # Anything that isn't a scalar (or None) must travel as JSON

        self.jsonify = jsonify or (value is not None and not isinstance(value, (bool, int, float, str)))

    def generate(self, **kwargs):

        if self.jsonify:
            self.sql = self.JSONIFY % self.PLACEHOLDER
            # Sets are sorted into lists so the JSON is deterministic
            self.args = [json.dumps(sorted(list(self.value)) if isinstance(self.value, set) else self.value)]
        else:
            self.sql = self.PLACEHOLDER
            self.args = [self.value]
class NOT(EXPRESSION):
    """
    Negation
    """

    VALUE = VALUE

    expression = None # the expression being negated

    def __init__(self, expression):

        self.expression = expression if isinstance(expression, relations_sql.SQL) else self.VALUE(expression)

    def generate(self, indent=0, count=0, pad=' ', **kwargs):

        self.args = []

        # express() is called for its side effects: it generates the expression
        # and extends self.args; the throwaway list just satisfies its signature

        self.express(self.expression, [], indent=indent, count=count+1, pad=pad, **kwargs)

        self.sql = f"NOT {self.expression.sql}"
class LIST(EXPRESSION):
    """
    Holds a list of values for IN, NOT IN, and VALUES
    """

    ARG = VALUE

    expressions = None # the wrapped member expressions

    def __init__(self, expressions, jsonify=False):

        self.expressions = []
        self.jsonify = jsonify

        # Wrap anything that isn't already SQL in the ARG expression

        for expression in expressions:
            if isinstance(expression, relations_sql.SQL):
                self.expressions.append(expression)
            else:
                self.expressions.append(self.ARG(expression, jsonify=jsonify))

    def __len__(self):

        return len(self.expressions)

    def generate(self, indent=0, count=0, pad=' ', **kwargs):

        sql = []
        self.args = []

        current = pad * (count * indent)
        line = "\n" if indent else ''

        for expression in self.expressions:
            self.express(expression, sql, indent=indent, count=count+1, pad=pad, **kwargs)

        self.sql = f",{line}{current}".join(sql)
class NAME(EXPRESSION):
    """
    For anything that needs to be quoted
    """

    name = None # the raw, unquoted name

    def __init__(self, name):

        self(name)

    def __len__(self):

        return 0 if self.name is None else 1

    def __call__(self, name):

        self.set(name)

    def set(self, name):
        """
        Set the NAME explicitly
        """

        self.name = name

    def generate(self, **kwargs):

        self.args = []
        self.sql = self.quote(self.name)
class SCHEMA_NAME(NAME):
    """
    For schemas
    """
class TABLE_NAME(SCHEMA_NAME):
    """
    For tables
    """

    SCHEMA_NAME = SCHEMA_NAME

    schema = None # optional schema expression the table belongs to
    prefix = None # optional keyword placed before the name, eg INTO or FROM

    def __init__(self, name, schema=None, prefix=None):

        self(name, schema, prefix)

    def __call__(self, name, schema=None, prefix=None):

        self.set(name, schema, prefix)

    def set(self, name, schema=None, prefix=None):
        """
        Set the name, optionally pulling the schema out of it
        """

        # A name like "schema.table" carries its own schema

        pieces = name.split(self.SEPARATOR)

        self.name = pieces.pop(-1)

        if schema is not None:
            self.schema = schema if isinstance(schema, relations_sql.SQL) else self.SCHEMA_NAME(schema)
        elif len(pieces) == 1:
            self.schema = self.SCHEMA_NAME(pieces[0])

        self.prefix = prefix

    def generate(self, indent=0, count=0, pad=' ', **kwargs):

        sql = []
        self.args = []

        if self.schema:
            self.express(self.schema, sql, **kwargs)

        sql.append(self.quote(self.name))

        self.sql = self.SEPARATOR.join(sql)

        one = pad * indent
        current = pad * (count * indent)
        next = current + (indent * pad)
        line = "\n" if indent else ' '

        # NOTE(review): a truthy prefix is prepended with spacing, an empty-string
        # prefix only pads, and a None prefix leaves the sql bare — confirm intended

        if self.prefix is not None:
            self.sql = f"{self.prefix}{line}{next}{self.sql}" if self.prefix else f"{one}{self.sql}"
class COLUMN_NAME(TABLE_NAME):
    """
    Class for storing a column that'll be used as a column
    """

    TABLE_NAME = TABLE_NAME

    table = None   # name of the table
    jsonify = None # whether we need to cast this column as JSON
    path = None    # path to use in the JSON

    def __init__(self, name, table=None, schema=None, jsonify=False, extracted=False):

        self(name, table, schema, jsonify, extracted)

    def __call__(self, name, table=None, schema=None, jsonify=False, extracted=False):

        self.set(name, table, schema, jsonify, extracted)

    def set(self, name, table=None, schema=None, jsonify=False, extracted=False):
        """
        Parse the name into column, table, schema, and JSON path
        """

        pieces = name.split(self.SEPARATOR)

        # Unless already extracted, a name like "column__a__b" carries a JSON path

        self.name, self.path = self.split(pieces.pop(-1)) if not extracted else (pieces.pop(-1), [])

        # Dotted pieces supply table then schema, unless given explicitly

        if pieces:
            piece = pieces.pop(-1)
            if table is None:
                table = piece

        if pieces:
            piece = pieces.pop(-1)
            if schema is None:
                schema = piece

        if table is not None:
            self.table = table if isinstance(table, relations_sql.SQL) else self.TABLE_NAME(table, schema)

        self.jsonify = jsonify

    def column(self, **kwargs):
        """
        Generates the column with table and schema
        """

        sql = []

        if self.table:
            self.express(self.table, sql, **kwargs)

        # * is a wildcard, not a name, so it is never quoted

        sql.append('*' if self.name == '*' else self.quote(self.name))

        return self.SEPARATOR.join(sql)

    def generate(self, **kwargs):
        """
        Generates the sql and args
        """

        self.args = []

        column = self.column(**kwargs)

        if self.path:
            # The walked path travels as an argument, not inlined in the sql
            self.sql = self.PATH % (column, self.PLACEHOLDER)
            self.args.append(self.walk(self.path))
        else:
            self.sql = column

        if self.jsonify:
            self.sql = self.JSONIFY % self.sql
class NAMES(LIST):
    """
    Holds a list of column names only, with table
    """

    ARG = NAME

    def __init__(self, expressions):

        # Wrap anything that isn't already SQL in the ARG expression

        self.expressions = [
            expression if isinstance(expression, relations_sql.SQL) else self.ARG(expression)
            for expression in expressions
        ]
class COLUMN_NAMES(NAMES):
    """
    Holds a list of column names only, with table
    """

    ARG = COLUMN_NAME

    def __init__(self, expressions):

        self.expressions = []

        # Names are treated as already extracted, so no JSON path parsing

        for expression in expressions:
            if isinstance(expression, relations_sql.SQL):
                self.expressions.append(expression)
            else:
                self.expressions.append(self.ARG(expression, extracted=True))

    def generate(self, indent=0, count=0, pad=' ', **kwargs):
        """
        Generates the sql and args, always parenthesized
        """

        count += 1

        current = pad * (count * indent)
        next = current + (indent * pad)
        one = pad * indent
        line = "\n" if indent else ''

        delimitter = f",{line}{next}"
        left, right = (f"{one}({line}{next}", f"{line}{current})")

        sql = []
        self.args = []

        self.express(self.expressions, sql, indent=indent, count=count+1, pad=' ', **kwargs)

        self.sql = f"{left}{delimitter.join(sql)}{right}"
class AS(EXPRESSION):
    """
    For AS pairings
    """

    NAME = NAME

    label = None      # the alias expression
    expression = None # the expression being aliased

    def __init__(self, label, expression):

        self.label = label if isinstance(label, relations_sql.SQL) else self.NAME(label)
        self.expression = expression

    def __len__(self):

        return len(self.label) + len(self.expression)

    def generate(self, indent=0, count=0, pad=' ', **kwargs):
        """
        Generates the sql and args
        """

        sql = []
        self.args = []

        current = pad * (count * indent)
        next = current + (indent * pad)
        line = "\n" if indent else ''

        # Subqueries get parenthesized before being aliased

        left, right = (f"({line}{next}", f"{line}{current})") if isinstance(self.expression, relations_sql.SELECT) else ('', '')

        self.express(self.expression, sql, indent=indent, count=count+1, **kwargs)
        self.express(self.label, sql, indent=indent, count=count+1, **kwargs)

        self.sql = f"{left}{sql[0]}{right} AS {sql[1]}"
ASC = -1 # sentinel for ascending order
DESC = 1 # sentinel for descending order

class ORDER(EXPRESSION):
    """
    For anything that needs to be ordered
    """

    EXPRESSION = COLUMN_NAME

    expression = None # what is being ordered
    order = None      # ASC, DESC, or None for no explicit direction

    ORDER = {
        ASC: "ASC",
        DESC: "DESC"
    }

    def __init__(self, expression=None, order=None, **kwargs):

        # A single keyword pair may be used, eg ORDER(name=ASC)

        if kwargs:

            if len(kwargs) != 1:
                raise relations_sql.SQLError(self, f"need single pair in {kwargs}")

            expression, order = list(kwargs.items())[0]

        if order is not None and order not in self.ORDER:
            raise relations_sql.SQLError(self, f"order {order} must be in {list(self.ORDER.keys())}")

        self.expression = expression if isinstance(expression, relations_sql.SQL) else self.EXPRESSION(expression)
        self.order = order

    def __len__(self):

        return len(self.expression)

    def generate(self, **kwargs):

        sql = []
        self.args = []

        if self.expression:

            self.express(self.expression, sql, **kwargs)

            # The direction keyword is only emitted when one was given

            if self.ORDER.get(self.order) is not None:
                sql.append(self.ORDER[self.order])

        self.sql = " ".join(sql)
class ASSIGN(EXPRESSION):
    """
    For SET pairings
    """

    COLUMN_NAME = COLUMN_NAME
    EXPRESSION = VALUE

    column = None     # the column being assigned to
    expression = None # the value or expression assigned

    def __init__(self, column, expression):

        self.column = column if isinstance(column, relations_sql.SQL) else self.COLUMN_NAME(column)
        self.expression = expression if isinstance(expression, relations_sql.SQL) else self.EXPRESSION(expression)

    def __len__(self):

        return len(self.column) + len(self.expression)

    def generate(self, indent=0, count=0, pad=' ', **kwargs):
        """
        Generates the sql and args
        """

        sql = []
        self.args = []

        current = pad * (count * indent)
        next = current + (indent * pad)
        line = "\n" if indent else ''

        # Subqueries on the right side get parenthesized

        left, right = (f"({line}{next}", f"{line}{current})") if isinstance(self.expression, relations_sql.SELECT) else ('', '')

        self.express(self.column, sql, indent=indent, count=count+1, **kwargs)
        self.express(self.expression, sql, indent=indent, count=count+1, **kwargs)

        self.sql = f"{sql[0]}={left}{sql[1]}{right}"
import relations_sql
class CLAUSE(relations_sql.CRITERIA):
    """
    Base class for clauses
    """

    KWARG = None  # expression class used to wrap keyword values
    KWARGS = None # expression class used to pair keyword name and value

    DELIMITTER = ","
    PARENTHESES = False

    NAME = None   # keyword emitted before the clause, eg WHERE

    query = None  # query this clause is bound to, for chaining

    def __init__(self, *args, **kwargs):

        self.expressions = []
        self(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        """
        Shorthand for add
        """
        return self.add(*args, **kwargs)

    def add(self, *args, **kwargs):
        """
        Add expressions
        """

        # A single dict argument is treated as the kwargs

        if len(args) == 1 and isinstance(args[0], dict) and not kwargs:
            kwargs = args[0]
            args = []

        super().add(*args)

        # kwargs are added in sorted key order so generation is deterministic

        for key in sorted(kwargs.keys()):

            if self.KWARG is None or isinstance(kwargs[key], relations_sql.SQL):
                expression = kwargs[key]
            else:
                expression = self.KWARG(kwargs[key])

            self.expressions.append(self.KWARGS(key, expression))

        # Returning the bound query enables fluent chaining

        return self.query or self

    def bind(self, query):
        """
        Bind this statement to this clause for adding
        """

        self.query = query

        return self

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Concats the values
        """

        super().generate(indent=indent, count=count, pad=pad, **kwargs)

        if self.sql:

            one = pad * indent
            current = pad * (count * indent)
            next = current + one
            line = "\n" if indent else ' '

            self.sql = f"{self.NAME}{line}{next}{self.sql}" if self.NAME else f"{one}{self.sql}"
class ARGS(CLAUSE):
    """
    Clauses that never have keyword arguments
    """

    def __call__(self, *args):
        """
        Shorthand for add
        """
        return super().add(*args)
class OPTIONS(ARGS):
    """
    Beginning of a SELECT query
    """

    ARGS = relations_sql.SQL
    DELIMITTER = ' '

class FIELDS(CLAUSE):
    """
    FIELDS part of SELECT query
    """

    ARGS = relations_sql.COLUMN_NAME
    KWARG = relations_sql.COLUMN_NAME
    KWARGS = relations_sql.AS

class FROM(CLAUSE):
    """
    Clause for FROM
    """

    NAME = "FROM"

    ARGS = relations_sql.TABLE_NAME
    KWARG = relations_sql.TABLE_NAME
    KWARGS = relations_sql.AS

class WHERE(CLAUSE):
    """
    Clause for WHERE
    """

    NAME = "WHERE"

    ARGS = relations_sql.VALUE
    KWARGS = relations_sql.OP

    DELIMITTER = " AND "

class GROUP_BY(ARGS):
    """
    Clause for GROUP BY
    """

    NAME = "GROUP BY"

    ARGS = relations_sql.COLUMN_NAME

class HAVING(CLAUSE):
    """
    Clause for HAVING
    """

    NAME = "HAVING"

    ARGS = relations_sql.VALUE
    KWARGS = relations_sql.OP

    DELIMITTER = " AND "

class ORDER_BY(CLAUSE):
    """
    Clause for ORDER BY
    """

    NAME = "ORDER BY"

    ARGS = relations_sql.ORDER
    KWARGS = relations_sql.ORDER
class LIMIT(CLAUSE):
    """
    Clause for LIMIT with optional OFFSET
    """

    NAME = "LIMIT"

    ARGS = relations_sql.VALUE

    DELIMITTER = " OFFSET "

    def add(self, *args, total=None, offset=None):
        """
        Add total and offset
        """

        # A single dict argument carries total/offset by key

        if len(args) == 1 and isinstance(args[0], dict) and total is None and offset is None:

            total = args[0].get("total")
            offset = args[0].get("offset")

        else:

            # At most two values ever fit (total, offset), less what's already set

            if len(args) > 2 - len(self.expressions):
                raise relations_sql.SQLError(self, "cannot add when LIMIT set")

            args = list(args)

            # First positional is the total (if unset), the next is the offset

            if args and len(self.expressions) == 0 and total is None:
                total = args.pop(0)

            if args and offset is None:
                offset = args.pop(0)

        if total is not None and not isinstance(total, int):
            raise relations_sql.SQLError(self, "LIMIT total must be int")

        if offset is not None and not isinstance(offset, int):
            raise relations_sql.SQLError(self, "LIMIT offset must be int")

        if total is not None:
            self.expressions.append(self.ARGS(total))

        if offset is not None:
            self.expressions.append(self.ARGS(offset))

        return self.query or self

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Concats the values
        """

        # NOTE(review): indent/count/pad are accepted but not forwarded, so
        # LIMIT x OFFSET y always renders on a single line — confirm intended

        super().generate(**kwargs)
class SET(CLAUSE):
    """
    Clause for SET assignments in UPDATE
    """

    NAME = "SET"

    KWARGS = relations_sql.ASSIGN
class VALUES(CLAUSE):
    """
    Clause for VALUES rows in INSERT
    """

    NAME = "VALUES"

    ARGS = relations_sql.LIST

    DELIMITTER = None

    columns = None # the column order each row of values must follow

    def column(self, columns):
        """
        Field the columns
        """

        # Only the first source of columns wins

        if self.columns:
            return

        if self.query:

            # Prefer the bound INSERT query's COLUMNS, setting them if absent

            if not self.query.COLUMNS:
                self.query.column(columns)

            self.columns = [expresion.name for expresion in self.query.COLUMNS.expressions]

        else:

            self.columns = columns

    def add(self, *args, **kwargs):
        """
        Add a row to VALUES
        """

        if kwargs.get("COLUMNS"):
            self.column(kwargs.pop("COLUMNS"))

        if args and kwargs:
            raise relations_sql.SQLError(self, "add list or dict but not both")

        # A single dict argument is treated as the kwargs row

        if len(args) == 1 and isinstance(args[0], dict):
            kwargs = args[0]
            args = []

        if kwargs:

            # Dict rows establish the column order and are reordered to match it

            self.column(sorted(kwargs.keys()))

            args = []

            for column in self.columns:
                if column not in kwargs:
                    raise relations_sql.SQLError(self, f"missing column {column} in {kwargs}")
                args.append(kwargs[column])

        if args:

            if self.columns is not None and len(args) != len(self.columns):
                raise relations_sql.SQLError(self, f"wrong values {args} for columns {self.columns}")

            self.expressions.append(self.ARGS(args))

        return self.query or self

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Concats the values
        """

        sql = []
        self.args = []

        count += 1

        current = pad * (count * indent)
        next = current + (indent * pad)
        line = "\n" if indent else ' '

        # Each row is parenthesized, rows are separated by ),( style delimitters

        left, right = (f"(\n{next}", f"\n{current})") if indent else ('(', ')')
        delimitter = f"{right},{left}"

        self.express(self.expressions, sql, indent=indent, count=count+1, pad=pad, **kwargs)

        self.sql = f"{self.NAME}{line}{current}{left}{delimitter.join(sql)}{right}"
import collections
import relations_sql
class QUERY(relations_sql.EXPRESSION):
    """
    Base query
    """

    NAME = None    # keyword the query starts with, eg SELECT

    PREFIX = None  # keyword placed before the table, eg INTO

    CLAUSES = None # ordered mapping of clause name to clause class

    clauses = None # the instantiated clauses by name

    model = None   # model this query is bound to

    def __init__(self, **kwargs):

        self.check(kwargs)

        # Every clause is instantiated and bound, configured from kwargs if given

        for clause in self.CLAUSES:

            if clause in kwargs:
                if isinstance(kwargs[clause], self.CLAUSES[clause]):
                    self.clauses[clause] = kwargs[clause].bind(self)
                else:
                    self.clauses[clause] = self.CLAUSES[clause](kwargs[clause]).bind(self)
            else:
                self.clauses[clause] = self.CLAUSES[clause]().bind(self)

    def __getattr__(self, name):
        """
        Used to get clauses directly
        """

        if name in self.CLAUSES:
            return self.clauses[name]

        raise AttributeError(f"'{self}' object has no attribute '{name}'")

    def __setattr__(self, name, value):
        """
        Used to set clauses directly
        """

        if name in self.CLAUSES:
            self.clauses[name] = value
        else:
            object.__setattr__(self, name, value)

    def __len__(self):

        return sum(len(clause) for clause in self.clauses.values())

    def check(self, kwargs):
        """
        Check kwargs to make sure there's nothing extra
        """

        for clause in kwargs:
            if clause not in self.CLAUSES:
                raise TypeError(f"'{clause}' is an invalid keyword argument for {self.__class__.__name__}")

        self.clauses = collections.OrderedDict()

    def bind(self, model):
        """
        Binds the model
        """

        self.model = model

        return self

    def create(self, *args, **kwargs):
        """
        Create on the Model
        """
        self.model.create(query=self, *args, **kwargs)

    def count(self, *args, **kwargs):
        """
        Count on the Model
        """
        self.model.count(query=self, *args, **kwargs)

    def titles(self, *args, **kwargs):
        """
        Titles on the Model
        """
        self.model.titles(query=self, *args, **kwargs)

    def retrieve(self, *args, **kwargs):
        """
        Retrieve on the Model
        """
        self.model.retrieve(query=self, *args, **kwargs)

    def update(self, *args, **kwargs):
        """
        Update on the Model
        """
        self.model.update(query=self, *args, **kwargs)

    def delete(self, *args, **kwargs):
        """
        Delete on the Model
        """
        self.model.delete(query=self, *args, **kwargs)

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Generate the sql and args
        """

        sql = []
        self.args = []

        current = pad * (count * indent)
        line = "\n" if indent else ' '

        delimitter = f"{line}{current}"

        self.express(self.clauses.values(), sql, indent=indent, count=count, pad=" ", **kwargs)

        self.sql = f"{self.NAME}{line}{current}{delimitter.join(sql)}"
class SELECT(QUERY):
    """
    SELECT query
    """

    NAME = "SELECT"

    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_sql.OPTIONS),
        ("FIELDS", relations_sql.FIELDS),
        ("FROM", relations_sql.FROM),
        ("WHERE", relations_sql.WHERE),
        ("GROUP_BY", relations_sql.GROUP_BY),
        ("HAVING", relations_sql.HAVING),
        ("ORDER_BY", relations_sql.ORDER_BY),
        ("LIMIT", relations_sql.LIMIT)
    ])

    def __init__(self, *args, **kwargs):

        # Clause kwargs configure clauses, everything else goes to FIELDS

        super().__init__(**{key: value for key, value in kwargs.items() if key in self.CLAUSES})

        self.FIELDS(*args, **{key: value for key, value in kwargs.items() if key not in self.CLAUSES})

    def __call__(self, *args, **kwargs):
        """
        Shorthand for FIELDS
        """
        return self.FIELDS(*args, **kwargs)
class INSERT(QUERY):
    """
    INSERT query
    """

    NAME = "INSERT"
    PREFIX = "INTO"

    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_sql.OPTIONS),
        ("TABLE", relations_sql.TABLE_NAME),
        ("COLUMNS", relations_sql.COLUMN_NAMES),
        ("VALUES", relations_sql.VALUES),
        ("SELECT", SELECT)
    ])

    def __init__(self, TABLE, *args, **kwargs): # pylint: disable=too-many-branches

        # Positional non-dict args are column names, dict args are value rows

        if args:
            kwargs["COLUMNS"] = [arg for arg in args if not isinstance(arg, dict)]
            args = [arg for arg in args if isinstance(arg, dict)]

        if args:
            kwargs["VALUES"] = args

        self.check(kwargs)

        for clause in self.CLAUSES:

            if clause == "TABLE":

                # The table always carries the INTO prefix

                if isinstance(TABLE, self.CLAUSES["TABLE"]):
                    self.clauses[clause] = TABLE
                    self.clauses[clause].prefix = self.PREFIX
                else:
                    self.clauses[clause] = self.CLAUSES[clause](TABLE, prefix=self.PREFIX)

            elif clause == "COLUMNS":

                if "COLUMNS" in kwargs:
                    if isinstance(kwargs["COLUMNS"], self.CLAUSES["COLUMNS"]):
                        self.clauses[clause] = kwargs["COLUMNS"]
                    else:
                        self.clauses[clause] = self.CLAUSES[clause](kwargs["COLUMNS"])
                else:
                    self.clauses[clause] = self.CLAUSES[clause]([])

            elif clause == "VALUES":

                if "VALUES" in kwargs:
                    if isinstance(kwargs["VALUES"], self.CLAUSES["VALUES"]):
                        self.clauses[clause] = kwargs["VALUES"].bind(self)
                        # A prebuilt VALUES clause supplies the column order
                        self.column(self.clauses[clause].columns)
                    else:
                        self.clauses[clause] = self.CLAUSES[clause]().bind(self)
                        for values in kwargs["VALUES"]:
                            self.clauses[clause](values)
                else:
                    self.clauses[clause] = self.CLAUSES[clause]().bind(self)

            else:

                if clause in kwargs:
                    if isinstance(kwargs[clause], self.CLAUSES[clause]):
                        self.clauses[clause] = kwargs[clause].bind(self)
                    else:
                        self.clauses[clause] = self.CLAUSES[clause](kwargs[clause]).bind(self)
                else:
                    self.clauses[clause] = self.CLAUSES[clause]().bind(self)

    def column(self, columns):
        """
        Field the columns
        """

        # Only set COLUMNS once

        if self.COLUMNS:
            return

        self.COLUMNS = self.CLAUSES["COLUMNS"](columns)

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Generate the sql and args
        """

        # Rows and a subquery are mutually exclusive sources of data

        if self.VALUES and self.SELECT:
            raise relations_sql.SQLError(self, "set VALUES or SELECT but not both")

        super().generate(indent=indent, count=count, pad=pad, **kwargs)
class LIMITED(QUERY):
    """
    Query that has a limit (total only, no offset)
    """

    def __init__(self, TABLE, **kwargs):

        self.check(kwargs)

        for clause in self.CLAUSES:

            if clause == "TABLE":

                if isinstance(TABLE, self.CLAUSES["TABLE"]):
                    self.clauses[clause] = TABLE
                    if self.PREFIX:
                        TABLE.prefix = self.PREFIX
                else:
                    self.clauses[clause] = self.CLAUSES[clause](TABLE, prefix=self.PREFIX)

            else:

                if clause in kwargs:
                    if isinstance(kwargs[clause], self.CLAUSES[clause]):
                        self.clauses[clause] = kwargs[clause].bind(self)
                    else:
                        self.clauses[clause] = self.CLAUSES[clause](kwargs[clause]).bind(self)
                else:
                    self.clauses[clause] = self.CLAUSES[clause]().bind(self)

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Generate the sql and args
        """

        # A LIMIT with more than one expression also has an OFFSET, which
        # these queries don't support

        if len(self.LIMIT) > 1:
            raise relations_sql.SQLError(self, "LIMIT can only be total")

        super().generate(indent=indent, count=count, pad=pad, **kwargs)
class UPDATE(LIMITED):
    """
    UPDATE query

    Clause order mirrors SQL syntax: OPTIONS, table, SET, WHERE, ORDER BY, LIMIT.
    """

    NAME = "UPDATE"
    PREFIX = ""  # no keyword between UPDATE and the table name
    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_sql.OPTIONS),
        ("TABLE", relations_sql.TABLE_NAME),
        ("SET", relations_sql.SET),
        ("WHERE", relations_sql.WHERE),
        ("ORDER_BY", relations_sql.ORDER_BY),
        ("LIMIT", relations_sql.LIMIT)
    ])
class DELETE(LIMITED):
    """
    DELETE query

    Clause order mirrors SQL syntax: OPTIONS, FROM table, WHERE, ORDER BY, LIMIT.
    """

    NAME = "DELETE"
    PREFIX = "FROM"  # renders as DELETE FROM <table>
    CLAUSES = collections.OrderedDict([
        ("OPTIONS", relations_sql.OPTIONS),
        ("TABLE", relations_sql.TABLE_NAME),
        ("WHERE", relations_sql.WHERE),
        ("ORDER_BY", relations_sql.ORDER_BY),
        ("LIMIT", relations_sql.LIMIT)
    ]) | /relations_sql-0.6.7-py3-none-any.whl/relations_sql/query.py | 0.582135 | 0.164886 | query.py | pypi |
import relations_sql
class TABLE(relations_sql.DDL):
    """
    TABLE DDL

    Generates CREATE / ALTER / DROP statements from 'migration' (desired
    state) and 'definition' (current state) dicts describing the table.
    """

    NAME = None     # expression class used to quote table names
    COLUMN = None   # COLUMN DDL class
    INDEX = None    # INDEX DDL class
    UNIQUE = None   # UNIQUE index DDL class
    INDEXES = None  # True if indexes are declared inline within CREATE TABLE
    SCHEMA = None   # format string for schema-change DDL (None if unsupported)
    STORE = None    # format string for table-rename DDL (None if unsupported)
    PRIMARY = None  # format string for the PRIMARY KEY clause
def name(self, state="migration"):
    """
    Build the quoted table name for the requested state.

    state may be a single string applied to both the table name and the
    schema, or a dict with separate "name" and "schema" entries, each either
    "migration" or "definition". The other state's value is used as a
    fallback when the requested one is absent.
    """
    if isinstance(state, str):
        state = {"name": state, "schema": state}
    definition = self.definition or {}
    migration = self.migration or {}
    if state["name"] == "migration":
        store = migration.get("store") or definition.get("store")
    else:
        store = definition.get("store") or migration.get("store")
    if state["schema"] == "migration":
        schema = migration.get("schema") or definition.get("schema")
    else:
        schema = definition.get("schema") or migration.get("schema")
    table = self.NAME(store, schema=schema)
    table.generate()
    return table.sql
def create(self, indent=0, count=0, pad=' ', **kwargs): # pylint: disable=too-many-locals
    """
    CREATE DDL

    Assembles a CREATE TABLE statement (plus separate index statements when
    indexes cannot be declared inline) from self.migration.
    """
    inside = []
    columns = []
    indexes = []
    for migration in self.migration["fields"]:
        # Injected fields are virtual and get no physical column
        if "inject" in migration:
            continue
        columns.append(self.COLUMN(migration=migration))
        if "extract" in migration:
            # Extracted values each get their own store__key column
            for extract in sorted(migration["extract"]):
                store = migration["store"]
                columns.append(self.COLUMN(store=f"{store}__{extract}", kind=migration["extract"][extract]))
    # Standalone index DDL (INDEXES False) needs an explicit table reference
    table = {} if self.INDEXES else {"table": self.migration["store"], "schema": self.migration.get("schema")}
    if self.migration.get('id') is not None and self.PRIMARY:
        columns.append(relations_sql.SQL(self.PRIMARY % self.quote(self.migration['id'])))
    for index in sorted(self.migration.get("index", {})):
        indexes.append(self.INDEX(name=index, columns=self.migration["index"][index], **table))
    for index in sorted(self.migration.get("unique", {})):
        indexes.append(self.UNIQUE(name=index, columns=self.migration["unique"][index], **table))
    self.express(columns, inside, indent=indent, count=count+1, pad=pad)
    if self.INDEXES:
        self.express(indexes, inside, indent=indent, count=count+1, pad=pad)
    # Layout helpers: only emit newlines/padding when an indent was requested
    one = pad * indent
    migration = pad * (count * indent)
    next = migration + one
    line = "\n" if indent else ""
    delimitter = f",{line}{next}"
    sql = [f"CREATE TABLE IF NOT EXISTS {self.name()} ({line}{next}{delimitter.join(inside)}{line})"]
    if not self.INDEXES:
        self.express(indexes, sql, indent=indent, count=count, pad=pad)
    delimitter = f";\n\n{migration}"
    self.sql = f"{delimitter.join(sql)};\n"
def add(self, **kwargs):
    """
    ADD DDL — adding a table is the same as creating it.
    """
    self.create(**kwargs)
def field(self, name):
    """
    Return the definition dict of the field called name.

    Raises relations_sql.SQLError if no such field exists.
    """
    found = next((candidate for candidate in self.definition["fields"] if candidate["name"] == name), None)
    if found is None:
        raise relations_sql.SQLError(self, f"field {name} not found")
    return found
def fields_add(self, columns):
    """
    Append ADD-column DDL for every field in the migration's add section,
    including one store__key column per declared extract.
    """
    for spec in self.migration.get("fields", {}).get("add", {}):
        if "inject" in spec:
            continue
        columns.append(self.COLUMN(migration=spec, added=True))
        for extract in sorted(spec.get("extract", {})):
            base = spec.get("store", spec["name"])
            columns.append(self.COLUMN(store=f"{base}__{extract}", kind=spec["extract"][extract], added=True))
def fields_change(self, columns):
    """
    Process changed fields into columns.

    For each changed field: emit an ALTER column when any core attribute
    changed, then reconcile extracted (store__key) columns — add new
    extracts, alter ones whose kind changed, drop removed ones. When the
    change carries no "extract" section but renames the store, every
    existing extract column is renamed to follow it.
    """
    for field in self.migration.get("fields", {}).get("change", {}):
        migration = self.migration["fields"]["change"][field]
        definition = self.field(field)
        if "inject" in definition:
            continue
        if any(attr in migration for attr in ["name", "store", "kind", "default", "none"]):
            columns.append(self.COLUMN(migration=migration, definition=definition))
        if "extract" in migration:
            # Bug fix: default to {} so a field that previously had no
            # extracts doesn't raise TypeError on the membership test below
            existing = definition.get("extract", {})
            for extract in sorted(migration["extract"]):
                if extract not in existing:
                    # Brand new extract column
                    columns.append(self.COLUMN(
                        migration={
                            "store": f"{migration.get('store', field)}__{extract}",
                            "kind": migration["extract"][extract]
                        },
                        added=True
                    ))
                elif migration["extract"][extract] != existing[extract]:
                    # Kind changed on an existing extract column
                    columns.append(self.COLUMN(
                        migration={
                            "store": f"{migration.get('store', field)}__{extract}",
                            "kind": migration["extract"][extract]
                        },
                        definition={
                            "store": f"{definition['store']}__{extract}",
                            "kind": existing[extract]
                        }
                    ))
            for extract in sorted(existing):
                if extract not in migration["extract"]:
                    # Extract no longer wanted: drop its column
                    columns.append(self.COLUMN(
                        definition={
                            "store": f"{definition['store']}__{extract}",
                            "kind": existing[extract]
                        }
                    ))
        elif "extract" in definition and migration.get('store', field) != definition['store']:
            # Store rename only: rename every existing extract column
            for extract in sorted(definition["extract"]):
                columns.append(self.COLUMN(
                    migration={
                        "store": f"{migration.get('store', field)}__{extract}",
                        "kind": definition["extract"][extract]
                    },
                    definition={
                        "store": f"{definition['store']}__{extract}",
                        "kind": definition["extract"][extract]
                    }
                ))
def fields_remove(self, columns):
    """
    Append DROP-column DDL for every removed field, including the
    store__key columns of any extracts the field carried.
    """
    for removed in self.migration.get("fields", {}).get("remove", []):
        definition = self.field(removed)
        if "inject" in definition:
            continue
        columns.append(self.COLUMN(definition=definition))
        for extract in sorted(definition.get("extract", {})):
            columns.append(self.COLUMN(
                definition={
                    "store": f"{definition['store']}__{extract}",
                    "kind": definition["extract"][extract]
                }
            ))
def indexes_modify(self, indexes, table, unique=False):
    """
    Process modified indexes.

    Translates the add / rename / remove sections of the migration's
    "index" (or "unique" when unique=True) block into INDEX/UNIQUE DDL.
    """
    index, INDEX = ("unique", self.UNIQUE) if unique else ("index", self.INDEX)
    for name in self.migration.get(index, {}).get("add", {}):
        indexes.append(INDEX(
            migration={
                "name": name,
                "columns": self.migration[index]["add"][name],
                "table": table
            }
        ))
    for name in self.migration.get(index, {}).get("rename", {}):
        # rename maps old name -> new name
        indexes.append(INDEX(
            migration={
                "name": self.migration[index]["rename"][name],
                "table": table
            },
            definition={
                "name": name,
                "table": table
            }
        ))
    for name in self.migration.get(index, {}).get("remove", {}):
        indexes.append(INDEX(
            definition={
                "name": name,
                "table": table
            }
        ))
def schema(self, sql):
    """
    Append DDL that moves the table to a different schema.

    Raises relations_sql.SQLError when the backend defines no SCHEMA template.
    """
    if not self.SCHEMA:
        raise relations_sql.SQLError(self, "schema change not supported")
    sql.append(self.SCHEMA % (self.name(state="definition"), self.quote(self.migration["schema"])))
def store(self, sql):
    """
    Append DDL that renames the table's backing store.

    Raises relations_sql.SQLError when the backend defines no STORE template.
    """
    if not self.STORE:
        raise relations_sql.SQLError(self, "store change not supported")
    sql.append(self.STORE % (self.name(state={"name": "definition", "schema": "migration"}), self.name()))
def modify(self, indent=0, count=0, pad=' ', **kwargs):
    """
    MODIFY DDL

    Builds ALTER TABLE statements (plus standalone index statements when
    indexes aren't inline) covering schema/store changes, field changes,
    and index changes from self.migration against self.definition.
    """
    sql = []
    if "schema" in self.migration:
        self.schema(sql)
    if "store" in self.migration:
        self.store(sql)
    inside = []
    columns = []
    indexes = []
    self.fields_add(columns)
    self.fields_change(columns)
    self.fields_remove(columns)
    # Standalone index DDL needs to reference the (possibly renamed) table
    table = {} if self.INDEXES else {
        "name": self.migration.get("name", self.definition["name"]),
        "schema": self.migration.get("schema", self.definition.get("schema"))
    }
    self.indexes_modify(indexes, table)
    self.indexes_modify(indexes, table, unique=True)
    # Layout helpers: only emit newlines/padding when an indent was requested
    one = pad * indent
    current = pad * (count * indent)
    next = current + one
    line = "\n" if indent else ""
    delimitter = f",{line}{next}"
    self.express(columns, inside, indent=indent, count=count+1, pad=pad)
    if self.INDEXES:
        self.express(indexes, inside, indent=indent, count=count+1, pad=pad)
    if inside:
        sql.append(f"ALTER TABLE {self.name()}{line or ' '}{next}{delimitter.join(inside)}")
    if not self.INDEXES:
        self.express(indexes, sql, indent=indent, count=count, pad=pad)
    delimitter = f";\n\n{current}"
    self.sql = f"{delimitter.join(sql)};\n"
def drop(self, **kwargs):
    """
    DROP DDL — drops the table under its current (definition) name.
    """
    self.sql = f"DROP TABLE IF EXISTS {self.name(state='definition')};\n" | /relations_sql-0.6.7-py3-none-any.whl/relations_sql/table.py | 0.534855 | 0.321167 | table.py | pypi |
import relations_sql
class INDEX(relations_sql.DDL):
    """
    INDEX DDL

    Generates CREATE / ADD / RENAME / DROP statements for a table index from
    'migration' (desired state) and 'definition' (current state) dicts.
    """

    TABLE = None      # expression class used to quote table names
    COLUMNS = None    # expression class used to render the indexed columns
    CREATE = "INDEX"  # keyword after CREATE (UNIQUE subclass overrides)
    MODIFY = None     # format string for rename DDL (None if unsupported)

    def __init__(self, migration=None, definition=None, added=False, **kwargs):
        super().__init__(migration=migration, definition=definition, added=added, **kwargs)
        if self.migration is None:
            return
        # Normalize a bare table-name string into its dict form
        if "table" in self.migration and isinstance(self.migration["table"], str):
            self.migration["table"] = {"name": self.migration["table"]}
        # NOTE(review): assumes "table" is present whenever "schema" is given — confirm
        if "schema" in self.migration:
            self.migration["table"]["schema"] = self.migration.pop("schema")

    def name(self, definition=False, full=True):
        """
        Generate a quoted name, with store as the default.

        The index name is prefixed with the table name and dashes are
        normalized to underscores; when full, the schema is prepended.
        """
        state = self.definition if definition else self.migration
        name = state['store'] if 'store' in state else state['name']
        table = self.TABLE(**state["table"]) if state.get("table") else None
        if table:
            name = f"{table.name}_{name}"
        name = name.replace("-", "_")
        sql = []
        if table and full and table.schema:
            sql.append(self.quote(table.schema.name))
        sql.append(self.quote(name))
        return self.SEPARATOR.join(sql)

    def create(self, **kwargs):
        """
        CREATE DDL
        """
        sql = []
        # Only standalone (table-qualified) indexes get the CREATE keyword;
        # inline definitions start directly with INDEX/UNIQUE
        if "table" in self.migration:
            sql.append("CREATE")
        sql.append(self.CREATE)
        sql.append(self.name(full=False))
        if self.migration.get("table"):
            table = self.TABLE(**self.migration['table'])
            table.generate()
            sql.append(f"ON {table.sql}")
        columns = self.COLUMNS(self.migration["columns"])
        columns.generate()
        sql.append(columns.sql)
        self.sql = " ".join(sql)

    def add(self, **kwargs):
        """
        ADD DDL — inline (table-less) indexes are prefixed with ADD.
        """
        self.create()
        if "table" not in self.migration:
            self.sql = f"ADD {self.sql}"

    def modify(self, **kwargs):
        """
        MODIFY (rename) DDL — old definition name to new migration name.
        """
        self.sql = self.MODIFY % (self.name(definition=True), self.name())

    def drop(self, **kwargs):
        """
        DROP DDL
        """
        self.sql = f"DROP INDEX {self.name(definition=True)}"
class UNIQUE(INDEX):
    """
    UNIQUE INDEX DDL — same behavior as INDEX, rendered with the UNIQUE keyword.
    """

    TABLE = None    # expression class used to quote table names
    COLUMNS = None  # expression class used to render the indexed columns
    CREATE = "UNIQUE" | /relations_sql-0.6.7-py3-none-any.whl/relations_sql/index.py | 0.519765 | 0.273885 | index.py | pypi |
import relations_sql
import relations_sqlite
class TABLE(relations_sqlite.DDL, relations_sql.TABLE):
    """
    TABLE DDL

    SQLite flavor: indexes are standalone statements, and ALTER is emulated
    in modify() by renaming, recreating, and copying the table.
    """

    NAME = relations_sqlite.TABLE_NAME
    COLUMN = relations_sqlite.COLUMN
    INDEX = relations_sqlite.INDEX
    UNIQUE = relations_sqlite.UNIQUE
    INSERT = relations_sqlite.INSERT
    SELECT = relations_sqlite.SELECT
    INDEXES = False  # SQLite declares indexes outside CREATE TABLE
def name(self, state="migration", prefix='', rename=False):
    """
    Build the quoted table name for the requested state.

    state may be a single string applied to both the table name and the
    schema, or a dict with separate "name" and "schema" entries. prefix is
    prepended to the store (used for the _old_ rename dance), and rename
    suppresses schema qualification, as SQLite's RENAME TO requires.
    """
    if isinstance(state, str):
        state = {"name": state, "schema": state}
    definition = self.definition or {}
    migration = self.migration or {}
    if state["name"] == "migration":
        store = migration.get("store") or definition.get("store")
    else:
        store = definition.get("store") or migration.get("store")
    store = prefix + store
    schema = None
    if not rename:
        if state["schema"] == "migration":
            schema = migration.get("schema") or definition.get("schema")
        else:
            schema = definition.get("schema") or migration.get("schema")
    table = self.NAME(store, schema=schema)
    table.generate()
    return table.sql
def modify(self, indent=0, count=0, pad=' ', **kwargs): # pylint: disable=too-many-locals,too-many-branches
    """
    MODIFY DDL

    SQLite cannot ALTER most things, so modification is emulated by renaming
    the current table to _old_<store>, dropping its indexes, creating the new
    table, copying the data across with INSERT ... SELECT, and finally
    dropping the old table.
    """
    sql = [f"""ALTER TABLE {self.name(state='definition')} RENAME TO {self.name(state='definition', prefix='_old_', rename=True)}"""]
    # Build the migration dict describing the replacement table
    migration = {
        "store": self.migration.get("store", self.definition["store"]),
        "schema": self.migration.get("schema", self.definition.get("schema")),
        "fields": [],
        "index": {},
        "unique": {},
    }
    for attr in ["name", "store", "schema"]:
        value = self.migration.get(attr, self.definition.get(attr))
        if value is not None:
            migration[attr] = value
    # renames maps new store -> old store for the data-copy SELECT
    renames = {}
    for field in self.definition.get("fields", []):
        if field.get("inject") or field["name"] in self.migration.get("fields", {}).get("remove", []):
            continue
        if field["name"] in self.migration.get("fields", {}).get("change", {}):
            migration["fields"].append({**field, **self.migration["fields"]["change"][field["name"]]})
            renames[self.migration["fields"]["change"][field["name"]].get("store", field["store"])] = field["store"]
        else:
            migration["fields"].append(field)
            renames[field["store"]] = field["store"]
    for field in self.migration.get("fields", {}).get("add", []):
        if field.get("inject"):
            continue
        migration["fields"].append(field)
    table = {
        "name": self.definition["store"],
        "schema": self.definition.get("schema")
    }
    # Every existing index is dropped (they reference the old table);
    # surviving / renamed ones are re-created with the new table
    indexes = []
    for index in self.definition.get("index", {}):
        indexes.append(self.INDEX(definition={
            "name": index,
            "columns": self.definition["index"][index],
            "table": table
        }))
        if index in self.migration.get("index", {}).get("remove", []):
            continue
        if index in self.migration.get("index", {}).get("rename", {}):
            migration["index"][self.migration["index"]["rename"][index]] = self.definition["index"][index]
        else:
            migration["index"][index] = self.definition["index"][index]
    migration["index"].update(self.migration.get("index", {}).get("add", {}))
    for index in self.definition.get("unique", {}):
        indexes.append(self.UNIQUE(definition={
            "name": index,
            "columns": self.definition["unique"][index],
            "table": table
        }))
        if index in self.migration.get("unique", {}).get("remove", []):
            continue
        if index in self.migration.get("unique", {}).get("rename", {}):
            migration["unique"][self.migration["unique"]["rename"][index]] = self.definition["unique"][index]
        else:
            migration["unique"][index] = self.definition["unique"][index]
    migration["unique"].update(self.migration.get("unique", {}).get("add", {}))
    current = pad * (count * indent)
    delimitter = f";\n\n{current}"
    for index in indexes:
        index.generate()
        sql.append(index.sql)
    # Recursively CREATE the replacement table (strip its trailing ";\n")
    ddl = self.__class__(migration)
    ddl.generate(indent=indent, count=count, pad=pad, **kwargs)
    sql.append(ddl.sql[:-2])
    # Copy data from the _old_ table, mapping new stores back to old ones
    query = self.INSERT(
        self.NAME(ddl.migration["store"], schema=ddl.migration.get("schema")), COLUMNS=sorted(renames.keys()),
        SELECT=self.SELECT(FIELDS=renames).FROM(relations_sql.SQL(self.name(state='definition', prefix='_old_')))
    )
    query.generate(indent, count, pad, **kwargs)
    sql.append(query.sql)
    sql.append(f"""DROP TABLE {self.name(state='definition', prefix='_old_')}""")
    self.sql = f"{delimitter.join(sql)};\n" | /relations-sqlite-0.6.2.tar.gz/relations-sqlite-0.6.2/lib/relations_sqlite/table.py | 0.425725 | 0.284974 | table.py | pypi |
import copy
import pickle
import pprint
from dataclasses import dataclass
from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple, Union
from relmgr._core import _CoreRelationshipManager
class _EnforcingRelationshipManager(_CoreRelationshipManager):
    """
    A stricter Relationship Manager which adds the method 'enforce'
    where you register the cardinality and directionality of each relationship.
    Benefits:
    - When adding and removing relationships, bidirectional relationships
    are automatically created. (though remember, back pointer queries are
    also always possible in the case of regular RelationshipManager, I think
    this is more of an official wiring rather than using a back pointer concept?)
    - When adding the same relationship again (by mistake?) any previous
    relationship is removed first.
    Parameters:
    cardinality:
    - "onetoone" - extinguish both old 'source' and 'target' before adding a new relationship
    - "onetomany" - extinguish old 'source' before adding a new relationship
    - "manytomany" (not implemented)
    directionality:
    - "directional" - the default, no special enforcement
    - "bidirectional" - when calling `RelationshipManager.add_rel(source, target)`
    causes not only the primary relationship to be created between 'source' and 'target',
    but also auto creates an additional relationship in the reverse direction between 'target' and 'source'.
    Also ensures both relationships are removed when calling `RelationshipManager.remove_rel`.
    """

    def __init__(self):
        super().__init__()
        # relId -> (cardinality, directionality), registered via enforce()
        self.rules: Dict[any, Tuple] = {}

    def enforce(self, relId, cardinality, directionality="directional"):
        # Register the rule applied whenever relId relationships are added/removed
        self.rules[relId] = (cardinality, directionality)

    def _remove_existing_relationships(self, source, target, rel_id):

        def _extinguish_old_source():
            old_source = self._find_object(None, target, rel_id) # find_source
            # NOTE(review): when no old source exists, _find_object returns None and
            # remove_rel(None, target, ...) may act as a wildcard removal — confirm intended
            self.remove_rel(old_source, target, rel_id)

        def _extinguish_old_target():
            old_target = self._find_object(source, None, rel_id) # find_target
            self.remove_rel(source, old_target, rel_id)

        if rel_id in list(self.rules.keys()):
            cardinality, directionality = self.rules[rel_id]
            if cardinality == "onetoone":
                _extinguish_old_source()
                _extinguish_old_target()
            elif cardinality == "onetomany": # and directionality == "directional":
                _extinguish_old_source()

    def add_rel(self, fromObj, toObj, relId=1):
        # Enforcement: drop any relationship the new one supersedes first
        self._remove_existing_relationships(fromObj, toObj, relId)
        super().add_rel(fromObj, toObj, relId)
        if relId in list(self.rules.keys()):
            cardinality, directionality = self.rules[relId]
            if directionality == "bidirectional":
                # Auto-create the reverse wiring
                super().add_rel(toObj, fromObj, relId)

    def remove_rel(self, fromObj, toObj, relId=1):
        super().remove_rel(fromObj, toObj, relId)
        if relId in list(self.rules.keys()):
            cardinality, directionality = self.rules[relId]
            if directionality == "bidirectional":
                # Also remove the auto-created reverse wiring
                super().remove_rel(toObj, fromObj, relId)

    def clear(self) -> None:
        super().clear()
        self.rules = {} | /relationship_manager-2.0.1-py3-none-any.whl/relmgr/_enforcing.py | 0.884058 | 0.230065 | _enforcing.py | pypi |
import copy
import pickle
import pprint
from dataclasses import dataclass
from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple, Union
from relmgr._enforcing import _EnforcingRelationshipManager
from relmgr._caching import _RelationshipManagerCaching
from relmgr._persist_support import _Namespace, _PersistenceWrapper
class RelationshipManager():
    """This is the Relationship Manager class to instantiate and use in your projects."""

    def __init__(self, caching: bool = True) -> None:
        """Constructor. Set the option `caching` if you want
        faster performance using Python `lru_cache` technology
        - defaults to True.
        """
        if caching:
            self.rm = _RelationshipManagerCaching()
        else:
            self.rm = _EnforcingRelationshipManager()
        self.objects = _Namespace()
        """Optional place for storing objects involved in relationships, so the objects are saved.
        Assign to this `.objects` namespace directly to record your objects
        for persistence purposes.
        """

    def _get_relationships(self) -> List[Tuple[object, object, Union[int, str]]]:
        """Getter"""
        return self.rm._get_relationships()

    def _set_relationships(self, listofrelationshiptuples: List[Tuple[object, object, Union[int, str]]]) -> None:
        """Setter"""
        self.rm._set_relationships(listofrelationshiptuples)

    relationships = property(_get_relationships, _set_relationships)
    """Property to get flat list of relationships tuples"""

    def add_rel(self, source, target, rel_id=1) -> None:
        """Add relationships between `source` and `target` under the optional
        relationship id `rel_id`. The `source` and `target` are typically Python
        objects but can be strings. The `rel_id` is a string or integer and
        defaults to 1. Note that `rel_id` need not be specified unless you want
        to model multiple different relationships between the same objects, thus
        keeping relationships in different 'namespaces' as it were.
        """
        self.rm.add_rel(source, target, rel_id)

    def remove_rel(self, source, target, rel_id=1) -> None:
        """Remove all relationships between `source` and `target` of type `rel_id`.
        If you specify `None` for any parameter a wildcard match removal will occur.
        For example:
        Syntax | Meaning
        --------|------
        `remove_rel('a', 'b')` | remove all relationships between 'a' and 'b'
        `remove_rel('a', 'b', None)` | remove all relationships between 'a' and 'b'
        `remove_rel('a', 'b', 'r1')` | remove the 'r1' relationship between 'a' and 'b'
        `remove_rel('a', None)` | remove all pointers (relationships) from 'a'
        `remove_rel(None, 'b')` | remove any pointers (relationships) to 'b'
        """
        self.rm.remove_rel(source, target, rel_id)

    def find_targets(self, source, rel_id=1) -> List:
        """Find all objects pointed to by me - all the things 'source' is pointing at."""
        return self.rm._find_objects(source, None, rel_id)

    def find_target(self, source, rel_id=1) -> object:
        """Find first object pointed to by me - first target"""
        return self.rm._find_object(source, None, rel_id)

    def find_sources(self, target, rel_id=1) -> List:
        """Find all objects pointing to me. A 'back pointer' query."""
        return self.rm._find_objects(None, target, rel_id)

    def find_source(self, target, rel_id=1) -> object:
        """Find first object pointing to me - first source. A 'back pointer' query."""
        return self.rm._find_object(None, target, rel_id)

    def is_rel(self, source, target, rel_id=1) -> bool:
        """Returns True if the relationship exists, otherwise False."""
        # Fix: the docstring/annotation promised a bool but the raw query
        # result (a list) was being returned; truthiness is unchanged for
        # callers who tested the result in a boolean context.
        return bool(self.rm._find_objects(source, target, rel_id))

    def find_rels(self, source, target) -> List:
        """Returns a list of the relationships between source and target.
        Returns a list of relationship ids.
        """
        return self.rm._find_objects(source, target, None)

    def enforce(self, relId, cardinality, directionality="directional"):
        """Enforce a relationship by auto creating reciprocal relationships (in the case of
        'bidirectional' relationships), and by overwriting existing relationships in the case
        of 'onetoone' cardinality.
        cardinality:
        - "onetoone" - extinguish both old 'source' and 'target' before adding a new relationship
        - "onetomany" - extinguish old 'source' before adding a new relationship
        - "manytomany" (not implemented)
        directionality:
        - "directional" - the default, no special enforcement
        - "bidirectional" - when calling `RelationshipManager.add_rel(source, target)`
        causes not only the primary relationship to be created between 'source' and 'target',
        but also auto creates an additional relationship in the reverse direction between 'target' and 'source'.
        Also ensures both relationships are removed when calling `RelationshipManager.remove_rel`.
        """
        self.rm.enforce(relId, cardinality, directionality)

    def dumps(self) -> bytes:
        """Dump relationship tuples and objects to pickled bytes.
        The `objects` attribute and all objects stored therein
        (within the instance of `RelationshipManager.objects`) also get persisted.
        """
        return pickle.dumps(_PersistenceWrapper(
            objects=self.objects, relationships=self.relationships))

    @staticmethod
    def loads(asbytes: bytes): # -> RelationshipManager:
        """Load relationship tuples and objects from pickled bytes.
        Returns a `RelationshipManager` instance.

        Security note: pickle can execute arbitrary code during load -
        only call this on bytes from a trusted source.
        """
        data: _PersistenceWrapper = pickle.loads(asbytes)
        rm = RelationshipManager()
        rm.objects = data.objects
        rm.relationships = data.relationships
        return rm

    def clear(self) -> None:
        """Clear all relationships, does not affect .objects - if you want to clear that too then
        assign a new empty object to it. E.g. rm.objects = Namespace()
        """
        self.rm.clear()
        self.objects = _Namespace()

    # Util

    def debug_print_rels(self):
        """Just a diagnostic method to print the relationships in the rm.
        See also the `RelationshipManager.relationships` property."""
        print()
        pprint.pprint(self.relationships)
# Documentation
__pdoc__ = {}
__pdoc__['relmgr.relationship_manager'] = """
OOOO
"""
__pdoc__['RelationshipManager'] = """
# Welcome to Relationship Manager
A lightweight Object Database class - a central mediating class which
records all the one-to-one, one-to-many and many-to-many relationships
between a group of selected classes.
Create an instance of this class
```
rm = RelationshipManager()
```
then add relationships/pointers between any two Python objects by calling
`rm.add_rel()`. You can then make queries using e.g. `rm.find_targets()`
etc. as needed to interrogate what object points to what.
## Installation
```shell
pip install relationship-manager
```
## Usage
```python
from relmgr import RelationshipManager
rm = RelationshipManager()
rm.enforce("xtoy", "onetoone", "directional")
x = object()
y = object()
rm.add_rel(x, y, "xtoy")
assert rm.find_target(x, "xtoy") == y
```
## Constructor
Set the option `caching` if you want faster performance using Python
`lru_cache` technology - defaults to `True`.
## What is an object?
Any Python object can be used as a `source` or `target`. A pointer goes from
`source` to `target`.
You can also use strings as a `source` or `target`. This might be where you
are representing abstract relationships and need to have real Python objects
involved. E.g. `RelationshipManager.add_rel('a', 'b')`
## What is a relationship?
A relationship is a pointer from one object to another.
## What is a relationship id?
Allows you to have multiple, different relationships between two objects.
Object `a` might point to both `b` and `c` under relationship id 1 - and at
the same time `a` could point only to `c` under relationship id 2.
A `rel_id` can be an integer or descriptive string e.g. "x-to-y". The
default value of `rel_id` is 1.
## What is a 'back pointer'?
It's an implicit pointer (or relationship). For example, if `a` points to `b`
then you can say `b` is pointed to by `a`. Back pointers usually need
explicit wiring and are a pain to maintain since both sides of the
relationship need to synchronise - see [Martin Fowler ‘Refactorings’
book](https://martinfowler.com/books/refactoring.html) p. 197 “Change
Unidirectional Association to Bidirectional”.
Relationship Manager makes such look-ups easy, you can add a single
relationship then simply use the query `RelationshipManager.find_sources`
passing in the target e.g. `b`.
See the official [Relationship Manager
Pattern](https://abulka.github.io/projects/patterns/relationship-manager/)
page for more discussion on this topic.
"""
__pdoc__['RelationshipManager.dumps'] = """
Persistent Relationship Manager.
Provides an attribute object called `.objects` where you can keep all the
objects involved in relationships e.g.
rm.objects.obj1 = Entity(strength=1, wise=True, experience=80)
Then when you persist the Relationship Manager both the objects and
relations are pickled and later restored. This means your objects are
accessible by attribute name e.g. rm.objects.obj1 at all times. You can
assign these references to local variables for convenience e.g.
obj1 = rm.objects.obj1
Usage:
```
# persist
asbytes = rm.dumps()
# resurrect
rm2 = RelationshipManagerPersistent.loads(asbytes)
```
""" | /relationship_manager-2.0.1-py3-none-any.whl/relmgr/relationship_manager.py | 0.87925 | 0.369244 | relationship_manager.py | pypi |
# In[1]:
def distance_space(compounds, fragments, metric):
    """
    Compute the pairwise distance matrix between compounds and fragments.

    Parameters
    ----------
    compounds, fragments : numpy.ndarray
        2D feature arrays (one row per molecule). When the two arrays differ
        in width, the narrower one is zero-padded to match.
    metric : str
        One of 'euclidean', 'cosine', 'canberra' or 'manhattan'
        (case-insensitive).

    Returns
    -------
    list of list of float
        One row per compound, one entry per fragment.

    Raises
    ------
    ValueError
        If metric is unsupported. Raised up front (the original validated
        per element pair inside the loops and silently returned [] for an
        invalid metric when compounds was empty).
    """
    from scipy.spatial import distance

    # Resolve the metric once instead of re-comparing strings per element pair
    dispatch = {
        'euclidean': distance.euclidean,
        'cosine': distance.cosine,
        'canberra': distance.canberra,
        'manhattan': distance.cityblock,
    }
    try:
        measure = dispatch[metric.lower()]
    except KeyError:
        raise ValueError("Please choose between the Euclidean, Cosine, Canberra, and Manhattan distance metrics.") from None
    # Only pad when the widths actually differ; equal widths need no work,
    # which also avoids importing the padding helper unnecessarily
    if compounds.shape[1] != fragments.shape[1]:
        from master_strange_mol_rep import mol_rep as strange  # For arrays manipulation
        if compounds.shape[1] > fragments.shape[1]:
            fragments = strange.zero_pad_two_ndarrays(compounds, fragments)
        else:
            compounds = strange.zero_pad_two_ndarrays(fragments, compounds)
    return [[measure(u, i) for u in fragments] for i in compounds]
# In[2]:
def non_pca_relative_space(total_similarity, metric):
    """
    Convert a compound-to-fragment distance matrix into relative coordinates
    by solving a linear system anchored on the fragment identity basis.

    Only 'euclidean' and 'canberra' distance matrices are supported.

    NOTE(review): the arithmetic assumes at least 3 fragments (columns) -
    indices [1] and [2] are hard-coded; confirm with callers.
    """
    import numpy as np
    import pandas as pd
    relative_rep = []
    rep_cs = np.array(total_similarity)
    # Identity basis: one unit vector per fragment
    bb_a = np.zeros((rep_cs.shape[1], rep_cs.shape[1]))
    bb_a[np.diag_indices_from(bb_a)] = 1
    rep = pd.DataFrame(rep_cs)
    if metric.lower() == 'euclidean':
        asd = []
        bb_array = pd.DataFrame(bb_a)
        for i in range(bb_a.shape[0]):
            asd.append((bb_a[0] - bb_a[i]) * -2)
        # Extra equation from the difference of basis vectors 1 and 2
        asd.append((bb_a[1] - bb_a[2]) * -2)
        bb_mat = pd.DataFrame(asd)
        for i in range(rep.shape[0]):
            A = np.array(pd.DataFrame(np.array(bb_mat)))
            b = np.array(rep.iloc[i])
            qas = []
            for t in range(len(b)):
                qas.append(b[0]**2 - b[t]**2)
            qas.append(b[1]**2 - b[2]**2)
            # +0.001 nudges the matrix to avoid singularity
            # NOTE(review): this perturbation biases the solution slightly - confirm acceptable
            x = np.linalg.solve(A[1:] + 0.001, qas[1:])
            relative_rep.append(x)
    elif metric.lower() == 'canberra':
        for i in range(rep.shape[0]):
            asdf = []
            bb_array = pd.DataFrame(bb_a)
            dist = rep.iloc[i]
            A = np.array(bb_array * (dist - 4))
            b = 2 - dist
            for t in np.linalg.solve(A, pd.DataFrame(b)):
                asdf.append(float(t))
            relative_rep.append(asdf)
    else:
        raise ValueError("The generation of the non-PCA relative space is restricted to the utilisation of the Euclidean and Canberra distance-based spaces exclusively.")
    return relative_rep
# In[3]:
def pca_relative_space(fragments, input_distance_matrix):
    """
    Build a PCA-based relative space: project the fragments with PCA, then
    solve a linear system per compound row of the distance matrix to place
    each compound in the fragments' PCA coordinate frame.

    NOTE(review): assumes at least 3 fragments - indices [1] and [2] are
    hard-coded in the extra equations; confirm with callers.
    """
    import numpy as np
    import pandas as pd
    from sklearn.decomposition import PCA # Necessary for the creation of the PCA-based space
    A = fragments
    pca = PCA(n_components=A.shape[0])
    Y = pca.fit_transform(A)
    # Coefficient rows: differences of fragment 0's projection to every other
    bbb = []
    for i in range(0, Y.shape[1], 1):
        aaa = []
        for u in range(0, Y.shape[0], 1):
            aaa.append(Y[0][i] - Y[u][i])
        # Extra equation from the difference between fragments 1 and 2
        aaa.append(Y[1][i] - Y[2][i])
        bbb.append(aaa)
    E = pd.DataFrame(Y * Y) * -1
    B = pd.DataFrame(input_distance_matrix)
    C = B * B  # squared distances
    aza1 = []
    for u in range(0, C.shape[0], 1):
        aza = []
        for q in range(0, C.shape[1], 1):
            aza.append(C.iloc[u][0] - C.iloc[u][q] + sum(E.iloc[0]) - sum(E.iloc[q]))
        aza.append(C.iloc[u][1] - C.iloc[u][2] + sum(E.iloc[1]) - sum(E.iloc[2]))
        aza1.append(aza)
    qwe = []
    for i in aza1:
        # 0.0001 nudges the matrix to avoid singularity
        # NOTE(review): this perturbation biases the solution slightly - confirm acceptable
        qwe.append(np.linalg.solve(np.array(0.0001 + pd.DataFrame(bbb).T[1:]) * -2, i[1:]))
    return qwe
import numpy as np
def text_to_list(file_name):
    """
    Read a text file into a list of stripped lines.

    Parameters
    ----------
    file_name : string
        Text file name

    Returns
    -------
    list
        One entry per line, with surrounding whitespace removed.
    """
    stripped_lines = []
    with open(file_name, encoding='UTF-8', mode='r') as handle:
        for raw_line in handle:
            stripped_lines.append(raw_line.strip())
    return stripped_lines
def identify_format(data_list):
    """
    Identify the dosimetry text format from its first line.

    Parameters
    ----------
    data_list : list
        Each element of the list represents a line from the text file.

    Returns
    -------
    string
        'varian' for w2CAD format, identified by the '$' character at the beginning of the file.
        'ptw' for mcc format, identified by the word 'BEGIN_SCAN_DATA'.
        'just_numbers' for data without headers.
        'text_file' for other formats, including an empty file or a blank
        first line (which previously raised IndexError).
    """
    # Guard against an empty file or an empty first line
    if not data_list or not data_list[0]:
        return 'text_file'
    first_line = data_list[0]
    if first_line[0] == '$':
        return 'varian'
    if first_line == 'BEGIN_SCAN_DATA':
        return 'ptw'
    tokens = first_line.split()
    if not tokens:
        # Whitespace-only first line
        return 'text_file'
    try:
        float(tokens[0])
        return 'just_numbers'
    except ValueError:
        return 'text_file'
def get_data(file_name,
             start_word = None,
             end_word = None,
             delta = None):
    """
    Get and normalize data from a text-file (file that is structured as a sequence of lines).

    Since w2CAD and mcc formats are automatically detected, it is not necessary
    to specify start/end words in such cases.

    Parameters
    ----------
    file_name : string
        Name of the file
    start_word : string
        Word to identify the beginning of the data
    end_word : string
        Word to identify the end of the data
    delta : float
        Displacement in mm to define the started point

    Returns
    -------
    ndarray
        Data as a Numpy object (dose column normalized to 0-100%)

    Raises
    ------
    ValueError
        If a w2CAD file contains no $STOM/$STOD data markers.
    """
    all_list = text_to_list(file_name)
    file_format = identify_format(all_list)
    # w2CAD format (Varian)
    if file_format == 'varian':
        if '$STOM' in all_list:
            start_index = all_list.index('$STOM') + 1
            end_index = all_list.index('$ENOM') #Find the beginning and end of the data
        elif '$STOD' in all_list:
            start_index = all_list.index('$STOD') + 1
            end_index = all_list.index('$ENOD')
        else:
            # BUG FIX: previously fell through with start_index/end_index
            # undefined, crashing with UnboundLocalError.
            raise ValueError("w2CAD file without $STOM/$STOD data markers")
        data_list = all_list[start_index: end_index]
        # Extract data from the lines that begin with the '<' character
        data_list = [idx[1:-1].split() for idx in data_list if idx[0] == "<"]
        data_array = np.array(data_list).astype(float)
        # Normalize the dose column to its maximum (0-100%).
        data_array[:,1] = 100*data_array[:,1]/np.amax(data_array[:,1])
    # mcc format (PTW)
    elif file_format == 'ptw':
        start_index = all_list.index('BEGIN_DATA') + 1
        end_index = all_list.index('END_DATA') #Find the beginning and end of the data
        data_list = all_list[start_index: end_index]
        data_list = [line.split() for line in data_list]
        data_array = np.array(data_list).astype(float)
        data_array[:,1] = 100*data_array[:,1]/np.amax(data_array[:,1])
        # mcc rows may carry extra columns; keep position and dose only.
        data_array = data_array[:,0:2]
    # User defined words to identify start and end of the data
    else:
        if start_word is not None and start_word in all_list:
            start_index = all_list.index(start_word) + 1
        else:
            if start_word is not None:
                # BUG FIX: previously only printed a warning and then crashed
                # with UnboundLocalError; now fall back to the top of the file.
                print("Start word not found in the file")
            start_index = 0
        if end_word is not None and end_word in all_list:
            end_index = all_list.index(end_word)
        else:
            if end_word is not None:
                # BUG FIX: same UnboundLocalError as above; fall back to default.
                print("End word not found in the file")
            # NOTE(review): the default drops the file's last line (len - 1);
            # kept for backward compatibility.
            end_index = len(all_list) - 1
        data_list = all_list[start_index: end_index]
        data_list = [line.split() for line in data_list]
        data_array = np.array(data_list).astype(float)
        data_array[:,1] = 100*data_array[:,1]/np.amax(data_array[:,1])
    if delta is not None:
        # Shift the position axis by the requested displacement.
        data_array[:,0] = data_array[:,0] + float(delta)
    return data_array
def build_from_array_and_step(array, step):
    """Create a new array with the same length but with an additional axis.

    The first column represents the physical positions of the given values,
    built with evenly spaced steps starting from zero. The second column is
    the given array normalized so that its maximum equals 100.

    Parameters
    ----------
    array : ndarray,
        Numpy 1D array with the profile values
    step : float,
        The spacing between samples

    Returns
    -------
    array, ndarray
        A new array with shape (M, 2), where M is the shape of the array.

    Examples
    --------
    # BUG FIX (docs): the original examples showed the raw input values in the
    # second column; the function actually normalizes them to a maximum of 100.
    >>> y = np.array([2, 4, 6, 8, 10])
    >>> build_from_array_and_step(y, 0.5)
    [
    [0.0, 20.0]
    [0.5, 40.0]
    [1.0, 60.0]
    [1.5, 80.0]
    [2.0, 100.0]]

    >>> y = np.arange(6)
    >>> build_from_array_and_step(y, 3)
    [
    [0.0, 0.0]
    [3.0, 20.0]
    [6.0, 40.0]
    [9.0, 60.0]
    [12.0, 80.0]
    [15.0, 100.0]]
    """
    num = array.shape[0]
    start = 0
    stop = (num - 1) * step
    positions = np.linspace(start, stop, num = num, endpoint = True)
    profile = np.zeros((num, 2))
    profile[:,0] = positions
    # NOTE: an all-zero input array would divide by zero here.
    profile[:,1] = array / np.max(array) * 100
    return profile
def gamma_1D(ref, eval, dose_t = 3, dist_t = 2, dose_threshold = 0, interpol = 1):
    '''
    1-dimensional gamma index calculation.
    Dose profiles have to be normalized (0-100%).

    Parameters
    ----------
    ref : ndarray,
        Reference dose profile represented by a (M, 2) numpy array.
    eva : ndarray,
        Dose profile to be evaluated, represented by a (N, 2) numpy array.
    dose_t : float, default = 3
        Dose tolerance [%].
    dist_t : float, default = 2
        Distance to agreement [mm].
    dose_threshold : float, default = 0
        Dose threshold [%].
        Any point in the distribution with a dose value less than the threshold
        is going to be excluded from the analysis.
    interpol : float, default = 1
        Number of interpolated points to generate between each two consecutive points in "eval" data.

    Returns
    -------
    ndarray, float
        gamma distribution, gamma percent and number of evaluated points
    '''
    # NOTE(review): the parameter name "eval" shadows the builtin; renaming it
    # would break keyword callers, so it is kept as-is.
    # min_position and max_position to analyze (overlap of both profiles).
    min_position = np.max( (np.min(ref[:,0]), np.min([eval[:,0]])) )
    max_position = np.min( (np.max(ref[:,0]), np.max([eval[:,0]])) )
    # The interpolation grid spans the *ref* endpoints but its density is
    # derived from the number of *eval* points — presumably both profiles
    # cover the same region; TODO confirm.
    num_of_points = eval.shape[0]
    interp_positions = np.linspace(ref[0,0], ref[-1,0], (interpol + 1)*(num_of_points - 1) + 1, endpoint=True)
    # Positions outside the eval range become NaN and are excluded below.
    eval_from_interp_positions = np.interp(interp_positions, eval[:,0], eval[:,1], left = np.nan, right = np.nan)
    add_positions = np.array((interp_positions, eval_from_interp_positions))
    eval_from_interp_positions = np.transpose(add_positions)
    # A variable to store gamma calculations.
    gamma = np.zeros( (ref.shape[0], 2) )
    gamma[:,0] = ref[:,0] #Add the same positions.
    for i in range(ref.shape[0]):
        # Reference points outside the common region are not evaluated.
        if (ref[i,0] < min_position) or (ref[i,0] > max_position):
            gamma[i, 1] = np.nan
            continue
        Gamma_appended = np.array([]) # Gamma calculation for each point in "ref" data.
        for j in range(eval_from_interp_positions.shape[0]):
            dose_difference = ref[i,1] - eval_from_interp_positions[j,1]
            distance = ref[i,0] - eval_from_interp_positions[j,0]
            # Standard 1-D gamma: combined, tolerance-scaled distance.
            Gamma = np.sqrt(
                (distance**2) / (dist_t**2)
                + (dose_difference**2) / (dose_t**2))
            Gamma_appended = np.append(Gamma_appended, Gamma)
        # The gamma value is the minimum over all finite candidate points.
        gamma[i,1] = np.min( Gamma_appended[ ~np.isnan(Gamma_appended) ] )
        if ref[i,1] < dose_threshold:
            gamma[i,1] = np.nan
    # Coordinates for gamma values <= 1.
    less_than_1_coordinate = np.where(gamma[:,1] <= 1)
    # Number of points where gamma <= 1.
    less_than_1 = np.shape(less_than_1_coordinate)[1]
    # Number evaluated points (!= nan)
    evaluated_points = np.shape(gamma)[0] - np.shape(np.where(np.isnan(gamma[:,1])))[1]
    gamma_percent = float(less_than_1)/evaluated_points*100
    return gamma, gamma_percent, evaluated_points
if __name__ == '__main__':
"""
y = np.array([2,4,6,8,10])
A = build_from_array_and_step(y, 0.5)
print(A)
y = np.arange(10)
B = build_from_array_and_step(y, 3)
print(B)
"""
#Test files
file_name = './test_data/test_ptw.mcc'
#file_name = './test_data/test_varian.data'
#file_name = './test_data/test_txt.txt'
file_name_eval = "./test_data/X06 OPEN 10X10 PDD WAT 221214 13'13'42.mcc"
data_ref = get_data(file_name, start_word = 'Field 1')
data_eval = get_data(file_name_eval)
g, gp = gamma_1D(data_ref, data_eval)
print(gp) | /relative_dose_1d-0.1.7.tar.gz/relative_dose_1d-0.1.7/src/relative_dose_1d/tools.py | 0.861101 | 0.620219 | tools.py | pypi |
from matplotlib.figure import Figure
from matplotlib.backends.backend_qtagg import FigureCanvas
from PyQt6.QtWidgets import (QApplication, QWidget, QLabel, QLineEdit, QHBoxLayout,
QPushButton, QMessageBox, QFileDialog, QVBoxLayout,
QFormLayout, QInputDialog, QMainWindow, QDialog)
from PyQt6.QtCore import Qt, QCoreApplication
import numpy as np
from relative_dose_1d.tools import identify_format, get_data, gamma_1D, build_from_array_and_step
import sys
import os
import copy
class GUI(QDialog):
    """Qt dialog that plots two 1-D dose profiles, their difference and the
    1-D gamma distribution, with controls for scaling/shifting the last
    loaded profile and for the gamma-analysis parameters."""

    def __init__(self, D_ref = None, D_eval = None, parent=None):
        """Constructor for a graphical user interface (GUI). Data has to be in 2 columns,
        corresponding to positions and dose values, respectively.

        Parameters
        ----------
        D_ref : ndarray,
            Reference dose profile represented by a (M, 2) numpy array.
        D_eval : ndarray,
            Dose profile to be evaluated, represented by a (N, 2) numpy array.

        Returns
        -------
        A PyQt widget to showing dose profiles, gamma analysis and dose difference.
        """
        super().__init__(parent=parent)
        self.D_ref = D_ref
        self.D_eval = D_eval
        self.initializeUI()

    def initializeUI(self):
        """Set up the application window and initial data."""
        # x, y, width, height
        self.setGeometry(200,100,1000,400)
        self.setWindowTitle("Relative dose 1D")
        self.set_up_window()
        self.set_up_data()

    def set_up_window(self):
        "Layouts definition"
        self.main_box_layout = QHBoxLayout()
        self.v_box_layout = QVBoxLayout()
        self.settings_layout_v = QVBoxLayout()
        # Embedded matplotlib figure (profiles + difference/gamma axes).
        self.Q_grafica = Q_Base_Figure()
        self.main_box_layout.addLayout(self.settings_layout_v)
        self.main_box_layout.addLayout(self.v_box_layout)
        self.setLayout(self.main_box_layout)
        self.open_file_button = QPushButton("Load a text file", self)
        self.open_file_button.clicked.connect(self.open_file_button_clicked)
        self.clear_button = QPushButton("Clear", self)
        self.clear_button.clicked.connect(self.clear_data_and_plots)
        # Scale/shift buttons stay disabled until a profile is loaded.
        self.button_factor = QPushButton("Scale factor", self)
        self.button_factor.clicked.connect(self.factor_button_clicked)
        self.button_factor.setFixedSize(80, 40)
        self.button_factor.setEnabled(False)
        self.button_origin = QPushButton("Move origin", self)
        self.button_origin.clicked.connect(self.move_button_clicked)
        self.button_origin.setFixedSize(80, 40)
        self.button_origin.setEnabled(False)
        axis_label = QLabel("Axis position")
        #axis_label.setFont(QFont(Arial, 15))
        self.settings_layout_v.addWidget(axis_label, alignment = Qt.AlignmentFlag.AlignHCenter)
        self.settings_layout_v.addWidget(self.button_factor)
        self.settings_layout_v.addWidget(self.button_origin)
        self.settings_layout_v.addWidget(QLabel("Gamma", alignment = Qt.AlignmentFlag.AlignHCenter))
        # Gamma-analysis parameter inputs (dose %, DTA mm, threshold %, interp).
        gammaLayout = QFormLayout()
        self.dose_t_QLine = QLineEdit()
        self.dose_t_QLine.setFixedWidth(40)
        self.dose_t_QLine.setText("3.0")
        self.DTA_t_QLine = QLineEdit()
        self.DTA_t_QLine.setFixedWidth(40)
        self.DTA_t_QLine.setText("2.0")
        self.thres_QLine = QLineEdit()
        self.thres_QLine.setFixedWidth(40)
        self.thres_QLine.setText("0.0")
        self.interp_QLine = QLineEdit()
        self.interp_QLine.setFixedWidth(40)
        self.interp_QLine.setText("1")
        gammaLayout.addRow("Dose [%]:", self.dose_t_QLine)
        gammaLayout.addRow("DTA [mm]:", self.DTA_t_QLine)
        gammaLayout.addRow("Threshold [%]:", self.thres_QLine)
        gammaLayout.addRow("Interp.:", self.interp_QLine)
        self.gamma_button = QPushButton("Apply")
        self.gamma_button.clicked.connect(self.calc_difference_and_gamma)
        self.gamma_button.setFixedSize(120, 40)
        #self.button_origin.setEnabled(False)
        results_label = QLabel("Results", alignment = Qt.AlignmentFlag.AlignHCenter)
        self.gamma_rate_label = QLabel("Pass rate: ")
        self.total_points_label = QLabel("Total points: ")
        # NOTE(review): "ponits" typo in the user-facing label — fixing it
        # would change the UI text, so it is kept here.
        self.evaluated_points_label = QLabel("Evaluated ponits: ")
        self.settings_layout_v.addLayout(gammaLayout)
        self.settings_layout_v.addWidget(self.gamma_button)
        self.settings_layout_v.addWidget(results_label)
        self.settings_layout_v.addWidget(self.gamma_rate_label)
        self.settings_layout_v.addWidget(self.total_points_label)
        self.settings_layout_v.addWidget(self.evaluated_points_label)
        self.settings_layout_v.addStretch()
        self.v_box_layout.addWidget(self.open_file_button)
        self.v_box_layout.addWidget(self.clear_button)
        self.v_box_layout.addWidget(self.Q_grafica.Qt_fig)

    def set_up_data(self):
        """Plot the constructor-supplied profiles (if any) and run the analysis."""
        if self.D_ref is None:
            self.loaded_data = []
        else:
            self.loaded_data = [self.D_ref, self.D_eval]
            self.Q_grafica.plot_data(self.D_ref)
            self.Q_grafica.plot_data(self.D_eval)
            self.calc_difference_and_gamma()

    # Button's functions
    def open_file_button_clicked(self):
        """Let the user pick a file; ask for a start word if the format is unknown."""
        self.last_file_name, _ = QFileDialog.getOpenFileName()
        _ , extension = os.path.splitext(self.last_file_name)
        if self.last_file_name:
            with open(self.last_file_name, encoding='UTF-8', mode = 'r') as file:
                all_list = [line.strip() for line in file]
            # NOTE(review): "format" shadows the builtin of the same name.
            format = identify_format(all_list)
            if format == 'text_file':
                self.show_new_window() #New window for input user parameters.
            else:
                data = get_data(self.last_file_name)
                self.load_data(data)

    def clear_data_and_plots(self):
        """Reset all axes and drop every loaded profile."""
        self.Q_grafica.ax_perfil.clear()
        self.Q_grafica.ax_perfil_resta.clear()
        self.Q_grafica.ax_gamma.clear()
        self.Q_grafica.fig.canvas.draw()
        self.open_file_button.setEnabled(True)
        self.loaded_data = []

    def clear_gamma(self):
        """Clear only the difference and gamma axes."""
        self.Q_grafica.ax_perfil_resta.clear()
        self.Q_grafica.ax_gamma.clear()

    def factor_button_clicked(self):
        """Multiply the position axis of the last loaded profile by a user factor."""
        scale_factor, ok = QInputDialog.getText(self, 'Scale factor', 'Scale factor:')
        try:
            scale_factor = float(scale_factor)
            if ok:
                self.loaded_data[-1][:,0] = self.loaded_data[-1][:,0] * scale_factor
                # Re-plot everything from a deep copy, since clearing resets
                # self.loaded_data.
                cache_data = copy.deepcopy(self.loaded_data)
                self.clear_data_and_plots()
                for data in cache_data:
                    self.load_data(data)
        except ValueError:
            QMessageBox().critical(self, "Error", "Enter a number.")
            print('Error, give a number.')

    def move_button_clicked(self):
        """Shift the position axis of the last loaded profile by a user offset."""
        delta, ok = QInputDialog.getText(self, 'Scale factor', 'Origin displacement:')
        try:
            delta = float(delta)
            if ok:
                self.loaded_data[-1][:,0] = self.loaded_data[-1][:,0] + delta
                cache_data = copy.deepcopy(self.loaded_data)
                self.clear_data_and_plots()
                for data in cache_data:
                    self.load_data(data)
        except ValueError:
            QMessageBox().critical(self, "Error", "Enter a number.")
            print('Error, give a number.')

    def show_new_window(self):
        """Ask the user for a start word, then load the pending text file."""
        start_word, ok = QInputDialog.getText(self, 'Text Input Dialog', 'Start word:')
        if ok:
            data = get_data(self.last_file_name, start_word)
        else:
            data = get_data(self.last_file_name)
        self.load_data(data)

    # Additional functions
    def load_data(self, data):
        """Store and plot a profile; run the analysis once two profiles exist."""
        self.loaded_data.append(data)
        self.Q_grafica.plot_data(data)
        self.button_factor.setEnabled(True)
        self.button_origin.setEnabled(True)
        if len(self.loaded_data) == 2:
            self.calc_difference_and_gamma()

    def calc_difference_and_gamma(self):
        """Compute and plot the dose difference and gamma distribution."""
        data_A = self.loaded_data[0]
        data_B = self.loaded_data[1]
        # Using interpolation, new values of B are computed at positions given by A.
        data_B_from_A_positions = np.interp(data_A[:,0], data_B[:,0], data_B[:,1], left = np.nan)
        difference = data_A[:,1] - data_B_from_A_positions
        added_positions = np.array((data_A[:,0], difference))
        values = np.transpose(added_positions)
        g, g_percent, evaluated_points = gamma_1D(
            data_A,
            data_B,
            dose_t = float(self.dose_t_QLine.text()),
            dist_t = float(self.DTA_t_QLine.text()),
            dose_threshold = float(self.thres_QLine.text()),
            interpol = int(self.interp_QLine.text()),
            )
        self.Q_grafica.plot_resta(values)
        self.Q_grafica.ax_gamma.clear()
        #self.Q_grafica.ax_gamma = self.Q_grafica.ax_perfil_resta.twinx()
        self.Q_grafica.plot_gamma(g)
        #self.Q_grafica.ax_gamma.set_ylabel('gamma')
        # NOTE(review): the two counts below are integers formatted with
        # ":0.1f" (renders e.g. "11.0"); probably ":d" was intended.
        self.gamma_rate_label.setText(f"Pass rate: {g_percent:0.1f}%")
        self.total_points_label.setText(f"Total points: {data_A.shape[0]:0.1f}")
        self.evaluated_points_label.setText(f"Evaluated ponits: {evaluated_points:0.1f}")
class Q_Base_Figure:
    """Matplotlib figure embedded in Qt: left axes for the profiles, right
    axes for the dose difference with a twin y-axis for gamma."""

    def __init__(self):
        self.fig = Figure(figsize=(40,4), tight_layout = True, facecolor = 'whitesmoke')
        self.Qt_fig = FigureCanvas(self.fig)
        # Axes for the image
        self.ax_perfil = self.fig.add_subplot(1, 2, 1)
        self.ax_perfil.set_ylabel('Percentage [%]')
        self.ax_perfil.set_xlabel('Distance [mm]')
        self.ax_perfil.grid(alpha = 0.3)
        self.ax_perfil_resta = self.fig.add_subplot(1, 2, 2)
        self.ax_perfil_resta.set_ylabel('Percentage [%]')
        self.ax_perfil_resta.set_xlabel('Distance [mm]')
        self.ax_perfil_resta.grid(alpha = 0.3)
        # Twin y-axis on the difference plot for the gamma curve.
        self.ax_gamma = self.ax_perfil_resta.twinx()
        self.ax_gamma.set_ylabel('gamma')
        #self.ax_gamma.set_ylim((0, 2))

    def plot_data(self, data):
        """Plot a (M, 2) position/dose profile on the left axes and redraw."""
        x = data[:,0]
        y = data[:,1]
        self.ax_perfil.plot(x, y)
        self.ax_perfil.set_ylabel('Percentage [%]')
        self.ax_perfil.set_xlabel('Distance [mm]')
        self.ax_perfil.grid(alpha = 0.3)
        #self.ax_perfil.legend()
        self.fig.canvas.draw()

    def plot_resta(self, data):
        """Plot the dose difference on the right axes and redraw."""
        x = data[:,0]
        y = data[:,1]
        self.ax_perfil_resta.plot(x, y, color='r', label = 'Difference', alpha = 0.7)
        self.ax_perfil_resta.set_ylabel('Difference')
        self.ax_perfil_resta.set_xlabel('Distance [mm]')
        self.ax_perfil_resta.grid(alpha = 0.4)
        self.ax_perfil_resta.legend(loc = 'upper left')
        self.fig.canvas.draw()

    def plot_gamma(self, data):
        """Plot the gamma distribution plus a dashed gamma = 1 guide line."""
        x = data[:,0]
        y = data[:,1]
        self.ax_gamma.plot(x, y, color='g', label = 'gamma', marker = '.')
        self.ax_gamma.plot(x, np.ones(x.shape[0]), 'g--', alpha = 0.5, linewidth=2)
        self.ax_gamma.set_ylabel('gamma')
        self.ax_gamma.yaxis.set_label_position("right")
        self.ax_gamma.legend(loc = 'upper right')
        self.fig.canvas.draw()
def plot(D_ref, D_eval):
    """
    A function to show a graphical user interface (GUI) to showing 1D dose profiles,
    gamma analysis and dose difference. Data has to be in 2 columns,
    corresponding to positions and dose values, respectively.

    Parameters
    ----------
    D_ref : ndarray,
        Reference dose profile represented by a (M, 2) numpy array.
    D_eval : ndarray,
        Dose profile to be evaluated, represented by a (N, 2) numpy array.

    Returns
    -------
    A GUI showing dose profiles, gamma analysis and dose difference.

    Examples
    --------
    >>> from relative_dose_1d.GUI_tool import plot
    >>> from relative_dose_1d.tools import build_from_array_and_step
    >>> import numpy as np
    >>> a = np.array([0,1,2,3,4,5,6,7,8,9,10])
    >>> b = a + np.random.random_sample((11,))
    >>> A = build_from_array_and_step(a, 1)
    >>> B = build_from_array_and_step(b, 1)
    >>> w = plot(A,B)
    """
    if not QCoreApplication.instance():
        # No Qt application running: create one and block until the window
        # closes.  NOTE(review): sys.exit terminates the calling interpreter
        # when the dialog is closed — intended for standalone script usage.
        app = QApplication(sys.argv)
        window = GUI(D_ref, D_eval)
        window.show()
        sys.exit(app.exec())
    else:
        """This condition is used when external applications call to plot function."""
        return GUI(D_ref, D_eval)
def run_demo():
    """Open the comparison GUI on a small built-in randomized example."""
    base_values = np.arange(11)
    noisy_values = base_values + np.random.random_sample((11,))
    reference_profile = build_from_array_and_step(base_values, 1)
    evaluated_profile = build_from_array_and_step(noisy_values, 1)
    plot(reference_profile, evaluated_profile)
if __name__ == '__main__':
    # Manual demo: compare a linear profile with a noisy copy of itself.
    a = np.array([0,1,2,3,4,5,6,7,8,9,10])
    b = a + np.random.random_sample((11,))
    A = build_from_array_and_step(a, 1)
    B = build_from_array_and_step(b, 1)
    app = QApplication(sys.argv)
    window = GUI(A, B)
    # NOTE(review): window.show() is never called here (plot() does call it);
    # presumably the dialog stays hidden — verify intended behavior.
    sys.exit(app.exec()) | /relative_dose_1d-0.1.7.tar.gz/relative_dose_1d-0.1.7/src/relative_dose_1d/GUI_tool.py | 0.680772 | 0.213726 | GUI_tool.py | pypi |
import datetime
import time
def relative_to_now(start_date, no_error=False):
    """Return a human-readable string describing *start_date* relative to now.

    Parameters
    ----------
    start_date : datetime.datetime, datetime.date, int or float
        The moment to compare against the current time.  A ``datetime`` may
        be naive or timezone-aware; a number is interpreted as a POSIX
        timestamp (as returned by ``time.time()``).
    no_error : bool
        When True, an unsupported type is returned unchanged instead of
        raising.

    Returns
    -------
    str
        For example ``"3 days ago"``, ``"2 hours from now"`` or ``"just now"``
        (or ``start_date`` itself when ``no_error`` suppresses the error).

    Raises
    ------
    TypeError
        If ``start_date`` is not a supported type and ``no_error`` is False.
    """
    # datetime must be checked before date: datetime is a subclass of date.
    if isinstance(start_date, datetime.datetime):
        if start_date.tzinfo:
            # Compare aware datetimes using the same timezone.
            diff = datetime.datetime.now(start_date.tzinfo) - start_date
        else:
            diff = datetime.datetime.now() - start_date
        abs_seconds = diff.days * 3600 * 24 + diff.seconds
    elif isinstance(start_date, datetime.date):
        diff = datetime.date.today() - start_date
        # Subtracting two dates yields whole days; diff.seconds is always 0
        # (the original added it anyway, a no-op).
        abs_seconds = diff.days * 3600 * 24
    elif isinstance(start_date, (int, float)):
        # GENERALIZATION: int timestamps (e.g. int(time.time())) are now
        # accepted in addition to floats.
        abs_seconds = int(time.time() - start_date)
    else:
        if no_error:
            return start_date
        # BUG FIX: raise the specific TypeError (still caught by callers
        # handling Exception) instead of a bare Exception.
        raise TypeError("type must be date, datetime or a POSIX timestamp.")
    return convert(abs_seconds, start_date)
def build_string(unit, value, my_text):
    """Format *value* with a correctly pluralized *unit* and a suffix.

    ``build_string("day", 2, "ago")`` -> ``"2 days ago"``.
    """
    if value != 1:
        unit += "s"
    return f"{value} {unit} {my_text}"
def convert(abs_seconds, output):
    """Convert a signed number of seconds into a human-readable string.

    ``abs_seconds`` > 0 means the moment lies in the past ("ago"),
    negative values lie in the future ("from now").  ``output`` is the
    original input value; it is only returned if no branch below matches,
    which should not happen for real inputs.
    """
    my_text = "ago" if abs_seconds > 0 else "from now"
    abs_seconds = abs(abs_seconds)
    seconds = abs_seconds // 1  # no-op for ints; floors float inputs
    minutes = abs_seconds // 60
    hours = abs_seconds // 3600
    days = abs_seconds // (3600 * 24)
    weeks = abs_seconds // (3600 * 24 * 7)
    # A "month" is approximated as 365/12 days, so 12 months == 365 days,
    # keeping the month and year branches consistent.
    months = int(abs_seconds // (3600 * 24 * 7 * (365 / 12 / 7)))
    years = abs_seconds // (3600 * 24 * 365)
    if seconds == 0:
        output = "just now"
    elif seconds < 60:
        output = build_string("second", seconds, my_text)
    elif minutes < 60:
        output = build_string("minute", minutes, my_text)
    elif hours < 24:
        output = build_string("hour", hours, my_text)
    elif days < 7:
        output = build_string("day", days, my_text)
    elif weeks < 5:
        output = build_string("week", weeks, my_text)
    elif months < 12:
        output = build_string("month", months, my_text)
    elif years >= 1:
        output = build_string("year", years, my_text)
    return output | /relative_to_now-1.1.0-py3-none-any.whl/RelativeToNow/__init__.py | 0.578805 | 0.297938 | __init__.py | pypi |
from dataclasses import dataclass
from relativisticpy.indices.base import BaseTensorIndices
@dataclass
class MetricIndices(BaseTensorIndices):
def __post_init__(self):
pass
def __mul__(self, other : BaseTensorIndices):
"""Perform index raising and lowering."""
if not isinstance(other, BaseTensorIndices):
raise TypeError(f"unsupported operand type(s) for *: '{type(self)}' and '{type(other)}'")
if self.dimention != other.dimention:
raise ValueError("tensors have different dimensions")
summed_index = None
indices = []
for index in self.indices:
if index in other.indices:
if summed_index is not None:
raise ValueError("tensors have more than one common index")
summed_index = index
else:
indices.append(index)
for i, index in enumerate(other.indices):
if index not in self.indices:
indices.append(index)
if summed_index is None:
raise ValueError("tensors do not have a common index")
# Determine the factor for index raising or lowering
if summed_index.contravariant:
factor = -1
else:
factor = 1
# Compute the new basis and valid flag
new_basis = self.basis
valid = self.valid and other.valid
for index in indices:
if index.contravariant:
new_basis = new_basis.contract(index, factor)
else:
new_basis = new_basis.contract(index, -factor)
if summed_index.contravariant:
new_basis = new_basis.inv()
self_summed = True
elif summed_index.covariant:
self_summed = True
return MetricIndices(
indices=indices,
basis=new_basis,
dimention=self.dimention,
rank=self.rank,
scalar=self.scalar and other.scalar,
shape=tuple(len(self.indices), len(other.indices)),
valid=valid,
self_summed=self_summed,
parent_tensor=self.parent_tensor
) | /indices/metric_indices.py | 0.8321 | 0.32857 | metric_indices.py | pypi |
from operator import itemgetter
import itertools as it
from relativisticpy.shared.helpers.helpers import transpose_list
class TensorSelfSummed:
    """Handles a tensor whose index set contains a repeated (self-summed)
    index.

    ``indices`` is expected to expose ``indices`` (the individual index
    objects), iteration over index-value tuples, and
    ``get_self_summed_contex()`` — TODO confirm against the indices package.
    """

    def __init__(self, indices):
        self.indices = indices
        # Pre-computed context describing the index set after self-summation.
        self.resulting_indices = self.indices.get_self_summed_contex()

    def all_component(self):
        """Return every index combination whose repeated slots carry equal values.

        A repeated index pair appears to be flagged by a truthy ``i*j`` on the
        index objects — presumably operator overloading; verify in the index
        class.
        """
        ne = [[i.order, j.order] for i, j in it.combinations(self.indices.indices, r=2) if i*j]
        repeated_index_locations = transpose_list(ne)
        return [indices for indices in list(self.indices) if itemgetter(*repeated_index_locations[0])(indices) == itemgetter(*repeated_index_locations[1])(indices)]

    def component(self, components = None):
        """Return the combinations contributing to ``components`` (or all of them)."""
        new = self.resulting_indices.result
        old = self.indices  # NOTE(review): unused local — candidate for removal
        # Pair each post-summation index with its originating index
        # (``new_index+old_index`` appears to flag matching indices).
        ne = [[new_index.order, old_index.order] for new_index,old_index in it.product(self.indices.get_self_summed_contex().result.indices, self.indices.get_self_summed_contex().old_indices.indices) if new_index+old_index]
        repeated_index_locations = transpose_list(ne)
        if not new.scalar and components != None:
            return [indices for indices in self.all_component() if itemgetter(*repeated_index_locations[1])(indices) == itemgetter(*repeated_index_locations[0])(components)]
        else:
            return self.all_component()
class TensorIndicesArithmetic:
    """Pairs the index combinations of two tensors for element-wise
    arithmetic (addition/subtraction)."""

    def __init__(self, indicesA, indicesB):
        self.indicesA = indicesA
        self.indicesB = indicesB
        # '+' on indices objects builds the combined context — presumably
        # matching indices by symbol; verify in the indices package.
        self.product_object = self.indicesA + self.indicesB

    def all_component(self):
        """Pair up index tuples of A and B whose repeated slots carry equal values."""
        ne = [[i.order, i.context.repeated_index.order] for i in self.product_object.parentA.indices]
        repeated_index_locations = transpose_list(ne)
        return [(IndexA, IndexB) for (IndexA, IndexB) in list(it.product(self.indicesA, self.indicesB)) if itemgetter(*repeated_index_locations[0])(IndexA) == itemgetter(*repeated_index_locations[1])(IndexB)]

    def component(self, components = None):
        """Restrict ``all_component`` to the pairs matching ``components``."""
        res = self.product_object
        if not res.result.scalar and components != None:
            return [(IndicesA, IndicesB) for (IndicesA, IndicesB) in self.all_component() if IndicesA == tuple(components)]
        else:
            return self.all_component()
class TensorIndicesProduct:
    """Pairs the index combinations of two tensors for a (contracted)
    tensor product."""

    def __init__(self, indicesA, indicesB):
        self.indicesA = indicesA
        self.indicesB = indicesB
        # '*' on indices objects builds the product/contraction context.
        self.product_object = self.indicesA * self.indicesB

    def all_component(self):
        """
        Returns combinatorial of indices, where summed index locations are matched.
        """
        ne = [[i.order, i.context.summed_index.order] for i in self.product_object.parentA.indices if i.summed]
        summed_index_locations = transpose_list(ne)
        return [(IndexA, IndexB) for (IndexA, IndexB) in list(it.product(self.indicesA, self.indicesB)) if itemgetter(*summed_index_locations[0])(IndexA) == itemgetter(*summed_index_locations[1])(IndexB)]

    def return_result_locations(self):
        """Map each free index of the result back to its position in A or B."""
        res = self.product_object
        result_dictionary = {'result_indices_matching_with_A' : [], 'indices_A_matched_with_result' : [], 'result_indices_matching_with_B' : [], 'indices_B_matched_with_result' : []}
        for i in res.result.indices:
            for j in i.context.child_index:
                if i.symbol == j.symbol:
                    if j.parent == 'Parent_A':
                        result_dictionary['result_indices_matching_with_A'].append(i.order)
                        result_dictionary['indices_A_matched_with_result'].append(j.order)
                    elif j.parent == 'Parent_B':
                        result_dictionary['result_indices_matching_with_B'].append(i.order)
                        result_dictionary['indices_B_matched_with_result'].append(j.order)
        return result_dictionary

    def component(self, components = None):
        """Return the (A, B) index pairs contributing to ``components``
        (or every summed pair when ``components`` is None)."""
        res = self.product_object
        index_locations = self.return_result_locations()
        result_indices_in_A = index_locations['result_indices_matching_with_A']
        result_indices_in_B = index_locations['result_indices_matching_with_B']
        A_indices_not_summed = index_locations['indices_A_matched_with_result']
        B_indices_not_summed = index_locations['indices_B_matched_with_result']
        if not res.result.scalar and components != None:
            if len(A_indices_not_summed) != 0 and len(B_indices_not_summed) != 0:
                return [(IndicesA, IndicesB) for (IndicesA, IndicesB) in self.all_component() if itemgetter(*A_indices_not_summed)(IndicesA) == itemgetter(*result_indices_in_A)(components) \
                    and itemgetter(*B_indices_not_summed)(IndicesB) == itemgetter(*result_indices_in_B)(components)]
            elif len(A_indices_not_summed) == 0 and len(B_indices_not_summed) != 0:
                return [(IndicesA, IndicesB) for (IndicesA, IndicesB) in self.all_component() if itemgetter(*B_indices_not_summed)(IndicesB) == itemgetter(*result_indices_in_B)(components)]
            elif len(B_indices_not_summed) == 0 and len(A_indices_not_summed) != 0:
                # NOTE(review): this branch filters on IndicesB using A's
                # index positions — by symmetry with the branch above it
                # likely should read itemgetter(*A_indices_not_summed)(IndicesA);
                # confirm before changing.
                return [(IndicesA, IndicesB) for (IndicesA, IndicesB) in self.all_component() if itemgetter(*A_indices_not_summed)(IndicesB) == itemgetter(*result_indices_in_A)(components)]
        elif not res.result.scalar and components != None and self.operation in ['+', '-']:
            # NOTE(review): unreachable — the condition repeats the `if` above,
            # and `self.operation` is never assigned in this class (it would
            # raise AttributeError if this branch were ever evaluated).
            return [(IndicesA, IndicesB) for (IndicesA, IndicesB) in self.all_component() if IndicesA == tuple(components)]
        else:
            return self.all_component() | /indices/products.py | 0.625667 | 0.355579 | products.py | pypi |
relativity
==========
multi-index data structures
Motivation
----------
We're going to take a mental journey of discovery to see why
relativity was written, and how you can use it to simplify
some of the most difficult problems that come up regularly
when programming. Rather than leaping straight from programming
with python's standard data structures to programming with
relativistic data structures, we'll get a running start
by programming in a version of python that is missing
key data structures. Then, we will draw a line from this
deficient bad version of python to regular python, and
then extend that line on into relativity.
Dict to List
''''''''''''
Imagine programming without hashmaps. For example, let's say we have
a list of ``Restaurant`` objects and ``City`` objects, and we want to
get how many ``Restaurants`` are in each ``City``.
Normally this is simple:
.. code-block:: python
restaurants_in_city = {}
for restaurant in restaurants:
city = restaurant.city
restaurants_in_city[city] = restaurants_in_city.get(city, 0) + 1
def get_restaurant_count(city):
return restaurants_in_city.get(city, 0)
But, imagine how you would approach the problem if the only available
data structure was a list.
.. code-block:: python
cities = []
restaurants_in_city = []
for restaurant in restaurants:
missing = True
for idx, city in enumerate(cities):
if city == restaurant.city:
restaurants_in_city[idx] += 1
missing = False
if missing:
cities.append(restaurant.city)
restaurants_in_city.append(1)
def get_restaurant_count(city):
for idx, city2 in enumerate(cities):
if city == city2:
return restaurants_in_city[idx]
return 0
Comparing the two examples, there are a few key differences:
- there are more low value local values (``idx``)
- single data structures split into multiple, which must
then be kept in sync
- the code is longer, therefore harder to read,
modify, and debug
Let's leave this dystopian data structure wasteland behind
for now and go back to regular python.
Dict to M2M
'''''''''''
The same differences that showed up when programming with
and without hashmaps will come up again when comparing
programming with single-index hashmaps to relativistic
multi-index hashmaps.
Returning to the restaurants and cities example, what if
a restaurant can have multiple locations and we need to
keep track of which cities each restaurant is in,
as well as which restaurants are in each city.
Note that we allow a restaurant to have multiple
locations within the same city, so sets must be used
to avoid double counting.
.. code-block:: python
restaurants_in_city = {}
cities_of_restaurant = {}
for restaurant in restaurants:
for location in restaurant.locations:
restaurants_in_city.setdefault(location.city, set()).add(restaurant)
cities_of_restaurant.setdefault(restaurant, set()).add(location.city)
def get_restaurants_in_city(city):
return restaurants_in_city.get(city, set())
def get_cities_of_restaurant(restaurant):
return cities_of_restaurant.get(restaurant, set())
Relativity's most basic data structure is a many-to-many
mapping ``M2M``. ``M2M`` is a systematic abstraction over
associating every key with a set of values, and every
value with a set of keys. See how ``M2M`` simplifies
the problem:
.. code-block:: python
restaurant_city_m2m = M2M()
for restaurant in restaurants:
for location in restaurant.locations:
restaurant_city_m2m.add(restaurant, location.city)
get_restaurants_in_city = restaurant_city_m2m.inv.get
get_cities_of_restaurant = restaurant_city_m2m.get
Recall that the advantages of having single-index hashmaps
were shorter code, with fewer long lived data structures
and fewer local values. ``M2M`` doesn't replace ``dict``
any more than ``dict`` replaces ``list``. Rather it is
a new layer of abstraction that can greatly simplify
a broad class of problems.
Is it possible to go further? Are there higher levels
of abstraction that can represent more complex relationships
in fewer data structures, and be manipulated with fewer
lines of code and intermediate values?
M2M to M2MGraph
'''''''''''''''
Where relativity really shines is releiving the programmer
of the burden of keeping data structures consistent with updates.
Let's consider our restaurant example if we need to be able
to add and remove locations one at a time and still be able
to query.
With ``M2M`` objects, the problem is doable, but fiddly to
implement:
.. code-block:: python
restaurant_location = M2M()
location_city = M2M()
def add_location(location):
restaurant_location.add(location.restaurant, location)
location_city.add(location, location.city)
def remove_location(location):
del location_city[location]
del restaurant_location.inv[location]
def restaurants_in_city(city):
restaurants = set()
for location in location_city.inv[city]:
for restaurant in restaurant_location.inv[location]:
restaurants.add(restaurant)
return restaurants
def cities_of_restaurant(restaurant):
cities = set()
for location in restaurant_location[restaurant]:
for city in location_city[location]:
cities.add(city)
return cities
This problem can be simplified by stepping up a level of
abstraction.
Where ``M2M`` is a data structure of keys and values, ``M2MGraph``
is a higher-level data structure of ``M2M`` s.
With ``M2MGraph``, this problem becomes simple and
intuitive:
.. code-block:: python
data = M2MGraph([('restaurant', 'location'), ('location', 'city')])
def add_location(location):
data['restaurant', 'location', 'city'].add(
location.restaurant, location, location.city)
def remove_location(location):
data.remove('location', location)
def restaurants_in_city(city):
return data.pairs('city', 'restaurant').get(city)
def cities_of_restaurant(restaurant):
return data.pairs('restaurant', 'city').get(restaurant)
Introducing Chain
'''''''''''''''''
Graphs are good for representing arbitrary sets of data, but they
are awkward to query over. ``M2MChain`` objects are sequences of ``M2M`` objects, where
the keys of ``M2M`` n are meant to be drawn from the same pool
as the values of ``M2M`` n - 1.
A simple way to construct a chain is with the ``chain`` helper function.
.. code-block:: python
students2classes = M2M([
('alice', 'math'),
('alice', 'english'),
('bob', 'english'),
('carol', 'math'),
('doug', 'chemistry')])
classmates = chain(students2classes, students2classes.inv)
By chaining the student:class map to itself, we can easily
query which students have classes together.
.. code-block:: python
>>> classmates.only('alice')
M2MChain([M2M([('alice', 'math'), ('alice', 'english')]), M2M([('math', 'carol'), ('math', 'alice'), ('english', 'bob'), ('english', 'alice')])])
>>> classmates.only('alice').m2ms[1]
M2M([('math', 'carol'), ('math', 'alice'), ('english', 'bob'), ('english', 'alice')])
>>> classmates.only('alice').m2ms[1].inv.keys()
['bob', 'carol', 'alice']
Relativity and DataBases
------------------------
Relativity is excellent at representing many-to-many relationships
from databases which are otherwise awkward to handle.
M2M + ORM
'''''''''
Let's consider an example from Django to start.
.. code-block:: python
from django.db import models
class Student(models.model):
name = models.StringField()
class Course(models.model):
name = models.StringField()
students = models.ManyToMany(Student)
Students take many courses, and each course has many students.
Constructing an ``M2M`` over these relationships is very natural:
.. code-block:: python
from relativity import M2M
StudentCourse = Course.students.through
enrollments = M2M(
StudentCourse.objects.all().values_list('student', 'course'))
Design Philosophy
-----------------
DB Feature Sets
'''''''''''''''
A typical SQL database, such as PostGres, MySQL, SQLServer, Oracle, or DB2
offers many features which can be split into four categories:
- relational data model and queries
- network protocol and multiple concurrent connections
- transactions, atomic updates, and MVCC_
- persistent storage, backups, and read replicas
Let's call these "relational", "network", "transactional",
and "persistence" feature sets.
.. _MVCC: https://en.wikipedia.org/wiki/Multiversion_concurrency_control
"Alternative" Databases
'''''''''''''''''''''''
The most widely used alternative is probably SQLite_. SQLite
has relational, transactional, and persistence feature sets but does not have
a network protocol. Instead it must be embedded_
as a library inside another application.
Another example is the venerable ZODB_. ZODB has
network, transactional, and persistence feature sets
but replaces the relational data model
with an object data model.
As an extreme example of how less can be more, memcached_ has
only network features. Data is stored ephemerally in the form of opaque blobs without
any data model. There is no atomicity of updates: there is no way to ensure that
two writes either both succeed or both fail.
The so-called "NoSQL" databases (cassandra_, couchdb_, mongodb_, etc)
generally provide network and persistence features but lack a relational data model
and transactionality.
.. _embedded: https://docs.python.org/3/library/sqlite3.html
.. _SQLite: https://www.sqlite.org/
.. _ZODB: http://www.zodb.org/en/latest/
.. _memcached: https://memcached.org/
.. _cassandra: http://cassandra.apache.org/
.. _couchdb: http://couchdb.apache.org/
.. _mongodb: https://www.mongodb.com/
Relativity: Relational à la carte
'''''''''''''''''''''''''''''''''
In this design space, Relativity offers a relational feature set and nothing else.
Relativity allows you to build in-memory data structures that represent relationships
among arbitrary Python objects and then execute queries over those objects and
relationships via a very natural and pythonic API.
============= ====================
SQL Relativity
------------- --------------------
result-set sets and M2Ms
join chain and attach
order by sort and sorted
where-clause list comprehension
============= ====================
Architecture
------------
The fundamental unit of Relativity is the *relation*, in the form of
the ``M2M``. All other data structures are
various types of ``M2M`` containers. An ``M2M`` is a very simple
data structure that can be represented as two dicts:
.. code-block:: python
{key: set(vals)}
{val: set(keys)}
The main job of the ``M2M`` is to broadcast changes to the
underlying ``dict`` and ``set`` instances such that they are kept in
sync, and to enumerate all of the key, val pairs.
Similarly, the higher order data structures --
``M2MGraph``, ``M2MChain``, and ``M2MStar`` -- broadcast changes to
underlying ``M2M`` s and can return and enumerate them.
``M2MChain`` and ``M2MStar``: rows of relations
'''''''''''''''''''''''''''''''''''''''''''''''
``M2MChain`` and ``M2MStar`` are implemented as thin wrappers over a ``list``
of ``M2M``. The main feature they provide is "row-iteration". The difference
between them is how they define a row. ``M2MChain`` represents relationships
that connect end-to-end. ``M2MStar`` represents relationships that all
point to the same base object, similar to a `star schema`_.
.. _star schema: https://en.wikipedia.org/wiki/Star_schema
Shared ``M2M`` s
''''''''''''''''
All of the higher order data structures are concerned with the structure
between and among ``M2M`` s. The contents within a particular ``M2M`` does
not need to maintain any invariants. That is, if all of the ``M2M`` s within
one of the higher order data structures were scrambled up, the higher order
data structure would still be valid.
(Contrast with, if you were to scramble
the key sets and val sets around within an ``M2M``, it would be totally
inconsistent.)
This has important consequences, because it means that various instances
of ``M2MGraph``, ``M2MChain``, and ``M2MStar`` may *share* their underlying
``M2M`` s, and continue to update them. This means that all of these higher
order data structures can be treated as cheap and ephemeral.
For example, ``M2MGraph.chain(*cols)`` will construct and return a new
``M2MChain`` over the ``M2M`` s linking the passed columns. All that
actually happens here is the ``M2MGraph`` is queried for the underlying
``M2M`` s, then the list of ``M2M`` s is passed to the ``M2MChain``
constructor which simply holds a reference to them.
Another way to think of ``M2MGraph``, ``M2MChain`` and ``M2MStar`` is
as cheap views over the underlying ``M2M`` s. No matter how much data is in
the underlying ``M2M`` s, assembling one of these higher order data structures
over top has a fixed, low cost.
Relativity & Python Ecosystem
-----------------------------
Pandas_
'''''''
Both Relativity and Pandas enable clean extraction of data from a SQL database
to an in-memory data structure which may be further processed. Both libraries
provide data structures that can easily express queries over the in-memory
data-set that would otherwise be very difficult and tempt a developer to go
back to the database multiple times.
This sounds like Relativity and Pandas should be in competition; but, in practice
they are complementary. Whereas Pandas is excellent at representing tabular
data in rows and columns, Relativity excels at representing the foreign key
relationships that connect rows in different tables. Pandas makes it easy
to take a SQL result set and further refine it by filtering rows and adding
columns. Relativity makes it easy to extract the foreign key relationships
among many tables and further refine them by filtering by connectedness and
adding additional relationships.
.. _Pandas: http://pandas.pydata.org/pandas-docs/stable/getting_started/overview.html
When to Use
"""""""""""
Use Pandas_ for doing analysis of data within rows of a table; use
Relativity for doing analysis of the relationships between rows of
different tables.
Coming back to the students-and-classes example:
.. code-block:: python
class Enrollment(models.Model):
student = models.ForeignKey(Student)
class = models.ForeignKey(Class)
grade = models.FloatField() # 0.0 - 5.0
    # Pandas is great at determining each student's GPA
enrollments_data_frame.group_by(['student']).mean()
Better Together
"""""""""""""""
At a low-level, a Pandas_ ``Series`` and a Relativity ``M2M`` can
both represent multiple values per key, so it is easy to convert
between the two.
.. code-block:: python
>>> import pandas
>>> import relativity
>>> s = pandas.Series(data=[1, 2, 2], index=['a', 'a', 'b'])
>>> s
a 1
a 2
b 2
dtype: int64
>>> m2m = relativity.M2M(s.items())
>>> m2m
M2M([('a', 1L), ('a', 2L), ('b', 2L)])
>>> keys, vals = zip(*m2m.iteritems())
>>> s2 = pandas.Series(data=vals, index=keys)
>>> s2
a 1
a 2
b 2
dtype: int64
NetworkX_
'''''''''
NetworkX_ is the "graph theory library" of Python:
"NetworkX is a Python package for the creation, manipulation,
and study of the structure, dynamics, and functions of complex networks."
NetworkX_ is great at representing arbitrary connections among a group
of nodes. Relativity has relationship-centric APIs and data structures,
where the ``M2M`` represents a single relationship, and ``M2MChain``,
``M2MStar``, and ``M2MGraph`` build higher order connections.
Underneath, both are backed by ``dict``.
.. _NetworkX: https://networkx.github.io/
| /relativity-20.1.0.tar.gz/relativity-20.1.0/README.rst | 0.866627 | 0.775009 | README.rst | pypi |
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import List
import re
import xml.etree.ElementTree as ET
from .relaton_bib import lang_filter, to_ds_instance
from .localized_string import LocalizedString
from .affiliation import Affiliation
from .contributor import Contributor
def localized_string():
    """Return a converter that coerces raw values into ``LocalizedString``."""
    return to_ds_instance(LocalizedString)
@dataclass
class FullName:
    """Structured full name of a person.

    Either ``surname`` or ``completename`` must be given; every field
    accepts raw ``str`` values (or lists of them), which are coerced into
    ``LocalizedString`` instances in ``__post_init__``.
    """

    surname: LocalizedString = None
    completename: LocalizedString = None
    forename: List[LocalizedString] = field(default_factory=list)
    initial: List[LocalizedString] = field(default_factory=list)
    addition: List[LocalizedString] = field(default_factory=list)
    prefix: List[LocalizedString] = field(default_factory=list)

    def __post_init__(self):
        if not self.surname and not self.completename:
            raise ValueError("Should be given surname or completename")
        if isinstance(self.surname, str):
            self.surname = LocalizedString(self.surname)
        if isinstance(self.completename, str):
            self.completename = LocalizedString(self.completename)
        # The four list-valued attributes share identical normalization;
        # the original repeated the same three-branch block four times.
        for attr in ("forename", "initial", "addition", "prefix"):
            setattr(self, attr, self._normalize(getattr(self, attr)))

    @staticmethod
    def _normalize(value):
        """Coerce a str or list of raw values into a list of LocalizedString."""
        if isinstance(value, str):
            return [LocalizedString(value)]
        if isinstance(value, list):
            return list(map(localized_string(), value))
        # Anything else (already-normalized or unexpected) passes through,
        # matching the original behavior.
        return value

    def to_xml(self, parent, opts={}):
        """Render as a <name> XML element.

        :param parent: parent element, or None for a standalone element
        :param opts: serialization options; "lang" filters localized parts
        :return: the created element
        """
        name = "name"
        result = ET.Element(name) if parent is None \
            else ET.SubElement(parent, name)
        if self.completename:
            # A complete name replaces all the structured parts.
            self.completename.to_xml(ET.SubElement(result, "completename"))
        else:
            for p in lang_filter(self.prefix, opts):
                p.to_xml(ET.SubElement(result, "prefix"))
            for f in lang_filter(self.forename, opts):
                f.to_xml(ET.SubElement(result, "forename"))
            for i in lang_filter(self.initial, opts):
                i.to_xml(ET.SubElement(result, "initial"))
            self.surname.to_xml(ET.SubElement(result, "surname"))
            for a in lang_filter(self.addition, opts):
                a.to_xml(ET.SubElement(result, "addition"))
        return result

    def to_asciibib(self, prefix):
        """Render in AsciiBib format under ``<prefix>.name.*`` keys."""
        prf = f"{prefix}.name." if prefix else "name."
        out = [fn.to_asciibib(f"{prf}forename", len(self.forename))
               for fn in self.forename]
        out += [i.to_asciibib(f"{prf}initial", len(self.initial))
                for i in self.initial]
        if self.surname:
            out.append(self.surname.to_asciibib(f"{prf}surname"))
        out += [ad.to_asciibib(f"{prf}addition", len(self.addition))
                for ad in self.addition]
        out += [pr.to_asciibib(f"{prf}prefix", len(self.prefix))
                for pr in self.prefix]
        if self.completename:
            out.append(self.completename.to_asciibib(f"{prf}completename"))
        return "\n".join(out)
class PersonIdentifierType(Enum):
    """Allowed schemes for :class:`PersonIdentifier`."""

    ISNI = "isni"
    URI = "uri"

    @classmethod
    def has_value(cls, value):
        """Return True when *value* is one of the enum's string values."""
        return value in cls._value2member_map_


@dataclass
class PersonIdentifier:
    """A person identifier such as an ISNI code or a URI.

    ``type`` may be given as the string ``"isni"``/``"uri"`` or as a
    ``PersonIdentifierType`` member (which is unwrapped to its value).
    """

    type: str
    value: str

    def __post_init__(self):
        # BUG FIX: unwrap an enum member *before* validating.  The original
        # validated first, so passing a PersonIdentifierType member always
        # raised ValueError even though the conversion below supports it.
        if isinstance(self.type, PersonIdentifierType):
            self.type = self.type.value
        if not PersonIdentifierType.has_value(self.type):
            raise ValueError('Invalid type. It should be "isni" or "uri".')

    def to_xml(self, parent):
        """Render as an <identifier> XML element.

        :param parent: parent element, or None for a standalone element
        :return: the created element
        """
        name = "identifier"
        result = ET.Element(name) if parent is None \
            else ET.SubElement(parent, name)
        result.text = self.value
        result.attrib["type"] = self.type
        # BUG FIX: sibling to_xml implementations return the element; the
        # original implicitly returned None here.
        return result

    def to_asciibib(self, prefix="", count=1):
        """Render in AsciiBib format.

        :param prefix: attribute-path prefix
        :param count: number of sibling identifiers; >1 emits a marker line
        """
        pref = prefix + "." if prefix else prefix
        out = [f"{prefix}::"] if count > 1 else []
        out.append(f"{pref}type:: {self.type}")
        out.append(f"{pref}value:: {self.value}")
        return "\n".join(out)
@dataclass
class Person(Contributor):
    """A person acting as a contributor, with a full name, optional
    affiliations and optional identifiers (ISNI/URI)."""

    name: FullName = None
    affiliation: List[Affiliation] = field(default_factory=list)
    identifier: List[PersonIdentifier] = field(default_factory=list)

    def __post_init__(self):
        # WORKAROUND for https://bugs.python.org/issue36077
        if not self.name:
            raise ValueError("missing name")

    def to_xml(self, parent, opts={}):
        """Render as a <person> XML element and return it."""
        tag = "person"
        node = ET.SubElement(parent, tag) if parent is not None else ET.Element(tag)
        self.name.to_xml(node, opts)
        for affiliation in self.affiliation:
            affiliation.to_xml(node, opts)
        for ident in self.identifier:
            ident.to_xml(node)
        for contact in self.contact:
            contact.to_xml(node)
        return node

    def to_asciibib(self, prefix="", count=1):
        """Render in AsciiBib format; a trailing "*" in *prefix* becomes "person"."""
        pref = re.sub(r"\*$", "person", prefix)
        lines = [f"{pref}::"] if count > 1 else []
        lines.append(self.name.to_asciibib(pref))
        lines.extend(a.to_asciibib(pref, len(self.affiliation))
                     for a in self.affiliation)
        lines.extend(i.to_asciibib(pref, len(self.identifier))
                     for i in self.identifier)
        lines.append(super().to_asciibib(pref))
        return "\n".join(line for line in lines if line)

    def bib_name(self) -> str:
        """The person's complete name as a plain string."""
        return str(self.name.completename)
import datetime
import dataclasses
import re
from typing import Dict, Union, Type, Callable, TYPE_CHECKING
if TYPE_CHECKING:
from .localized_string import LocalizedString
class RequestError(Exception):
    """Raised when fetching remote bibliographic data fails."""
    pass
def parse_date(date, str_res=True):
    """Parse a date into a normalized string or a ``datetime``.

    Supported string inputs: "February 2012", "February 11, 2012",
    "2012-02-11", "2012-02" and "2012".  A ``datetime.datetime`` input
    is returned unchanged.

    :param date: the value to parse
    :param str_res: return a normalized string when True, otherwise a
        ``datetime.datetime``
    :return: the parsed value, or None when nothing matched
    """
    # BUG FIX: was `date is datetime.datetime`, an identity comparison with
    # the class object itself that is never True for an instance.
    if isinstance(date, datetime.datetime):
        return date
    cases = [
        # February 2012
        (r"(?P<date>\w+\s\d{4})", "%B %Y", "%Y-%m"),
        # February 11, 2012
        (r"(?P<date>\w+\s\d{1,2},\s\d{4})", "%B %d, %Y", "%Y-%m-%d"),
        # 2012-02-11
        (r"(?P<date>\d{4}-\d{2}-\d{2})", "%Y-%m-%d", None),
        # 2012-02
        (r"(?P<date>\d{4}-\d{2})", "%Y-%m", None),
        # 2012
        (r"(?P<date>\d{4})", "%Y", None),
    ]
    for regexp, strp, strf in cases:
        m = re.match(regexp, str(date))
        if m:
            value = m.group("date")
            d = datetime.datetime.strptime(value, strp)
            if strf:
                # Re-render into the canonical form for this pattern.
                return d.strftime(strf) if str_res else d
            # BUG FIX: this branch previously returned d.strftime(strp) --
            # a string -- when str_res was False, inconsistent with the
            # datetime returned by the branch above.
            return value if str_res else d
    return None  # no recognized pattern
# TODO we don't need this for python probably
# @param array [Array]
# @return [Array<String>, String]
def single_element_array(array):
    """Convert dataclass items to dicts, unwrapping single-element arrays.

    BUG FIX: the original tested ``x is str`` -- an identity comparison
    with the ``str`` class that is never True for an instance -- so every
    element was fed to ``dataclasses.asdict``, raising TypeError for plain
    strings.  It also returned a lazy ``map`` object; a list is returned
    instead.
    """
    if len(array) > 1:
        return [x if isinstance(x, str) else dataclasses.asdict(x)
                for x in array]
    return array[0] if isinstance(array[0], str) \
        else dataclasses.asdict(array[0])
def lang_filter(target, opts={}):
    """Filter items by the language requested in *opts*.

    :param target: iterable of objects with a ``language`` attribute
    :param opts: options dict; key "lang" selects the language (read-only)
    :return: the matching items, or *target* unchanged when no language
        was requested or nothing matched
    """
    lang = opts.get("lang")
    # BUG FIX: without this guard the original evaluated `None in
    # t.language`, which raises TypeError when `language` is a plain
    # string.  (For list-valued `language` the filter was simply empty,
    # so returning `target` preserves that behavior too.)
    if lang is None:
        return target
    filtered = [t for t in target if t.language and lang in t.language]
    return filtered if filtered else target
def to_ds_instance(target: Union[Type, Callable], fail=False):
    """Return a converter that coerces a value into an instance of *target*.

    The converter passes existing instances through, expands dicts as
    keyword arguments, treats strings as a single positional argument,
    and returns anything else unchanged -- unless *fail* is True, in
    which case unconvertible values raise ValueError.

    :param target: class (or factory callable) to convert to
    :param fail: raise instead of passing unconvertible values through
    """
    def f(x):
        if isinstance(x, target):
            return x
        elif isinstance(x, dict):
            return target(**x)
        elif isinstance(x, str):
            return target(x)
        elif fail:
            # BUG FIX: the original constructed this ValueError but never
            # raised it, silently falling through and returning None.
            raise ValueError(
                f"Unknown how to convert {type(x).__name__} to {target}")
        else:
            return x
    return f
def delegate(to, *methods):
    """Class decorator forwarding *methods* to the attribute named *to*.

    https://stackoverflow.com/a/55563139/902217
    """
    def dec(klass):
        def make_forwarder(method_name):
            # Bind the name in a helper so each forwarder keeps its own
            # method_name instead of sharing the loop variable.
            def forwarder(self, *args, **kwargs):
                return getattr(getattr(self, to), method_name)(*args, **kwargs)
            return forwarder

        for method_name in methods:
            setattr(klass, method_name, make_forwarder(method_name))
        return klass

    return dec
def dict_replace_key(d: Dict, keys_to_replace: Dict) -> Dict:
    """Rename keys of *d* in place according to *keys_to_replace*.

    :param d: the dict to mutate
    :param keys_to_replace: mapping of ``{old_key: new_key}``; missing
        old keys are ignored
    :return: the same (mutated) dict
    """
    for old_key in list(keys_to_replace):
        if old_key in d:
            d[keys_to_replace[old_key]] = d.pop(old_key)
    return d
from dataclasses import dataclass
from enum import Enum
import logging
import xml.etree.ElementTree as ET
from .formatted_ref import FormattedRef
from .localized_string import LocalizedString
from .typed_title_string import TypedTitleString
class SeriesType(str, Enum):
    """Allowed values for ``Series.type``."""

    MAIN = "main"
    ALT = "alt"
    JOURNAL = "journal"

    @classmethod
    def has_value(cls, value):
        """Return True when *value* is one of the enum's string values."""
        return value in {member.value for member in cls}
@dataclass(frozen=True)
class Series:
    """A bibliographic series (e.g. a journal) a document belongs to.

    Either a structured ``title`` (a ``TypedTitleString``) or a
    preformatted ``formattedref`` must be supplied; ``__post_init__``
    enforces this.
    """

    # Series type; expected to be one of SeriesType's values ("main",
    # "alt", "journal") -- an unknown value only triggers a warning.
    type: str = None
    # Preformatted reference; when present it replaces the structured
    # fields in the XML output.
    formattedref: FormattedRef = None
    title: TypedTitleString = None
    place: str = None
    organization: str = None
    abbreviation: LocalizedString = None
    # First/last bounds of the series; "from" is a Python keyword, hence
    # the trailing underscore (serialized as <from>).
    from_: str = None
    to: str = None
    number: str = None
    partnumber: str = None

    def __post_init__(self):
        # Require either an exact TypedTitleString title or a formattedref.
        if not(type(self.title) is TypedTitleString or self.formattedref):
            raise ValueError("arg `title` or `formattedref` should present")
        # Unknown types are tolerated with a warning rather than an error.
        if self.type and not SeriesType.has_value(self.type):
            logging.warning(
                f"[relaton-bib] Series type is invalid: {self.type}")

    # to_hash -> dataclasses.asdict

    def to_xml(self, parent, opts={}):
        """Render as a <series> XML element.

        :param parent: parent element, or None for a standalone element
        :param opts: serialization options (unused here)
        :return: the created element
        """
        name = "series"
        node = ET.Element(name) if parent is None \
            else ET.SubElement(parent, name)
        if self.formattedref:
            # A preformatted reference replaces all structured fields.
            self.formattedref.to_xml(node)
        else:
            # Emit only the populated structured fields, in fixed order.
            if self.title:
                self.title.to_xml(ET.SubElement(node, "title"))
            if self.place:
                ET.SubElement(node, "place").text = self.place
            if self.organization:
                ET.SubElement(node, "organization").text = self.organization
            if self.abbreviation:
                self.abbreviation.to_xml(ET.SubElement(node, "abbreviation"))
            if self.from_:
                ET.SubElement(node, "from").text = self.from_
            if self.to:
                ET.SubElement(node, "to").text = self.to
            if self.number:
                ET.SubElement(node, "number").text = self.number
            if self.partnumber:
                ET.SubElement(node, "partnumber").text = self.partnumber
        if self.type:
            node.attrib["type"] = self.type
        return node

    def to_asciibib(self, prefix="", count=1):
        """Render in AsciiBib format under ``<prefix>.series.*`` keys.

        :param prefix: attribute-path prefix
        :param count: number of sibling series; >1 emits a marker line
        """
        pref = f"{prefix}.series" if prefix else "series"
        lines = [f"{pref}::"] if count > 1 else []
        if self.type:
            lines.append(f"{pref}.type:: {self.type}")
        if self.formattedref:
            lines.append(self.formattedref.to_asciibib(pref))
        if self.title:
            lines.append(self.title.to_asciibib(pref))
        if self.place:
            lines.append(f"{pref}.place:: {self.place}")
        if self.organization:
            lines.append(f"{pref}.organization:: {self.organization}")
        if self.abbreviation:
            lines.append(self.abbreviation.to_asciibib(f"{pref}.abbreviation"))
        if self.from_:
            lines.append(f"{pref}.from:: {self.from_}")
        if self.to:
            lines.append(f"{pref}.to:: {self.to}")
        if self.number:
            lines.append(f"{pref}.number:: {self.number}")
        if self.partnumber:
            lines.append(f"{pref}.partnumber:: {self.partnumber}")
        return "\n".join(lines)
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Optional
from typing import Union
from poetry.core.semver.helpers import parse_constraint
from poetry.core.version.markers import parse_marker
if TYPE_CHECKING:
from .types import DependencyTypes
from poetry.core.semver.helpers import VersionTypes
from .package import Package
from .utils.utils import create_nested_marker
class ProjectPackage(Package):
    """The root package of a project (the package under development).

    Extends ``Package`` with build configuration, explicit package
    include/exclude lists and user-defined project URLs.
    """

    def __init__(
        self,
        name: str,
        version: Union[str, "VersionTypes"],
        pretty_version: Optional[str] = None,
    ) -> None:
        super(ProjectPackage, self).__init__(name, version, pretty_version)
        self.build_config = dict()  # contents of the build configuration table
        self.packages = []  # explicit package mappings to include
        self.include = []
        self.exclude = []
        self.custom_urls = {}  # extra project URLs merged into `urls`
        if self._python_versions == "*":
            # Wildcard python requirement is normalized to a concrete range.
            self._python_constraint = parse_constraint("~2.7 || >=3.4")

    @property
    def build_script(self) -> Optional[str]:
        # Optional build script ("script" key of the build config).
        return self.build_config.get("script")

    def is_root(self) -> bool:
        # The project package is always the root of the dependency graph.
        return True

    def to_dependency(self) -> Union["DependencyTypes"]:
        """Return a dependency describing this package, flagged as root."""
        dependency = super(ProjectPackage, self).to_dependency()
        dependency.is_root = True
        return dependency

    @property
    def python_versions(self) -> Union[str, "VersionTypes"]:
        return self._python_versions

    @python_versions.setter
    def python_versions(self, value: Union[str, "VersionTypes"]) -> None:
        # Setting the supported python versions also refreshes the derived
        # constraint and environment marker.
        from poetry.core.semver.version_range import VersionRange
        self._python_versions = value
        if value == "*" or value == VersionRange():
            # Wildcard: fall back to the same concrete range as __init__.
            value = "~2.7 || >=3.4"
        self._python_constraint = parse_constraint(value)
        self._python_marker = parse_marker(
            create_nested_marker("python_version", self._python_constraint)
        )

    @property
    def urls(self) -> Dict[str, Any]:
        # Standard package URLs merged with (and overridden by) custom ones.
        urls = super(ProjectPackage, self).urls
        urls.update(self.custom_urls)
        return urls

    def build_should_generate_setup(self) -> bool:
        # Whether a setup file should be generated; defaults to True.
        return self.build_config.get("generate-setup-file", True)
from pathlib import Path
from typing import TYPE_CHECKING
from typing import FrozenSet
from typing import List
from typing import Optional
from typing import Union
if TYPE_CHECKING:
from .constraints import BaseConstraint # noqa
from ..semver.helpers import VersionTypes
from .dependency import Dependency
from .utils.utils import path_to_url
class DirectoryDependency(Dependency):
    """A dependency on a local directory containing a Python package.

    The directory must contain either a ``setup.py`` or a
    ``pyproject.toml`` with a ``[tool.poetry]`` section.
    """

    def __init__(
        self,
        name: str,
        path: Path,
        optional: bool = False,
        base: Optional[Path] = None,
        develop: bool = False,
        extras: Optional[Union[List[str], FrozenSet[str]]] = None,
    ) -> None:
        """
        :param name: package name
        :param path: directory path, possibly relative to *base*
        :param base: base directory for resolving a relative *path*
            (defaults to the current working directory)
        :param develop: install in development (editable) mode
        :raises FileNotFoundError: when the directory is missing, is a
            file, or does not look like a Python package
        """
        from poetry.core.pyproject.project import Project
        self._path = path
        self._base = base or Path.cwd()
        self._full_path = path
        if not self._path.is_absolute():
            try:
                self._full_path = self._base.joinpath(self._path).resolve()
            except FileNotFoundError:
                # resolve() may raise when path components are missing;
                # surface the same uniform error as the checks below.
                raise FileNotFoundError("Directory {} does not exist".format(self._path))
        self._develop = develop
        self._supports_poetry = False
        if not self._full_path.exists():
            raise FileNotFoundError("Directory {} does not exist".format(self._path))
        if self._full_path.is_file():
            raise FileNotFoundError("{} is a file, expected a directory".format(self._path))
        # Checking content to determine actions
        setup = self._full_path / "setup.py"
        self._supports_poetry = Project.has_poetry_section(self._full_path / "pyproject.toml")
        if not setup.exists() and not self._supports_poetry:
            raise FileNotFoundError(
                "Directory {} does not seem to be a Python package".format(
                    self._full_path
                )
            )
        super(DirectoryDependency, self).__init__(
            name,
            "*",
            optional=optional,
            allows_prereleases=True,
            source_type="directory",
            source_url=self._full_path.as_posix(),
            extras=extras,
        )

    @property
    def path(self) -> Path:
        # The path as originally given (possibly relative).
        return self._path

    @property
    def full_path(self) -> Path:
        # The absolute, resolved path.
        return self._full_path

    @property
    def base(self) -> Path:
        return self._base

    @property
    def develop(self) -> bool:
        return self._develop

    def supports_poetry(self) -> bool:
        """True when a pyproject.toml with a [tool.poetry] section exists."""
        return self._supports_poetry

    def is_directory(self) -> bool:
        return True

    def with_constraint(self, constraint: "BaseConstraint") -> "DirectoryDependency":
        """Return a copy of this dependency carrying *constraint*."""
        new = DirectoryDependency(
            self.pretty_name,
            path=self.path,
            base=self.base,
            optional=self.is_optional(),
            develop=self._develop,
            extras=self._extras,
        )
        new._constraint = constraint
        new._pretty_constraint = str(constraint)
        new.is_root = self.is_root
        new.python_versions = self.python_versions
        new.marker = self.marker
        new.transitive_marker = self.transitive_marker
        for in_extra in self.in_extras:
            new.in_extras.append(in_extra)
        return new

    @property
    def base_pep_508_name(self) -> str:
        """PEP 508 direct-reference form, e.g. ``pkg @ file:///path``."""
        requirement = self.pretty_name
        if self.extras:
            requirement += "[{}]".format(",".join(self.extras))
        # Relative paths are emitted as-is; absolute ones as file:// URLs.
        path = path_to_url(self.path) if self.path.is_absolute() else self.path
        requirement += " @ {}".format(path)
        return requirement

    def __str__(self) -> str:
        if self.is_root:
            return self._pretty_name
        return "{} ({} {})".format(
            self._pretty_name, self._pretty_constraint, self._path.as_posix()
        )

    def __hash__(self) -> int:
        return hash((self._name, self._full_path.as_posix()))
class SiblingDependency(DirectoryDependency):
    """A directory dependency on a sibling project.

    Unlike a plain ``DirectoryDependency`` it carries a real version
    constraint and is always installed in development (editable) mode.
    """

    def __init__(
        self,
        name: str,
        path: Path,
        constraint: Union[str, "VersionTypes"],
        optional: bool = False,
        base: Optional[Path] = None,
        extras: Optional[Union[List[str], FrozenSet[str]]] = None,
    ):
        super(SiblingDependency, self).__init__(
            name, path, optional=optional, base=base, extras=extras, develop=True)
        self._source_type = "sibling"
        self.set_constraint(constraint)

    @property
    def base_pep_508_name(self) -> str:
        # Delegates to the generic name-and-constraint rendering instead of
        # the direct-URL form used by DirectoryDependency.
        from poetry.core.packages.dependency import base_pep_508_name_of
        return base_pep_508_name_of(self)
from typing import TYPE_CHECKING
from typing import FrozenSet
from typing import List
from typing import Optional
from typing import Union
from .dependency import Dependency
if TYPE_CHECKING:
from .constraints import BaseConstraint
class VCSDependency(Dependency):
    """
    Represents a VCS dependency (e.g. a git repository reference).
    """

    def __init__(
        self,
        name: str,
        vcs: str,
        source: str,
        branch: Optional[str] = None,
        tag: Optional[str] = None,
        rev: Optional[str] = None,
        resolved_rev: Optional[str] = None,
        directory: Optional[str] = None,
        groups: Optional[List[str]] = None,
        optional: bool = False,
        develop: bool = False,
        extras: Union[List[str], FrozenSet[str]] = None,
    ):
        """
        :param name: package name
        :param vcs: VCS kind, e.g. "git" (lower-cased into source_type)
        :param source: repository URL
        :param branch: branch to check out (alternative to tag/rev)
        :param tag: tag to check out (alternative to branch/rev)
        :param rev: revision to check out (alternative to branch/tag)
        :param resolved_rev: concrete revision the reference resolved to
        :param directory: subdirectory of the repository holding the package
        :param groups: accepted but not stored here
            -- NOTE(review): confirm whether callers expect it to be kept
        :param develop: install in development (editable) mode
        """
        self._vcs = vcs
        self._source = source
        self._branch = branch
        self._tag = tag
        self._rev = rev
        self._directory = directory
        self._develop = develop
        super(VCSDependency, self).__init__(
            name,
            "*",
            optional=optional,
            allows_prereleases=True,
            source_type=self._vcs.lower(),
            source_url=self._source,
            source_reference=branch or tag or rev,
            source_resolved_reference=resolved_rev,
            source_subdirectory=directory,
            extras=extras,
        )

    @property
    def vcs(self) -> str:
        return self._vcs

    @property
    def source(self) -> str:
        return self._source

    @property
    def branch(self) -> Optional[str]:
        return self._branch

    @property
    def tag(self) -> Optional[str]:
        return self._tag

    @property
    def rev(self) -> Optional[str]:
        return self._rev

    @property
    def directory(self) -> Optional[str]:
        return self._directory

    @property
    def develop(self) -> bool:
        return self._develop

    @property
    def reference(self) -> str:
        # First populated selector wins: branch, then tag, then rev.
        return self._branch or self._tag or self._rev

    @property
    def pretty_constraint(self) -> str:
        """Human-readable reference, e.g. "branch main" or "tag v1.0"."""
        if self._branch:
            what = "branch"
            version = self._branch
        elif self._tag:
            what = "tag"
            version = self._tag
        else:
            what = "rev"
            version = self._rev
        return "{} {}".format(what, version)

    @property
    def base_pep_508_name(self) -> str:
        """PEP 508 direct-reference form, e.g. "pkg @ git+https://...@ref"."""
        from poetry.core.vcs import git
        requirement = self.pretty_name
        parsed_url = git.ParsedUrl.parse(self._source)
        if self.extras:
            requirement += "[{}]".format(",".join(self.extras))
        if parsed_url.protocol is not None:
            requirement += " @ {}+{}".format(self._vcs, self._source)
        else:
            # Bare scp-like URLs are rewritten into explicit ssh:// form.
            requirement += " @ {}+ssh://{}".format(self._vcs, parsed_url.format())
        if self.reference:
            requirement += f"@{self.reference}"
        if self._directory:
            # BUG FIX: pip/PEP 508 VCS URLs spell the fragment as
            # "#subdirectory=<path>"; the original omitted the "=".
            requirement += f"#subdirectory={self._directory}"
        return requirement

    def is_vcs(self) -> bool:
        return True

    def accepts_prereleases(self) -> bool:
        return True

    def with_constraint(self, constraint: "BaseConstraint") -> "VCSDependency":
        """Return a copy of this dependency carrying *constraint*."""
        new = VCSDependency(
            self.pretty_name,
            self._vcs,
            self._source,
            branch=self._branch,
            tag=self._tag,
            rev=self._rev,
            resolved_rev=self._source_resolved_reference,
            directory=self.directory,
            optional=self.is_optional(),
            develop=self._develop,
            extras=self._extras,
        )
        new._constraint = constraint
        new._pretty_constraint = str(constraint)
        new.is_root = self.is_root
        new.python_versions = self.python_versions
        new.marker = self.marker
        new.transitive_marker = self.transitive_marker
        for in_extra in self.in_extras:
            new.in_extras.append(in_extra)
        return new

    def __str__(self) -> str:
        reference = self._vcs
        if self._branch:
            reference += " branch {}".format(self._branch)
        elif self._tag:
            reference += " tag {}".format(self._tag)
        elif self._rev:
            reference += " rev {}".format(self._rev)
        return "{} ({} {})".format(self._pretty_name, self._constraint, reference)

    def __hash__(self) -> int:
        # NOTE(review): _source is not part of the hash, so dependencies on
        # different repositories with the same name/reference hash equal --
        # confirm this matches the equality defined on the base class.
        return hash((self._name, self._vcs, self._branch, self._tag, self._rev))
import operator
from typing import TYPE_CHECKING
from typing import Any
from typing import Union
from .base_constraint import BaseConstraint
from .empty_constraint import EmptyConstraint
if TYPE_CHECKING:
from . import ConstraintTypes # noqa
class Constraint(BaseConstraint):
    """A single constraint over a version string, limited to the "=="
    (exact) and "!=" (exclusion) operators."""

    OP_EQ = operator.eq
    OP_NE = operator.ne

    # Mapping between textual operators and callables; "=" is accepted as
    # an alias of "==".
    _trans_op_str = {"=": OP_EQ, "==": OP_EQ, "!=": OP_NE}
    _trans_op_int = {OP_EQ: "==", OP_NE: "!="}

    def __init__(self, version: str, operator: str = "==") -> None:
        if operator == "=":
            operator = "=="
        self._version = version
        self._operator = operator
        self._op = self._trans_op_str[operator]

    @property
    def version(self) -> str:
        return self._version

    @property
    def operator(self) -> str:
        return self._operator

    def allows(self, other: "ConstraintTypes") -> bool:
        """Whether every version matching *other* also matches self."""
        is_equal_op = self._operator == "=="
        is_non_equal_op = self._operator == "!="
        is_other_equal_op = other.operator == "=="
        is_other_non_equal_op = other.operator == "!="
        if is_equal_op and is_other_equal_op:
            # Two exact constraints: allowed only for the same version.
            return self._version == other.version
        if (
            is_equal_op
            and is_other_non_equal_op
            or is_non_equal_op
            and is_other_equal_op
            or is_non_equal_op
            and is_other_non_equal_op
        ):
            # Any combination involving "!=" requires differing versions.
            return self._version != other.version
        return False

    def allows_all(self, other: "ConstraintTypes") -> bool:
        if not isinstance(other, Constraint):
            # Only the empty constraint is a subset of a single constraint.
            return other.is_empty()
        return other == self

    def allows_any(self, other: "ConstraintTypes") -> bool:
        if isinstance(other, Constraint):
            is_non_equal_op = self._operator == "!="
            is_other_non_equal_op = other.operator == "!="
            if is_non_equal_op and is_other_non_equal_op:
                # Two exclusions always overlap unless they are identical.
                return self._version != other.version
        return other.allows(self)

    def difference(
        self, other: "ConstraintTypes"
    ) -> Union["Constraint", "EmptyConstraint"]:
        # Removing a superset leaves nothing; otherwise self is unchanged.
        if other.allows(self):
            return EmptyConstraint()
        return self

    def intersect(self, other: "ConstraintTypes") -> "ConstraintTypes":
        from .multi_constraint import MultiConstraint
        if isinstance(other, Constraint):
            if other == self:
                return self
            if self.operator == "!=" and other.operator == "==" and self.allows(other):
                # Exclusion intersected with a compatible exact pin: the pin.
                return other
            if other.operator == "!=" and self.operator == "==" and other.allows(self):
                return self
            if other.operator == "!=" and self.operator == "!=":
                # Two different exclusions combine into a multi-constraint.
                return MultiConstraint(self, other)
            return EmptyConstraint()
        # Delegate to the richer constraint type.
        return other.intersect(self)

    def union(self, other: "ConstraintTypes") -> "ConstraintTypes":
        if isinstance(other, Constraint):
            from .union_constraint import UnionConstraint
            return UnionConstraint(self, other)
        return other.union(self)

    def is_any(self) -> bool:
        return False

    def is_empty(self) -> bool:
        return False

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Constraint):
            return NotImplemented
        return (self.version, self.operator) == (other.version, other.operator)

    def __hash__(self) -> int:
        return hash((self._operator, self._version))

    def __str__(self) -> str:
        # "==" is implicit in the textual form (e.g. "1.0", "!=1.0").
        return "{}{}".format(
            self._operator if self._operator != "==" else "", self._version
        )
from typing import TYPE_CHECKING
from typing import Tuple
from typing import Union
from .base_constraint import BaseConstraint
from .constraint import Constraint
from .empty_constraint import EmptyConstraint
from .multi_constraint import MultiConstraint
if TYPE_CHECKING:
from . import ConstraintTypes # noqa
class UnionConstraint(BaseConstraint):
    """A disjunction ("or") of several :class:`Constraint` instances."""

    def __init__(self, *constraints: Constraint) -> None:
        self._constraints = constraints

    @property
    def constraints(self) -> Tuple[Constraint]:
        return self._constraints

    def allows(
        self, other: Union[Constraint, MultiConstraint, "UnionConstraint"]
    ) -> bool:
        # A union allows whatever any of its members allows.
        for constraint in self._constraints:
            if constraint.allows(other):
                return True
        return False

    def allows_any(self, other: "ConstraintTypes") -> bool:
        """Whether self and *other* have at least one version in common."""
        if other.is_empty():
            return False
        if other.is_any():
            return True
        if isinstance(other, Constraint):
            constraints = [other]
        else:
            constraints = other.constraints
        # Any overlapping pair of members is enough.
        for our_constraint in self._constraints:
            for their_constraint in constraints:
                if our_constraint.allows_any(their_constraint):
                    return True
        return False

    def allows_all(self, other: "ConstraintTypes") -> bool:
        """Whether every version matching *other* also matches self."""
        if other.is_any():
            return False
        if other.is_empty():
            return True
        if isinstance(other, Constraint):
            constraints = [other]
        else:
            constraints = other.constraints
        # Merge-style scan: advance through their constraints as long as
        # some of our constraints covers each one; all of theirs must be
        # consumed.  NOTE(review): this relies on both tuples being in a
        # compatible order -- confirm the ordering invariant upstream.
        our_constraints = iter(self._constraints)
        their_constraints = iter(constraints)
        our_constraint = next(our_constraints, None)
        their_constraint = next(their_constraints, None)
        while our_constraint and their_constraint:
            if our_constraint.allows_all(their_constraint):
                their_constraint = next(their_constraints, None)
            else:
                our_constraint = next(our_constraints, None)
        return their_constraint is None

    def intersect(self, other: "ConstraintTypes") -> "ConstraintTypes":
        if other.is_any():
            return self
        if other.is_empty():
            return other
        if isinstance(other, Constraint):
            # Intersecting with a single constraint keeps it iff allowed.
            if self.allows(other):
                return other
            return EmptyConstraint()
        # Pairwise intersections of members, deduplicated.
        new_constraints = []
        for our_constraint in self._constraints:
            for their_constraint in other.constraints:
                intersection = our_constraint.intersect(their_constraint)
                if not intersection.is_empty() and intersection not in new_constraints:
                    new_constraints.append(intersection)
        if not new_constraints:
            return EmptyConstraint()
        return UnionConstraint(*new_constraints)

    def union(self, other: Constraint) -> "UnionConstraint":
        # NOTE(review): when *other* is not a plain Constraint this falls
        # through and implicitly returns None -- confirm callers never pass
        # multi/union constraints here.
        if isinstance(other, Constraint):
            constraints = self._constraints
            if other not in self._constraints:
                constraints += (other,)
            return UnionConstraint(*constraints)

    def __eq__(self, other: "ConstraintTypes") -> bool:
        if not isinstance(other, UnionConstraint):
            return False
        # Order-insensitive comparison of the member constraints.
        return sorted(
            self._constraints, key=lambda c: (c.operator, c.version)
        ) == sorted(other.constraints, key=lambda c: (c.operator, c.version))

    def __str__(self) -> str:
        constraints = []
        for constraint in self._constraints:
            constraints.append(str(constraint))
        # "{}".format(" || ") is just " || "; kept verbatim.
        return "{}".format(" || ").join(constraints)
from typing import TYPE_CHECKING
from typing import Any
from typing import Tuple
from .base_constraint import BaseConstraint
from .constraint import Constraint
if TYPE_CHECKING:
from . import ConstraintTypes # noqa
class MultiConstraint(BaseConstraint):
    """A conjunction ("and") of negative (``!=``) constraints.

    Every member constraint must allow a value for the multi-constraint
    to allow it.
    """

    def __init__(self, *constraints: Constraint) -> None:
        if any(c.operator == "==" for c in constraints):
            raise ValueError(
                "A multi-constraint can only be comprised of negative constraints"
            )

        self._constraints = constraints

    @property
    def constraints(self) -> Tuple[Constraint, ...]:
        # Fixed annotation: this is a variable-length tuple, not a 1-tuple.
        return self._constraints

    def allows(self, other: "ConstraintTypes") -> bool:
        """Return True only if every member constraint allows ``other``."""
        for constraint in self._constraints:
            if not constraint.allows(other):
                return False

        return True

    def allows_all(self, other: "ConstraintTypes") -> bool:
        """Return True if every constraint in ``other`` is covered by us."""
        if other.is_any():
            return False

        if other.is_empty():
            return True

        if isinstance(other, Constraint):
            return self.allows(other)

        # Two-pointer sweep: advance through their constraints while ours
        # keep covering them; everything was covered iff their iterator
        # was fully consumed.
        our_constraints = iter(self._constraints)
        their_constraints = iter(other.constraints)

        our_constraint = next(our_constraints, None)
        their_constraint = next(their_constraints, None)

        while our_constraint and their_constraint:
            if our_constraint.allows_all(their_constraint):
                their_constraint = next(their_constraints, None)
            else:
                our_constraint = next(our_constraints, None)

        return their_constraint is None

    def allows_any(self, other: "ConstraintTypes") -> bool:
        """Return True if ``other`` overlaps this multi-constraint."""
        if other.is_any():
            return True

        if other.is_empty():
            return True

        if isinstance(other, Constraint):
            return self.allows(other)

        if isinstance(other, MultiConstraint):
            for c1 in self.constraints:
                for c2 in other.constraints:
                    if c1.allows(c2):
                        return True

        # Previously the method fell off the end (returning None) for any
        # other type; None is falsy, so returning False here preserves
        # behavior while honoring the declared bool return type.
        return False

    def intersect(self, other: Constraint) -> "ConstraintTypes":
        """Return the intersection with ``other``.

        May return a plain ``Constraint`` when only a single member
        remains (annotation fixed accordingly).
        """
        if isinstance(other, Constraint):
            constraints = self._constraints
            if other not in constraints:
                constraints += (other,)
        else:
            constraints = (other,)

        if len(constraints) == 1:
            return constraints[0]

        return MultiConstraint(*constraints)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, MultiConstraint):
            return False

        # Order-insensitive comparison: sort both sides the same way.
        return sorted(
            self._constraints, key=lambda c: (c.operator, c.version)
        ) == sorted(other.constraints, key=lambda c: (c.operator, c.version))

    def __str__(self) -> str:
        # Simplified from `"{}".format(", ").join(...)`, which built the
        # same separator in a roundabout way.
        return ", ".join(str(constraint) for constraint in self._constraints)
import posixpath
import re
import urllib.parse as urlparse
from typing import Any
from typing import Optional
from typing import Tuple
from .utils import path_to_url
from .utils import splitext
class Link:
    """A parsed link to a distribution file, e.g. from a PEP 503 index page.

    Equality, ordering and hashing are all defined purely on the URL string.
    """

    def __init__(
        self,
        url: str,
        comes_from: Optional[Any] = None,
        requires_python: Optional[str] = None,
    ) -> None:
        """
        Object representing a parsed link from https://pypi.python.org/simple/*

        url:
            url of the resource pointed to (href of the link)
        comes_from:
            instance of HTMLPage where the link was found, or string.
        requires_python:
            String containing the `Requires-Python` metadata field, specified
            in PEP 345. This may be specified by a data-requires-python
            attribute in the HTML link tag, as described in PEP 503.
        """

        # url can be a UNC windows share
        if url.startswith("\\\\"):
            url = path_to_url(url)

        self.url = url
        self.comes_from = comes_from
        # Normalize the empty string to None so truth-testing is uniform.
        self.requires_python = requires_python if requires_python else None

    def __str__(self) -> str:
        # Modernized from %-formatting to f-strings; output is unchanged.
        if self.requires_python:
            rp = f" (requires-python:{self.requires_python})"
        else:
            rp = ""
        if self.comes_from:
            return f"{self.url} (from {self.comes_from}){rp}"
        else:
            return str(self.url)

    def __repr__(self) -> str:
        return f"<Link {self}>"

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url == other.url

    def __ne__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url != other.url

    def __lt__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url < other.url

    def __le__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url <= other.url

    def __gt__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url > other.url

    def __ge__(self, other: Any) -> bool:
        if not isinstance(other, Link):
            return NotImplemented
        return self.url >= other.url

    def __hash__(self) -> int:
        return hash(self.url)

    @property
    def filename(self) -> str:
        """Unquoted basename of the URL path (falls back to the netloc)."""
        _, netloc, path, _, _ = urlparse.urlsplit(self.url)
        name = posixpath.basename(path.rstrip("/")) or netloc
        name = urlparse.unquote(name)
        return name

    @property
    def scheme(self) -> str:
        return urlparse.urlsplit(self.url)[0]

    @property
    def netloc(self) -> str:
        return urlparse.urlsplit(self.url)[1]

    @property
    def path(self) -> str:
        return urlparse.unquote(urlparse.urlsplit(self.url)[2])

    def splitext(self) -> Tuple[str, str]:
        """Split the path's basename into (root, extension)."""
        return splitext(posixpath.basename(self.path.rstrip("/")))

    @property
    def ext(self) -> str:
        return self.splitext()[1]

    @property
    def url_without_fragment(self) -> str:
        scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
        return urlparse.urlunsplit((scheme, netloc, path, query, None))

    _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")

    @property
    def egg_fragment(self) -> Optional[str]:
        """Value of the ``egg=`` URL fragment, or None if absent."""
        match = self._egg_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")

    @property
    def subdirectory_fragment(self) -> Optional[str]:
        """Value of the ``subdirectory=`` URL fragment, or None if absent."""
        match = self._subdirectory_fragment_re.search(self.url)
        if not match:
            return None
        return match.group(1)

    _hash_re = re.compile(r"(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)")

    @property
    def hash(self) -> Optional[str]:
        """Hex digest embedded in the URL, or None if absent."""
        match = self._hash_re.search(self.url)
        if match:
            return match.group(2)
        return None

    @property
    def hash_name(self) -> Optional[str]:
        """Name of the hash algorithm embedded in the URL, or None."""
        match = self._hash_re.search(self.url)
        if match:
            return match.group(1)
        return None

    @property
    def show_url(self) -> str:
        """Basename of the URL with query string and fragment stripped."""
        return posixpath.basename(self.url.split("#", 1)[0].split("?", 1)[0])

    @property
    def is_wheel(self) -> bool:
        return self.ext == ".whl"

    @property
    def is_wininst(self) -> bool:
        return self.ext == ".exe"

    @property
    def is_egg(self) -> bool:
        return self.ext == ".egg"

    @property
    def is_sdist(self) -> bool:
        return self.ext in {".tar.bz2", ".tar.gz", ".zip"}

    @property
    def is_artifact(self) -> bool:
        """
        Determines if this points to an actual artifact (e.g. a tarball) or if
        it points to an "abstract" thing like a path or a VCS location.
        """
        if self.scheme in ["ssh", "git", "hg", "bzr", "sftp", "svn"]:
            return False

        return True
from __future__ import absolute_import, division, print_function
import re
from ._typing import TYPE_CHECKING, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
if TYPE_CHECKING:  # pragma: no cover
    from typing import FrozenSet, NewType, Tuple, Union

    # A wheel build tag: either the empty tuple or (build_number, suffix).
    BuildTag = Union[Tuple[()], Tuple[int, str]]
    NormalizedName = NewType("NormalizedName", str)
else:
    # At runtime plain builtins stand in for the precise aliases above.
    BuildTag = tuple
    NormalizedName = str


class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found, users should refer to PEP 427.

    Raised by ``parse_wheel_filename`` below.
    """


class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found, users should refer to the packaging user guide.

    Raised by ``parse_sdist_filename`` below.
    """


# Runs of "-", "_" or "." collapse to a single "-" (PEP 503 normalization).
_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name):
    # type: (str) -> NormalizedName
    """Normalize a project name per PEP 503: lower-case, with runs of
    ``-``/``_``/``.`` collapsed to a single ``-``."""
    normalized = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, normalized)
def canonicalize_version(version):
    # type: (Union[Version, str]) -> Union[Version, str]
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
    """
    if not isinstance(version, Version):
        try:
            version = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version

    pieces = []

    # Epoch (only when non-zero).
    if version.epoch != 0:
        pieces.append("{0}!".format(version.epoch))

    # Release segment, with trailing ".0" components stripped to normalize.
    release = ".".join(str(part) for part in version.release)
    pieces.append(re.sub(r"(\.0)+$", "", release))

    # Pre-release
    if version.pre is not None:
        pieces.append("".join(str(part) for part in version.pre))

    # Post-release
    if version.post is not None:
        pieces.append(".post{0}".format(version.post))

    # Development release
    if version.dev is not None:
        pieces.append(".dev{0}".format(version.dev))

    # Local version segment
    if version.local is not None:
        pieces.append("+{0}".format(version.local))

    return "".join(pieces)
def parse_wheel_filename(filename):
    # type: (str) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]
    """Split a wheel filename into (name, version, build tag, tags).

    Raises InvalidWheelFilename when the filename violates PEP 427.
    """
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            "Invalid wheel filename (extension must be '.whl'): {0}".format(filename)
        )

    stem = filename[:-4]
    dashes = stem.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            "Invalid wheel filename (wrong number of parts): {0}".format(stem)
        )

    # Split only the leading fields; the compressed tag set stays intact.
    parts = stem.split("-", dashes - 2)

    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename("Invalid project name: {0}".format(stem))
    name = canonicalize_name(name_part)

    version = Version(parts[1])

    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                "Invalid build number: {0} in '{1}'".format(build_part, stem)
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()

    tags = parse_tag(parts[-1])
    return (name, version, build, tags)
def parse_sdist_filename(filename):
    # type: (str) -> Tuple[NormalizedName, Version]
    """Split an sdist filename into (normalized name, version).

    Raises InvalidSdistFilename for anything not named ``{name}-{version}.tar.gz``.
    """
    if not filename.endswith(".tar.gz"):
        raise InvalidSdistFilename(
            "Invalid sdist filename (extension must be '.tar.gz'): {0}".format(filename)
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    stem = filename[:-7]
    name_part, sep, version_part = stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename("Invalid sdist filename: {0}".format(filename))

    return (canonicalize_name(name_part), Version(version_part))
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pyparsing import ( # noqa: N817
Forward,
Group,
Literal as L,
ParseException,
ParseResults,
QuotedString,
ZeroOrMore,
stringEnd,
stringStart,
)
from ._compat import string_types
from ._typing import TYPE_CHECKING
from .specifiers import InvalidSpecifier, Specifier
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
Operator = Callable[[str, str], bool]
# Public names exported by this module.
__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]


class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.

    Raised by ``Marker`` when the marker string fails to parse.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.

    Raised by ``_eval_op`` for unrecognized operators.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.

    Raised by ``_get_env`` when a marker variable is missing.
    """
class Node(object):
    """Base AST node for parsed markers; wraps a single value."""

    def __init__(self, value):
        # type: (Any) -> None
        self.value = value

    def __str__(self):
        # type: () -> str
        return str(self.value)

    def __repr__(self):
        # type: () -> str
        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))

    def serialize(self):
        # type: () -> str
        # Subclasses must render themselves back into marker syntax.
        raise NotImplementedError


class Variable(Node):
    """An environment-variable reference; serializes to its bare name."""

    def serialize(self):
        # type: () -> str
        return str(self)


class Value(Node):
    """A literal string; serializes wrapped in double quotes."""

    def serialize(self):
        # type: () -> str
        return '"{0}"'.format(self)


class Op(Node):
    """A comparison/containment operator; serializes to its symbol."""

    def serialize(self):
        # type: () -> str
        return str(self)
# --- pyparsing grammar for PEP 508 environment markers -------------------

# All recognized marker variable names (PEP 508 names, PEP-345 dotted
# spellings, and legacy setuptools aliases).  pyparsing tries alternatives
# in order, so longer names appear before their prefixes.
VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name")  # PEP-345
    | L("sys.platform")  # PEP-345
    | L("platform.version")  # PEP-345
    | L("platform.machine")  # PEP-345
    | L("platform.python_implementation")  # PEP-345
    | L("python_implementation")  # undocumented setuptools legacy
    | L("extra")  # PEP-508
)

# Map deprecated/dotted spellings onto their canonical names.
ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

# Version comparison operators; "===" precedes "==" so it is not split.
VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

# String literals may use either quote style.
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

# One comparison: <var-or-value> <op> <var-or-value>, coerced to a tuple.
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

# Recursive expression: atoms joined by and/or; parentheses produce groups.
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

# A complete marker must span the whole input string.
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
    # type: (Union[ParseResults, List[Any]]) -> List[Any]
    """Recursively convert pyparsing ParseResults into plain nested lists."""
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(item) for item in results]
def _format_marker(marker, first=True):
    # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
    """Render a coerced marker structure back into marker syntax.

    ``first`` is True only at the outermost level, where surrounding
    parentheses would be redundant.
    """
    assert isinstance(marker, (list, tuple, string_types))

    # A [[...]] wrapper is a single-item list whose item is itself a list;
    # unwrap it so no extraneous parentheses are emitted around the whole
    # expression.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        pieces = [_format_marker(m, first=False) for m in marker]
        joined = " ".join(pieces)
        return joined if first else "(" + joined + ")"

    if isinstance(marker, tuple):
        return " ".join(node.serialize() for node in marker)

    return marker
# Fallback comparison operators, used by _eval_op when the right-hand side
# does not form a valid PEP 440 specifier.
_operators = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}  # type: Dict[str, Operator]
def _eval_op(lhs, op, rhs):
    # type: (str, Op, str) -> bool
    """Evaluate ``lhs <op> rhs``, preferring PEP 440 specifier semantics.

    Falls back to plain string/containment comparison when the operator
    plus right-hand side is not a valid specifier; raises
    UndefinedComparison for unknown operators.
    """
    try:
        spec = Specifier(op.serialize() + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)

    oper = _operators.get(op.serialize())  # type: Optional[Operator]
    if oper is None:
        raise UndefinedComparison(
            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
        )

    return oper(lhs, rhs)
class Undefined(object):
pass
_undefined = Undefined()
def _get_env(environment, name):
# type: (Dict[str, str], str) -> str
value = environment.get(name, _undefined) # type: Union[str, Undefined]
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
    # type: (List[Any], Dict[str, str]) -> bool
    """Evaluate a coerced marker structure against an environment dict.

    ``groups`` accumulates conjunctions: items within an inner list are
    AND-ed together, and each "or" starts a new inner list; the inner
    lists are then OR-ed.
    """
    groups = [[]]  # type: List[List[bool]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))

        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            # Whichever side is a Variable is resolved from the environment;
            # the other side contributes its literal value.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)

            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)
def format_full_version(info):
    # type: (sys._version_info) -> str
    """Format a version_info-like object as ``X.Y.Z`` plus a pre-release
    suffix (the release level's first letter and the serial) when the
    release level is not "final"."""
    version = "{0.major}.{0.minor}.{0.micro}".format(info)
    level = info.releaselevel
    if level != "final":
        version += level[0] + str(info.serial)
    return version


def default_environment():
    # type: () -> Dict[str, str]
    """Return the PEP 508 marker environment of the running interpreter."""
    if hasattr(sys, "implementation"):
        # `sys.implementation` exists on Python 3; the type: ignore comments
        # keep mypy quiet when checking in --py27 mode.
        implementation_version = format_full_version(sys.implementation.version)  # type: ignore
        implementation_name = sys.implementation.name  # type: ignore
    else:
        implementation_version = "0"
        implementation_name = ""

    return {
        "implementation_name": implementation_name,
        "implementation_version": implementation_version,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
class Marker(object):
    """A parsed PEP 508 environment marker."""

    def __init__(self, marker):
        # type: (str) -> None
        try:
            parsed = MARKER.parseString(marker)
        except ParseException as e:
            # Show a small window of the input around the failure point.
            window = marker[e.loc : e.loc + 8]
            raise InvalidMarker(
                "Invalid marker: {0!r}, parse error at {1!r}".format(marker, window)
            )
        self._markers = _coerce_parse_result(parsed)

    def __str__(self):
        # type: () -> str
        return _format_marker(self._markers)

    def __repr__(self):
        # type: () -> str
        return "<Marker({0!r})>".format(str(self))

    def evaluate(self, environment=None):
        # type: (Optional[Dict[str, str]]) -> bool
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)

        return _evaluate_markers(self._markers, current_environment)
from .exceptions import GrammarError
from .lexer import Token
from .tree import Tree
from .visitors import InlineTransformer # XXX Deprecated
from .visitors import Transformer_InPlace
from .visitors import _vargs_meta, _vargs_meta_inline
###{standalone
from functools import partial, wraps
from itertools import repeat, product
class ExpandSingleChild:
    """Node-builder wrapper that collapses single-child results: when
    exactly one child is present, it is returned directly instead of
    being wrapped in a new node."""

    def __init__(self, node_builder):
        self.node_builder = node_builder

    def __call__(self, children):
        if len(children) != 1:
            return self.node_builder(children)
        return children[0]
class PropagatePositions:
    """Node-builder wrapper that copies position metadata onto the result.

    The new node's meta takes line/column/start_pos from the first child
    carrying position info, and end_line/end_column/end_pos from the last
    such child (found by scanning the children in reverse).
    """

    def __init__(self, node_builder):
        self.node_builder = node_builder

    def __call__(self, children):
        res = self.node_builder(children)

        # local reference to Tree.meta reduces number of presence checks
        if isinstance(res, Tree):
            res_meta = res.meta

            # Forward scan: copy start coordinates from the first child
            # that has them (a non-empty Tree meta, or any Token).
            for c in children:
                if isinstance(c, Tree):
                    child_meta = c.meta
                    if not child_meta.empty:
                        res_meta.line = child_meta.line
                        res_meta.column = child_meta.column
                        res_meta.start_pos = child_meta.start_pos
                        res_meta.empty = False
                        break
                elif isinstance(c, Token):
                    res_meta.line = c.line
                    res_meta.column = c.column
                    res_meta.start_pos = c.pos_in_stream
                    res_meta.empty = False
                    break

            # Reverse scan: copy end coordinates from the last child that
            # has them.
            for c in reversed(children):
                if isinstance(c, Tree):
                    child_meta = c.meta
                    if not child_meta.empty:
                        res_meta.end_line = child_meta.end_line
                        res_meta.end_column = child_meta.end_column
                        res_meta.end_pos = child_meta.end_pos
                        res_meta.empty = False
                        break
                elif isinstance(c, Token):
                    res_meta.end_line = c.end_line
                    res_meta.end_column = c.end_column
                    res_meta.end_pos = c.end_pos
                    res_meta.empty = False
                    break

        return res
class ChildFilter:
    """Node-builder wrapper that keeps only selected children.

    ``to_include`` holds ``(index, expand, nones_before)`` triples: the
    child at ``index`` is kept, optionally expanded in place (its own
    children are inlined), and preceded by ``nones_before`` placeholder
    ``None`` entries.  ``append_none`` placeholders are appended last.
    """

    def __init__(self, to_include, append_none, node_builder):
        self.node_builder = node_builder
        self.to_include = to_include
        self.append_none = append_none

    def __call__(self, children):
        kept = []
        for index, expand, nones_before in self.to_include:
            if nones_before:
                kept += [None] * nones_before
            if expand:
                kept += children[index].children
            else:
                kept.append(children[index])

        if self.append_none:
            kept += [None] * self.append_none

        return self.node_builder(kept)


class ChildFilterLALR(ChildFilter):
    "Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)"

    def __call__(self, children):
        kept = []
        for index, expand, nones_before in self.to_include:
            if nones_before:
                kept += [None] * nones_before
            if expand:
                if kept:
                    kept += children[index].children
                else:
                    # Left-recursion fast path: reuse (and potentially
                    # mutate) the child's own list instead of copying it.
                    kept = children[index].children
            else:
                kept.append(children[index])

        if self.append_none:
            kept += [None] * self.append_none

        return self.node_builder(kept)


class ChildFilterLALR_NoPlaceholders(ChildFilter):
    "Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)"

    def __init__(self, to_include, node_builder):
        # No placeholder support: entries are plain (index, expand) pairs.
        self.node_builder = node_builder
        self.to_include = to_include

    def __call__(self, children):
        kept = []
        for index, expand in self.to_include:
            if expand:
                if kept:
                    kept += children[index].children
                else:
                    # Left-recursion fast path (see ChildFilterLALR).
                    kept = children[index].children
            else:
                kept.append(children[index])

        return self.node_builder(kept)
def _should_expand(sym):
return not sym.is_term and sym.name.startswith('_')
def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices):
    """Build a ChildFilter factory for ``expansion``, or return None when no
    filtering, expansion or placeholder insertion is needed.

    ``_empty_indices`` (used with maybe_placeholders) is a boolean mask in
    which True marks positions where a ``None`` placeholder must be
    inserted; the False entries correspond one-to-one to ``expansion``.
    """
    # Prepare empty_indices as: How many Nones to insert at each index?
    if _empty_indices:
        assert _empty_indices.count(False) == len(expansion)
        # Encode the mask as a 0/1 string; each run of 1s between 0s gives
        # the number of placeholders before the next real child.
        s = ''.join(str(int(b)) for b in _empty_indices)
        empty_indices = [len(ones) for ones in s.split('0')]
        assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion))
    else:
        empty_indices = [0] * (len(expansion)+1)

    to_include = []
    nones_to_add = 0
    for i, sym in enumerate(expansion):
        nones_to_add += empty_indices[i]
        # Keep the child unless it is a filtered-out terminal.
        if keep_all_tokens or not (sym.is_term and sym.filter_out):
            to_include.append((i, _should_expand(sym), nones_to_add))
            nones_to_add = 0

    nones_to_add += empty_indices[len(expansion)]

    # Only build a filter when something is actually dropped, expanded,
    # or padded with placeholders.
    if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include):
        if _empty_indices or ambiguous:
            return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add)
        else:
            # LALR without placeholders
            return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include])
class AmbiguousExpander:
    """Deal with the case where we're expanding children ('_rule') into a parent but the children
    are ambiguous, i.e. (parent->_ambig->_expand_this_rule). In this case, make the parent itself
    ambiguous with as many copies as there are ambiguous children, and then copy the ambiguous children
    into the right parents in the right places, essentially shifting the ambiguity up the tree."""

    def __init__(self, to_expand, tree_class, node_builder):
        self.node_builder = node_builder
        self.tree_class = tree_class
        # Indices of children that are candidates for inlining.
        self.to_expand = to_expand

    def __call__(self, children):
        def _is_ambig_tree(child):
            return hasattr(child, 'data') and child.data == '_ambig'

        #### When we're repeatedly expanding ambiguities we can end up with nested ambiguities.
        #    All children of an _ambig node should be a derivation of that ambig node, hence
        #    it is safe to assume that if we see an _ambig node nested within an ambig node
        #    it is safe to simply expand it into the parent _ambig node as an alternative derivation.
        ambiguous = []
        for i, child in enumerate(children):
            if _is_ambig_tree(child):
                if i in self.to_expand:
                    ambiguous.append(i)

                # Flatten nested _ambig nodes into this one.
                to_expand = [j for j, grandchild in enumerate(child.children) if _is_ambig_tree(grandchild)]
                child.expand_kids_by_index(*to_expand)

        if not ambiguous:
            return self.node_builder(children)

        # For each combination of ambiguous alternatives, build one parent.
        expand = [ iter(child.children) if i in ambiguous else repeat(child) for i, child in enumerate(children) ]
        return self.tree_class('_ambig', [self.node_builder(list(f[0])) for f in product(zip(*expand))])
def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens):
    """Return a partial AmbiguousExpander over the expandable symbol
    indices, or None when nothing in ``expansion`` needs expanding."""
    to_expand = []
    for i, sym in enumerate(expansion):
        if keep_all_tokens:
            to_expand.append(i)
        elif not (sym.is_term and sym.filter_out) and _should_expand(sym):
            to_expand.append(i)

    if to_expand:
        return partial(AmbiguousExpander, to_expand, tree_class)
def ptb_inline_args(func):
    """Wrap ``func`` so that a children list is passed as positional args."""
    @wraps(func)
    def inlined(children):
        return func(*children)
    return inlined
def inplace_transformer(func):
    """Wrap a Transformer_InPlace callback: build a Tree named after the
    rule and hand the whole tree to ``func``."""
    @wraps(func)
    def wrapped(children):
        # function name in a Transformer is a rule name.
        tree = Tree(func.__name__, children)
        return func(tree)
    return wrapped
def apply_visit_wrapper(func, name, wrapper):
    """Adapt a visit-wrapped callback to the plain (children) -> result
    interface used internally; meta-style wrappers are rejected."""
    if wrapper is _vargs_meta or wrapper is _vargs_meta_inline:
        raise NotImplementedError("Meta args not supported for internal transformer")

    @wraps(func)
    def wrapped(children):
        return wrapper(func, name, children, None)
    return wrapped
class ParseTreeBuilder:
    """Compiles per-rule callback chains that turn matched children into
    tree nodes, applying single-child expansion, child filtering, position
    propagation and ambiguity expansion as configured."""

    def __init__(self, rules, tree_class, propagate_positions=False, keep_all_tokens=False, ambiguous=False, maybe_placeholders=False):
        self.tree_class = tree_class
        self.propagate_positions = propagate_positions
        self.always_keep_all_tokens = keep_all_tokens
        self.ambiguous = ambiguous
        self.maybe_placeholders = maybe_placeholders

        self.rule_builders = list(self._init_builders(rules))

    def _init_builders(self, rules):
        # Yield (rule, wrapper_chain) pairs.  Falsy entries are filtered
        # out by filter(None, ...), so each wrapper is only present when
        # its feature applies to this rule.
        for rule in rules:
            options = rule.options
            keep_all_tokens = self.always_keep_all_tokens or options.keep_all_tokens
            expand_single_child = options.expand1

            wrapper_chain = list(filter(None, [
                (expand_single_child and not rule.alias) and ExpandSingleChild,
                maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None),
                self.propagate_positions and PropagatePositions,
                self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens),
            ]))

            yield rule, wrapper_chain

    def create_callback(self, transformer=None):
        """Build and return the rule -> callback mapping, routing each rule
        through a ``transformer`` method named after the rule (or its alias
        / template source) when one exists."""
        callbacks = {}

        for rule, wrapper_chain in self.rule_builders:

            user_callback_name = rule.alias or rule.options.template_source or rule.origin.name
            try:
                f = getattr(transformer, user_callback_name)
                # XXX InlineTransformer is deprecated!
                wrapper = getattr(f, 'visit_wrapper', None)
                if wrapper is not None:
                    f = apply_visit_wrapper(f, user_callback_name, wrapper)
                else:
                    if isinstance(transformer, InlineTransformer):
                        f = ptb_inline_args(f)
                    elif isinstance(transformer, Transformer_InPlace):
                        f = inplace_transformer(f)
            except AttributeError:
                # No transformer method: default to building a tree node.
                f = partial(self.tree_class, user_callback_name)

            # Apply the wrapper chain inside-out around the base callback.
            for w in wrapper_chain:
                f = w(f)

            if rule in callbacks:
                raise GrammarError("Rule '%s' already exists" % (rule,))

            callbacks[rule] = f

        return callbacks

###}
try:
from future_builtins import filter
except ImportError:
pass
from copy import deepcopy
from collections import OrderedDict
###{standalone
class Meta:
    """Positional metadata attached lazily to a Tree node.

    ``empty`` is True until position attributes are assigned.
    """

    def __init__(self):
        self.empty = True
class Tree(object):
    """A parse-tree node: a rule name (``data``) plus a list of
    ``children``, each of which is a nested Tree or a leaf value."""

    def __init__(self, data, children, meta=None):
        self.data = data
        self.children = children
        self._meta = meta  # lazily replaced by a Meta instance on access

    @property
    def meta(self):
        # Created on first access so meta-less trees stay cheap.
        if self._meta is None:
            self._meta = Meta()
        return self._meta

    def __repr__(self):
        return 'Tree(%s, %s)' % (self.data, self.children)

    def _pretty_label(self):
        return self.data

    def _pretty(self, level, indent_str):
        # A single non-Tree child is rendered on the same line as the label.
        if len(self.children) == 1 and not isinstance(self.children[0], Tree):
            return [ indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n']

        l = [ indent_str*level, self._pretty_label(), '\n' ]
        for n in self.children:
            if isinstance(n, Tree):
                l += n._pretty(level+1, indent_str)
            else:
                l += [ indent_str*(level+1), '%s' % (n,), '\n' ]

        return l

    def pretty(self, indent_str=' '):
        """Return an indented, human-readable rendering of the tree."""
        return ''.join(self._pretty(0, indent_str))

    def __eq__(self, other):
        try:
            return self.data == other.data and self.children == other.children
        except AttributeError:
            # ``other`` is not Tree-like.
            return False

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.data, tuple(self.children)))

    def iter_subtrees(self):
        """Iterate over all unique subtrees in reverse breadth-first order
        (deepest collected nodes come out first)."""
        # The queue deliberately grows while being iterated (BFS idiom);
        # ``subtrees`` also de-duplicates shared nodes by id.
        queue = [self]
        subtrees = OrderedDict()
        for subtree in queue:
            subtrees[id(subtree)] = subtree
            queue += [c for c in reversed(subtree.children)
                      if isinstance(c, Tree) and id(c) not in subtrees]

        del queue
        return reversed(list(subtrees.values()))

    def find_pred(self, pred):
        "Find all nodes where pred(tree) == True"
        return filter(pred, self.iter_subtrees())

    def find_data(self, data):
        "Find all nodes where tree.data == data"
        return self.find_pred(lambda t: t.data == data)

###}

    def expand_kids_by_index(self, *indices):
        "Expand (inline) children at the given indices"
        for i in sorted(indices, reverse=True):  # reverse so that changing tail won't affect indices
            kid = self.children[i]
            self.children[i:i+1] = kid.children

    def scan_values(self, pred):
        """Yield every non-Tree leaf value for which ``pred`` is true."""
        for c in self.children:
            if isinstance(c, Tree):
                for t in c.scan_values(pred):
                    yield t
            else:
                if pred(c):
                    yield c

    def iter_subtrees_topdown(self):
        """Iterate over all subtrees, parents before children."""
        stack = [self]
        while stack:
            node = stack.pop()
            if not isinstance(node, Tree):
                continue
            yield node
            for n in reversed(node.children):
                stack.append(n)

    def __deepcopy__(self, memo):
        # Children are deep-copied; the meta object is shared as-is.
        return type(self)(self.data, deepcopy(self.children, memo), meta=self._meta)

    def copy(self):
        # Shallow copy: the new Tree shares this tree's children list.
        return type(self)(self.data, self.children)

    def set(self, data, children):
        self.data = data
        self.children = children

    # XXX Deprecated! Here for backwards compatibility <0.6.0
    @property
    def line(self):
        return self.meta.line
    @property
    def column(self):
        return self.meta.column
    @property
    def end_line(self):
        return self.meta.end_line
    @property
    def end_column(self):
        return self.meta.end_column


class SlottedTree(Tree):
    # Memory-lean Tree variant: fixed __slots__ instead of an instance dict.
    __slots__ = 'data', 'children', 'rule', '_meta'
def pydot__tree_to_png(tree, filename, rankdir="LR", **kwargs):
    """Creates a colorful image that represents the tree (data+children, without meta)

    Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to
    directed graphs drawn from top to bottom, from left to right, from bottom to
    top, and from right to left, respectively.

    `kwargs` can be any graph attribute (e. g. `dpi=200`). For a list of
    possible attributes, see https://www.graphviz.org/doc/info/attrs.html.
    """
    import pydot
    graph = pydot.Dot(graph_type='digraph', rankdir=rankdir, **kwargs)

    counter = [0]  # mutable cell holding the next node id, shared by closures

    def new_leaf(leaf):
        node = pydot.Node(counter[0], label=repr(leaf))
        counter[0] += 1
        graph.add_node(node)
        return node

    def _to_pydot(subtree):
        # Derive a light fill color from the rule name (high bits forced on).
        color = (hash(subtree.data) & 0xffffff) | 0x808080

        subnodes = []
        for child in subtree.children:
            if isinstance(child, Tree):
                subnodes.append(_to_pydot(child))
            else:
                subnodes.append(new_leaf(child))

        node = pydot.Node(counter[0], style="filled", fillcolor="#%x" % color, label=subtree.data)
        counter[0] += 1
        graph.add_node(node)

        for subnode in subnodes:
            graph.add_edge(pydot.Edge(node, subnode))

        return node

    _to_pydot(tree)
    graph.write_png(filename)
from .utils import Serialize
###{standalone
class Symbol(Serialize):
    """Base class for grammar symbols, identified by ``name``.

    Subclasses set ``is_term`` to distinguish terminals from non-terminals;
    two symbols are equal only when both the kind and the name match.
    """
    __slots__ = ('name',)

    is_term = NotImplemented

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        assert isinstance(other, Symbol), other
        return (self.is_term, self.name) == (other.is_term, other.name)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash by name only; __eq__ still separates terminals from rules.
        return hash(self.name)

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.name)

    fullrepr = property(__repr__)
class Terminal(Symbol):
    """A terminal (token) symbol."""
    __serialize_fields__ = 'name', 'filter_out'
    is_term = True
    def __init__(self, name, filter_out=False):
        self.name = name
        # NOTE(review): presumably flags tokens to be dropped from the parse
        # tree -- confirm against the tree-building code.
        self.filter_out = filter_out
    @property
    def fullrepr(self):
        # Verbose repr that, unlike __repr__, also shows filter_out.
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out)
class NonTerminal(Symbol):
    """A non-terminal (rule) symbol."""
    __serialize_fields__ = 'name',
    is_term = False
class RuleOptions(Serialize):
    """Per-rule option flags (tree shaping, priority, template origin)."""
    __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices'
    def __init__(self, keep_all_tokens=False, expand1=False, priority=None, template_source=None, empty_indices=()):
        self.keep_all_tokens = keep_all_tokens
        self.expand1 = expand1
        self.priority = priority
        self.template_source = template_source
        self.empty_indices = empty_indices
    def __repr__(self):
        # NOTE(review): empty_indices is omitted here even though it is part
        # of the serialized state -- presumably just a debug-repr shortcut.
        return 'RuleOptions(%r, %r, %r, %r)' % (
            self.keep_all_tokens,
            self.expand1,
            self.priority,
            self.template_source
        )
class Rule(Serialize):
    """
    origin : a symbol
    expansion : a list of symbols
    order : index of this expansion amongst all rules of the same name
    """
    __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash')
    __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options'
    __serialize_namespace__ = Terminal, NonTerminal, RuleOptions
    def __init__(self, origin, expansion, order=0, alias=None, options=None):
        self.origin = origin
        self.expansion = expansion
        self.alias = alias
        self.order = order
        self.options = options or RuleOptions()
        # Hash is cached: rules are hashed often and identity is defined by
        # origin + expansion only (see __eq__ below).
        self._hash = hash((self.origin, tuple(self.expansion)))
    def _deserialize(self):
        # _hash is not in __serialize_fields__, so rebuild it after loading.
        self._hash = hash((self.origin, tuple(self.expansion)))
    def __str__(self):
        return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion))
    def __repr__(self):
        return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options)
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        # Equality deliberately ignores alias/options/order, matching __hash__.
        if not isinstance(other, Rule):
            return False
        return self.origin == other.origin and self.expansion == other.expansion
###}
from functools import wraps
from .utils import smart_decorator, combine_alternatives
from .tree import Tree
from .exceptions import VisitError, GrammarError
from .lexer import Token
###{standalone
from inspect import getmembers, getmro
class Discard(Exception):
    """Raised inside a transformer/visitor callback to drop the current value
    from the resulting tree (caught in ``Transformer._transform_children``)."""
    pass
# Transformers
class _Decoratable:
    """Mixin that lets a decorator (e.g. ``v_args``) be applied to every
    user-defined callback method of a class at once."""
    @classmethod
    def _apply_decorator(cls, decorator, **kwargs):
        mro = getmro(cls)
        assert mro[0] is cls
        # Names provided by base classes (library code); these must not be
        # decorated unless the subclass overrides them.
        libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)}
        for name, value in getmembers(cls):
            # Make sure the function isn't inherited (unless it's overwritten)
            if name.startswith('_') or (name in libmembers and name not in cls.__dict__):
                continue
            if not callable(value):
                continue
            # Skip if v_args already applied (at the function level)
            if hasattr(cls.__dict__[name], 'vargs_applied') or hasattr(value, 'vargs_applied'):
                continue
            static = isinstance(cls.__dict__[name], (staticmethod, classmethod))
            setattr(cls, name, decorator(value, static=static, **kwargs))
        return cls
    def __class_getitem__(cls, _):
        # Allow subscripting (e.g. ``Transformer[T]``) as a runtime no-op so
        # annotations don't break.
        return cls
class Transformer(_Decoratable):
    """Visits the tree recursively, starting with the leaves and finally the root (bottom-up)

    Calls its methods (provided by user via inheritance) according to tree.data
    The returned value replaces the old one in the structure.

    Can be used to implement map or reduce.
    """
    __visit_tokens__ = True  # For backwards compatibility
    def __init__(self, visit_tokens=True):
        # When True, Token children are also routed through user callbacks
        # named after the token type (see _call_userfunc_token).
        self.__visit_tokens__ = visit_tokens
    def _call_userfunc(self, tree, new_children=None):
        # Assumes tree is already transformed
        children = new_children if new_children is not None else tree.children
        try:
            f = getattr(self, tree.data)
        except AttributeError:
            return self.__default__(tree.data, children, tree.meta)
        else:
            try:
                # v_args-decorated callbacks carry a 'visit_wrapper' that
                # reshapes the call (inline/meta/tree argument forms).
                wrapper = getattr(f, 'visit_wrapper', None)
                if wrapper is not None:
                    return f.visit_wrapper(f, tree.data, children, tree.meta)
                else:
                    return f(children)
            except (GrammarError, Discard):
                # Control-flow exceptions propagate untouched.
                raise
            except Exception as e:
                # Any other error is wrapped so callers see which rule failed.
                raise VisitError(tree.data, tree, e)
    def _call_userfunc_token(self, token):
        # Same dispatch as _call_userfunc, but keyed on the token's type.
        try:
            f = getattr(self, token.type)
        except AttributeError:
            return self.__default_token__(token)
        else:
            try:
                return f(token)
            except (GrammarError, Discard):
                raise
            except Exception as e:
                raise VisitError(token.type, token, e)
    def _transform_children(self, children):
        # Yields transformed children; a child whose callback raises Discard
        # is silently dropped from the output.
        for c in children:
            try:
                if isinstance(c, Tree):
                    yield self._transform_tree(c)
                elif self.__visit_tokens__ and isinstance(c, Token):
                    yield self._call_userfunc_token(c)
                else:
                    yield c
            except Discard:
                pass
    def _transform_tree(self, tree):
        # Transform children first (bottom-up), then the node itself.
        children = list(self._transform_children(tree.children))
        return self._call_userfunc(tree, children)
    def transform(self, tree):
        """Transform *tree* bottom-up and return the result."""
        return self._transform_tree(tree)
    def __mul__(self, other):
        # ``a * b`` chains transformers: a is applied first, then b.
        return TransformerChain(self, other)
    def __default__(self, data, children, meta):
        "Default operation on tree (for override)"
        return Tree(data, children, meta)
    def __default_token__(self, token):
        "Default operation on token (for override)"
        return token
class InlineTransformer(Transformer):  # XXX Deprecated
    """Deprecated Transformer variant that passes a node's children to the
    callback as positional arguments instead of a single list."""
    def _call_userfunc(self, tree, new_children=None):
        # Assumes tree is already transformed
        children = tree.children if new_children is None else new_children
        _missing = object()
        handler = getattr(self, tree.data, _missing)
        if handler is _missing:
            return self.__default__(tree.data, children, tree.meta)
        return handler(*children)
class TransformerChain(object):
    """A sequence of transformers applied one after another, in order."""
    def __init__(self, *transformers):
        self.transformers = transformers

    def transform(self, tree):
        """Run the tree through every transformer, left to right."""
        result = tree
        for transformer in self.transformers:
            result = transformer.transform(result)
        return result

    def __mul__(self, other):
        # Chaining appends *other* to the end of the pipeline.
        return TransformerChain(*self.transformers + (other,))
class Transformer_InPlace(Transformer):
    "Non-recursive. Changes the tree in-place instead of returning new instances"
    def _transform_tree(self, tree):
        # Overridden to cancel the base class's recursion; traversal is
        # handled by transform() below.
        return self._call_userfunc(tree)

    def transform(self, tree):
        for node in tree.iter_subtrees():
            rebuilt = list(self._transform_children(node.children))
            node.children = rebuilt
        return self._transform_tree(tree)
class Transformer_NonRecursive(Transformer):
    "Non-recursive. Doesn't change the original tree."
    def transform(self, tree):
        # Tree to postfix: rev_postfix ends up holding a reversed post-order
        # traversal, built iteratively to avoid Python's recursion limit.
        rev_postfix = []
        q = [tree]
        while q:
            t = q.pop()
            rev_postfix.append( t )
            if isinstance(t, Tree):
                q += t.children
        # Postfix to tree: replay in post-order, using a value stack.
        stack = []
        for x in reversed(rev_postfix):
            if isinstance(x, Tree):
                size = len(x.children)
                if size:
                    # The top `size` entries are this node's already
                    # transformed children.
                    args = stack[-size:]
                    del stack[-size:]
                else:
                    args = []
                stack.append(self._call_userfunc(x, args))
            else:
                # NOTE(review): tokens are passed through untransformed here;
                # __visit_tokens__ is not honored -- confirm intended.
                stack.append(x)
        t ,= stack  # We should have only one tree remaining
        return t
class Transformer_InPlaceRecursive(Transformer):
    "Recursive. Changes the tree in-place instead of returning new instances"
    def _transform_tree(self, tree):
        # Rewrite the children on the node itself, then apply the callback.
        rebuilt = list(self._transform_children(tree.children))
        tree.children = rebuilt
        return self._call_userfunc(tree)
# Visitors
class VisitorBase:
    """Shared dispatch logic for the Visitor classes below."""
    def _call_userfunc(self, tree):
        # Dispatch on tree.data; fall back to __default__ when no handler.
        handler = getattr(self, tree.data, self.__default__)
        return handler(tree)

    def __default__(self, tree):
        "Default operation on tree (for override)"
        return tree

    def __class_getitem__(cls, _):
        # Allow subscripting (e.g. ``Visitor[T]``) as a runtime no-op.
        return cls
class Visitor(VisitorBase):
    """Bottom-up visitor, non-recursive

    Visits the tree, starting with the leaves and finally the root (bottom-up)
    Calls its methods (provided by user via inheritance) according to tree.data
    """
    def visit(self, tree):
        """Visit every subtree; returns *tree* unchanged."""
        for node in tree.iter_subtrees():
            self._call_userfunc(node)
        return tree

    def visit_topdown(self, tree):
        """Visit every subtree top-down; returns *tree* unchanged."""
        for node in tree.iter_subtrees_topdown():
            self._call_userfunc(node)
        return tree
class Visitor_Recursive(VisitorBase):
    """Bottom-up visitor, recursive

    Visits the tree, starting with the leaves and finally the root (bottom-up)
    Calls its methods (provided by user via inheritance) according to tree.data
    """
    def visit(self, tree):
        """Post-order: children first, then the node itself."""
        for sub in (c for c in tree.children if isinstance(c, Tree)):
            self.visit(sub)
        self._call_userfunc(tree)
        return tree

    def visit_topdown(self, tree):
        """Pre-order: the node first, then its children."""
        self._call_userfunc(tree)
        for sub in (c for c in tree.children if isinstance(c, Tree)):
            self.visit_topdown(sub)
        return tree
def visit_children_decor(func):
    """Decorator for Interpreter methods: visit the node's children first and
    pass the resulting values to the wrapped method (see Interpreter)."""
    @wraps(func)
    def wrapped(cls, tree):
        child_values = cls.visit_children(tree)
        return func(cls, child_values)
    return wrapped
class Interpreter(_Decoratable):
    """Top-down visitor, recursive

    Visits the tree, starting with the root and finally the leaves (top-down)
    Calls its methods (provided by user via inheritance) according to tree.data

    Unlike Transformer and Visitor, the Interpreter doesn't automatically visit its sub-branches.
    The user has to explicitly call visit_children, or use the @visit_children_decor
    """
    def visit(self, tree):
        # Dispatch to the method named after the rule; __getattr__ below
        # guarantees this never raises AttributeError (falls back to
        # __default__).
        f = getattr(self, tree.data)
        wrapper = getattr(f, 'visit_wrapper', None)
        if wrapper is not None:
            return f.visit_wrapper(f, tree.data, tree.children, tree.meta)
        else:
            return f(tree)
    def visit_children(self, tree):
        # Visit subtree children; leaf (token) children are returned as-is.
        return [self.visit(child) if isinstance(child, Tree) else child
                for child in tree.children]
    def __getattr__(self, name):
        # Any missing attribute resolves to the default handler.
        return self.__default__
    def __default__(self, tree):
        return self.visit_children(tree)
# Decorators
def _apply_decorator(obj, decorator, **kwargs):
try:
_apply = obj._apply_decorator
except AttributeError:
return decorator(obj, **kwargs)
else:
return _apply(decorator, **kwargs)
def _inline_args__func(func):
    """Wrap *func* so it receives a node's children as positional arguments
    instead of a single list (implementation of the deprecated inline_args)."""
    @wraps(func)
    def create_decorator(_f, with_self):
        if with_self:
            def inlined(self, children):
                return _f(self, *children)
        else:
            def inlined(self, children):
                return _f(*children)
        return inlined
    return smart_decorator(func, create_decorator)
def inline_args(obj):  # XXX Deprecated
    # Decorate a callback (or a whole class of callbacks) to receive children
    # as positional arguments; superseded by v_args(inline=True).
    return _apply_decorator(obj, _inline_args__func)
def _visitor_args_func_dec(func, visit_wrapper=None, static=False):
    # Wrap a user callback so the visitor machinery can detect that v_args
    # was applied (via 'vargs_applied') and find the argument-reshaping
    # 'visit_wrapper' (consumed in Transformer/Interpreter dispatch).
    def create_decorator(_f, with_self):
        if with_self:
            def f(self, *args, **kwargs):
                return _f(self, *args, **kwargs)
        else:
            def f(self, *args, **kwargs):
                return _f(*args, **kwargs)
        return f
    if static:
        # static/class methods take no implicit self, so bypass smart_decorator.
        f = wraps(func)(create_decorator(func, False))
    else:
        f = smart_decorator(func, create_decorator)
    f.vargs_applied = True
    f.visit_wrapper = visit_wrapper
    return f
# The four argument-reshaping wrappers selected by v_args(). Each receives the
# raw (f, data, children, meta) dispatch call and adapts it to the signature
# the user requested.
def _vargs_inline(f, data, children, meta):
    # v_args(inline=True): children become positional arguments.
    return f(*children)
def _vargs_meta_inline(f, data, children, meta):
    # v_args(meta=True, inline=True): meta first, then inlined children.
    return f(meta, *children)
def _vargs_meta(f, data, children, meta):
    return f(children, meta) # TODO swap these for consistency? Backwards incompatible!
def _vargs_tree(f, data, children, meta):
    # v_args(tree=True): the callback receives a whole Tree node.
    return f(Tree(data, children, meta))
def v_args(inline=False, meta=False, tree=False, wrapper=None):
"A convenience decorator factory, for modifying the behavior of user-supplied visitor methods"
if tree and (meta or inline):
raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.")
func = None
if meta:
if inline:
func = _vargs_meta_inline
else:
func = _vargs_meta
elif inline:
func = _vargs_inline
elif tree:
func = _vargs_tree
if wrapper is not None:
if func is not None:
raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.")
func = wrapper
def _visitor_args_dec(obj):
return _apply_decorator(obj, _visitor_args_func_dec, visit_wrapper=func)
return _visitor_args_dec
###}
#--- Visitor Utilities ---
class CollapseAmbiguities(Transformer):
    """
    Transforms a tree that contains any number of _ambig nodes into a list of trees,
    each one containing an unambiguous tree.

    The length of the resulting list is the product of the length of all _ambig nodes.

    Warning: This may quickly explode for highly ambiguous trees.
    """
    def _ambig(self, options):
        # An ambiguity node contributes every alternative: flatten the list
        # of per-option tree lists into a single list of trees.
        return sum(options, [])

    def __default__(self, data, children_lists, meta):
        # Each child is a list of alternatives; emit one tree per combination.
        return [Tree(data, children, meta) for children in combine_alternatives(children_lists)]

    def __default_token__(self, t):
        # A token is unambiguous: a single-alternative list.
        return [t]
from .utils import STRING_TYPE
###{standalone
class LarkError(Exception):
    """Base class for all exceptions raised by Lark."""
    pass
class GrammarError(LarkError):
    """Base class for grammar-definition errors."""
    pass
class ParseError(LarkError):
    """Base class for parsing errors."""
    pass
class LexError(LarkError):
    """Base class for lexing errors."""
    pass
class UnexpectedEOF(ParseError):
    """Raised when the input ends while the parser still expects tokens."""
    def __init__(self, expected):
        self.expected = expected

        names = '\n\t* '.join(x.name for x in self.expected)
        message = "Unexpected end-of-input. Expected one of: \n\t* %s\n" % names
        super(UnexpectedEOF, self).__init__(message)
class UnexpectedInput(LarkError):
    """Mixin for errors tied to a specific position in the input stream.

    Subclasses set ``pos_in_stream`` (and usually ``state``/``token``).
    """
    pos_in_stream = None
    def get_context(self, text, span=40):
        # Return the error's surrounding line with a '^' marker underneath
        # the offending position.
        pos = self.pos_in_stream
        start = max(pos - span, 0)
        end = pos + span
        before = text[start:pos].rsplit('\n', 1)[-1]
        after = text[pos:end].split('\n', 1)[0]
        return before + after + '\n' + ' ' * len(before) + '^\n'
    def match_examples(self, parse_fn, examples, token_type_match_fallback=False):
        """ Given a parser instance and a dictionary mapping some label with
            some malformed syntax examples, it'll return the label for the
            example that bests matches the current error.
        """
        assert self.state is not None, "Not supported for this exception"
        # candidate = (label, matched_by_token_type)
        candidate = (None, False)
        for label, example in examples.items():
            assert not isinstance(example, STRING_TYPE)
            for malformed in example:
                try:
                    parse_fn(malformed)
                except UnexpectedInput as ut:
                    if ut.state == self.state:
                        try:
                            if ut.token == self.token: # Try exact match first
                                return label
                            if token_type_match_fallback:
                                # Fallback to token types match
                                if (ut.token.type == self.token.type) and not candidate[-1]:
                                    candidate = label, True
                        except AttributeError:
                            # The exception carries no token; fall through to
                            # the weaker state-only match below.
                            pass
                        if not candidate[0]:
                            candidate = label, False
        return candidate[0]
class UnexpectedCharacters(LexError, UnexpectedInput):
    """Raised by the lexer when no terminal matches at the current position."""
    def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None):
        # Record position first: get_context() below reads pos_in_stream.
        self.line = line
        self.column = column
        self.allowed = allowed
        self.considered_tokens = considered_tokens
        self.pos_in_stream = lex_pos
        self.state = state

        message = "No terminal defined for '%s' at line %d col %d" % (seq[lex_pos], line, column)
        message += '\n\n' + self.get_context(seq)
        if allowed:
            message += '\nExpecting: %s\n' % allowed
        if token_history:
            message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in token_history)

        super(UnexpectedCharacters, self).__init__(message)
class UnexpectedToken(ParseError, UnexpectedInput):
    """Raised by the parser when it receives a token it cannot accept."""
    def __init__(self, token, expected, considered_rules=None, state=None, puppet=None):
        self.token = token
        self.expected = expected  # XXX str shouldn't necessary
        # Position attributes default to '?' / None when the token lacks them.
        self.line = getattr(token, 'line', '?')
        self.column = getattr(token, 'column', '?')
        self.considered_rules = considered_rules
        self.state = state
        self.pos_in_stream = getattr(token, 'pos_in_stream', None)
        self.puppet = puppet

        expected_list = '\n\t* '.join(self.expected)
        message = ("Unexpected token %r at line %s, column %s.\n"
                   "Expected one of: \n\t* %s\n"
                   % (token, self.line, self.column, expected_list))

        super(UnexpectedToken, self).__init__(message)
class VisitError(LarkError):
    """Raised when a visitor/transformer callback is interrupted by an exception.

    Attributes available for inspection:
    - obj: the tree node or token being processed when the exception was raised
    - orig_exc: the original exception that caused the failure
    """
    def __init__(self, rule, obj, orig_exc):
        self.obj = obj
        self.orig_exc = orig_exc

        message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc)
        super(VisitError, self).__init__(message)
###}
import re
from .utils import Str, classify, get_regexp_width, Py36, Serialize
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken
###{standalone
class Pattern(Serialize):
    """Abstract terminal pattern (literal string or regexp) plus regex flags."""
    def __init__(self, value, flags=()):
        self.value = value
        self.flags = frozenset(flags)
    def __repr__(self):
        return repr(self.to_regexp())
    # Pattern Hashing assumes all subclasses have a different priority!
    def __hash__(self):
        return hash((type(self), self.value, self.flags))
    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value and self.flags == other.flags
    def to_regexp(self):
        # Subclasses return the pattern rendered as a regexp string.
        raise NotImplementedError()
    if Py36:
        # Python 3.6 changed syntax for flags in regular expression
        def _get_flags(self, value):
            # Scoped form: (?f:...) applies each flag to this group only.
            for f in self.flags:
                value = ('(?%s:%s)' % (f, value))
            return value
    else:
        def _get_flags(self, value):
            # Global form: a (?f) prefix applies each flag to the whole pattern.
            for f in self.flags:
                value = ('(?%s)' % f) + value
            return value
class PatternStr(Pattern):
    """A literal-string pattern; escaped before use as a regexp."""
    __serialize_fields__ = 'value', 'flags'
    type = "str"
    def to_regexp(self):
        return self._get_flags(re.escape(self.value))
    @property
    def min_width(self):
        # A literal always matches exactly its own length.
        return len(self.value)
    max_width = min_width
class PatternRE(Pattern):
    """A regexp pattern; its (min, max) match width is computed lazily and cached."""
    __serialize_fields__ = 'value', 'flags', '_width'
    type = "re"
    def to_regexp(self):
        return self._get_flags(self.value)
    _width = None  # cached (min_width, max_width) pair
    def _get_width(self):
        if self._width is None:
            self._width = get_regexp_width(self.to_regexp())
        return self._width
    @property
    def min_width(self):
        return self._get_width()[0]
    @property
    def max_width(self):
        return self._get_width()[1]
class TerminalDef(Serialize):
    """A named terminal definition: a pattern plus a match priority."""
    __serialize_fields__ = 'name', 'pattern', 'priority'
    __serialize_namespace__ = PatternStr, PatternRE
    def __init__(self, name, pattern, priority=1):
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority
    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)
class Token(Str):
    """A lexed token: a string subclass that also carries its type and
    position information."""
    __slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')
    def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):
        try:
            self = super(Token, cls).__new__(cls, value)
        except UnicodeDecodeError:
            # Fallback for undecodable byte values: coerce through latin1.
            value = value.decode('latin1')
            self = super(Token, cls).__new__(cls, value)
        self.type = type_
        self.pos_in_stream = pos_in_stream
        self.value = value
        self.line = line
        self.column = column
        self.end_line = end_line
        self.end_column = end_column
        self.end_pos = end_pos
        return self
    def update(self, type_=None, value=None):
        # Return a copy with type and/or value replaced, borrowing position.
        return Token.new_borrow_pos(
            type_ if type_ is not None else self.type,
            value if value is not None else self.value,
            self
        )
    @classmethod
    def new_borrow_pos(cls, type_, value, borrow_t):
        # Build a token taking all position attributes from borrow_t.
        return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)
    def __reduce__(self):
        # NOTE(review): pickling drops the end_* attributes -- confirm intended.
        return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column, ))
    def __repr__(self):
        return 'Token(%s, %r)' % (self.type, self.value)
    def __deepcopy__(self, memo):
        # NOTE(review): deepcopy also drops the end_* attributes.
        return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)
    def __eq__(self, other):
        # Tokens of different types are never equal; otherwise compare as a
        # plain string (so a Token also equals a str with the same text).
        if isinstance(other, Token) and self.type != other.type:
            return False
        return Str.__eq__(self, other)
    __hash__ = Str.__hash__
class LineCounter:
    """Tracks absolute position, line and column while text is consumed."""
    def __init__(self):
        self.newline_char = '\n'
        self.char_pos = 0
        self.line = 1
        self.column = 1
        self.line_start_pos = 0

    def feed(self, token, test_newline=True):
        """Consume a token and update position, line and column.

        As an optimization, callers that know the token cannot contain a
        newline may pass ``test_newline=False`` to skip the newline scan.
        """
        if test_newline:
            newline_count = token.count(self.newline_char)
            if newline_count:
                self.line += newline_count
                # The column restarts after the last newline inside the token.
                self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1

        self.char_pos += len(token)
        self.column = self.char_pos - self.line_start_pos + 1
class _Lex:
    "Built to serve both Lexer and ContextualLexer"
    def __init__(self, lexer, state=None):
        self.lexer = lexer
        self.state = state
    def lex(self, stream, newline_types, ignore_types):
        """Generate Tokens from *stream*; raises UnexpectedCharacters when no
        terminal matches at the current position."""
        newline_types = frozenset(newline_types)
        ignore_types = frozenset(ignore_types)
        line_ctr = LineCounter()
        last_token = None
        while line_ctr.char_pos < len(stream):
            # Re-read self.lexer each pass: ContextualLexer swaps it between tokens.
            lexer = self.lexer
            res = lexer.match(stream, line_ctr.char_pos)
            if not res:
                # Nothing matched: report which non-ignored terminals were possible.
                allowed = {v for m, tfi in lexer.mres for v in tfi.values()} - ignore_types
                if not allowed:
                    allowed = {"<END-OF-FILE>"}
                raise UnexpectedCharacters(stream, line_ctr.char_pos, line_ctr.line, line_ctr.column, allowed=allowed, state=self.state, token_history=last_token and [last_token])
            value, type_ = res
            if type_ not in ignore_types:
                t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                # Feed before reading end positions, so t.end_* point past the token.
                line_ctr.feed(value, type_ in newline_types)
                t.end_line = line_ctr.line
                t.end_column = line_ctr.column
                t.end_pos = line_ctr.char_pos
                if t.type in lexer.callback:
                    t = lexer.callback[t.type](t)
                    if not isinstance(t, Token):
                        raise ValueError("Callbacks must return a token (returned %r)" % t)
                yield t
                last_token = t
            else:
                # Ignored terminals still run their callback, but no token is yielded.
                if type_ in lexer.callback:
                    t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
                    lexer.callback[type_](t2)
                line_ctr.feed(value, type_ in newline_types)
class UnlessCallback:
    """Token callback that re-labels a token when its value matches one of
    the given compiled patterns (the 'unless' mechanism built by
    ``_create_unless``)."""
    def __init__(self, mres):
        self.mres = mres

    def __call__(self, t):
        for pattern, type_by_group in self.mres:
            match = pattern.match(t.value)
            if match:
                t.type = type_by_group[match.lastindex]
                break
        return t
class CallChain:
    """Compose two token callbacks: run ``callback1``, then use ``cond`` on
    its result to decide whether to run ``callback2``.

    NOTE(review): when ``cond`` holds, ``callback2`` receives the *original*
    token, not ``callback1``'s result -- this mirrors the existing logic;
    confirm it is intentional before changing.
    """
    def __init__(self, callback1, callback2, cond):
        self.callback1 = callback1
        self.callback2 = callback2
        self.cond = cond

    def __call__(self, t):
        intermediate = self.callback1(t)
        if self.cond(intermediate):
            return self.callback2(t)
        return intermediate
def _create_unless(terminals, g_regex_flags, re_):
    # For each regexp terminal, find string terminals (of no higher priority)
    # that it would also match in full, and install an UnlessCallback that
    # re-labels such exact matches. String terminals fully covered this way
    # are removed from the returned terminal list.
    tokens_by_type = classify(terminals, lambda t: type(t.pattern))
    assert len(tokens_by_type) <= 2, tokens_by_type.keys()
    embedded_strs = set()
    callback = {}
    for retok in tokens_by_type.get(PatternRE, []):
        unless = [] # {}
        for strtok in tokens_by_type.get(PatternStr, []):
            if strtok.priority > retok.priority:
                continue
            s = strtok.pattern.value
            m = re_.match(retok.pattern.to_regexp(), s, g_regex_flags)
            if m and m.group(0) == s:
                # The regexp matches the entire literal -> needs re-labeling.
                unless.append(strtok)
                if strtok.pattern.flags <= retok.pattern.flags:
                    embedded_strs.add(strtok)
        if unless:
            callback[retok.name] = UnlessCallback(build_mres(unless, g_regex_flags, re_, match_whole=True))
    terminals = [t for t in terminals if t not in embedded_strs]
    return terminals, callback
def _build_mres(terminals, max_size, g_regex_flags, match_whole, re_):
    # Python sets an unreasonable group limit (currently 100) in its re module
    # Worse, the only way to know we reached it is by catching an AssertionError!
    # This function recursively tries less and less groups until it's successful.
    postfix = '$' if match_whole else ''
    mres = []
    while terminals:
        try:
            # One alternation with a named group per terminal; the group that
            # matched identifies the terminal.
            mre = re_.compile(u'|'.join(u'(?P<%s>%s)'%(t.name, t.pattern.to_regexp()+postfix) for t in terminals[:max_size]), g_regex_flags)
        except AssertionError:  # Yes, this is what Python provides us.. :/
            # Too many groups: retry with half the batch size.
            return _build_mres(terminals, max_size//2, g_regex_flags, match_whole, re_)
        # terms_from_name = {t.name: t for t in terminals[:max_size]}
        mres.append((mre, {i:n for n,i in mre.groupindex.items()} ))
        terminals = terminals[max_size:]
    return mres
def build_mres(terminals, g_regex_flags, re_, match_whole=False):
    # Compile the terminals into a list of (regex, {group_index: name}) pairs,
    # splitting across several regexes if Python's group limit is exceeded.
    return _build_mres(terminals, len(terminals), g_regex_flags, match_whole, re_)
def _regexp_has_newline(r):
r"""Expressions that may indicate newlines in a regexp:
- newlines (\n)
- escaped newline (\\n)
- anything but ([^...])
- any-char (.) when the flag (?s) exists
- spaces (\s)
"""
return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)
class Lexer(object):
    """Lexer interface

    Method Signatures:
        lex(self, stream) -> Iterator[Token]
    """
    # Subclasses must provide a concrete lex().
    lex = NotImplemented
class TraditionalLexer(Lexer):
    """Single-state lexer: matches against the whole terminal set at every
    position of the input."""
    def __init__(self, terminals, re_, ignore=(), user_callbacks={}, g_regex_flags=0):
        assert all(isinstance(t, TerminalDef) for t in terminals), terminals
        terminals = list(terminals)
        self.re = re_
        # Sanitization
        for t in terminals:
            try:
                self.re.compile(t.pattern.to_regexp(), g_regex_flags)
            except self.re.error:
                raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))
            if t.pattern.min_width == 0:
                # A zero-width terminal would make the lexer loop forever.
                raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))
        assert set(ignore) <= {t.name for t in terminals}
        # Init
        self.newline_types = [t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())]
        self.ignore_types = list(ignore)
        # Match order: higher priority first, then wider patterns, then longer
        # literals; name as the final tiebreaker for determinism.
        terminals.sort(key=lambda x:(-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
        self.terminals = terminals
        self.user_callbacks = user_callbacks
        self.build(g_regex_flags)
    def build(self, g_regex_flags=0):
        # Install 'unless' callbacks for literals shadowed by regexps, then
        # chain any user callbacks on top of them.
        terminals, self.callback = _create_unless(self.terminals, g_regex_flags, re_=self.re)
        assert all(self.callback.values())
        for type_, f in self.user_callbacks.items():
            if type_ in self.callback:
                # Already a callback there, probably UnlessCallback
                self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
            else:
                self.callback[type_] = f
        self.mres = build_mres(terminals, g_regex_flags, self.re)
    def match(self, stream, pos):
        # Returns (matched_text, terminal_name) at pos, or None if no match.
        for mre, type_from_index in self.mres:
            m = mre.match(stream, pos)
            if m:
                return m.group(0), type_from_index[m.lastindex]
    def lex(self, stream):
        return _Lex(self).lex(stream, self.newline_types, self.ignore_types)
class ContextualLexer(Lexer):
    """Lexer that switches terminal sets according to the current parser
    state, sharing TraditionalLexer instances between states that accept the
    same terminals."""
    def __init__(self, terminals, states, re_, ignore=(), always_accept=(), user_callbacks={}, g_regex_flags=0):
        self.re = re_
        tokens_by_name = {}
        for t in terminals:
            assert t.name not in tokens_by_name, t
            tokens_by_name[t.name] = t
        # Cache: states with identical accept-sets share one lexer instance.
        lexer_by_tokens = {}
        self.lexers = {}
        for state, accepts in states.items():
            key = frozenset(accepts)
            try:
                lexer = lexer_by_tokens[key]
            except KeyError:
                accepts = set(accepts) | set(ignore) | set(always_accept)
                state_tokens = [tokens_by_name[n] for n in accepts if n and n in tokens_by_name]
                lexer = TraditionalLexer(state_tokens, re_=self.re, ignore=ignore, user_callbacks=user_callbacks, g_regex_flags=g_regex_flags)
                lexer_by_tokens[key] = lexer
            self.lexers[state] = lexer
        # Fallback lexer over the full terminal set, used for error reporting.
        self.root_lexer = TraditionalLexer(terminals, re_=self.re, ignore=ignore, user_callbacks=user_callbacks, g_regex_flags=g_regex_flags)
    def lex(self, stream, get_parser_state):
        parser_state = get_parser_state()
        l = _Lex(self.lexers[parser_state], parser_state)
        try:
            for x in l.lex(stream, self.root_lexer.newline_types, self.root_lexer.ignore_types):
                yield x
                # The parser may have advanced after consuming the token;
                # switch to the lexer for the new state.
                parser_state = get_parser_state()
                l.lexer = self.lexers[parser_state]
                l.state = parser_state # For debug only, no need to worry about multithreading
        except UnexpectedCharacters as e:
            # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined,
            # but not in the current context.
            # This tests the input against the global context, to provide a nicer error.
            root_match = self.root_lexer.match(stream, e.pos_in_stream)
            if not root_match:
                raise
            value, type_ = root_match
            t = Token(type_, value, e.pos_in_stream, e.line, e.column)
            raise UnexpectedToken(t, e.allowed, state=e.state)
###}
from random import randint
from math import isinf
from collections import deque
from operator import attrgetter
from importlib import import_module
from ..tree import Tree
from ..exceptions import ParseError
class ForestNode(object):
    """Base class for nodes of the shared packed parse forest (SPPF)."""
    pass

class SymbolNode(ForestNode):
    """
    A Symbol Node represents a symbol (or Intermediate LR0).

    Symbol nodes are keyed by the symbol (s). For intermediate nodes
    s will be an LR0, stored as a tuple of (rule, ptr). For completed symbol
    nodes, s will be a string representing the non-terminal origin (i.e.
    the left hand side of the rule).

    The children of a Symbol or Intermediate Node will always be Packed Nodes;
    with each Packed Node child representing a single derivation of a production.

    Hence a Symbol Node with a single child is unambiguous.
    """
    __slots__ = ('s', 'start', 'end', '_children', 'paths', 'paths_loaded', 'priority', 'is_intermediate', '_hash')
    def __init__(self, s, start, end):
        self.s = s
        self.start = start
        self.end = end
        self._children = set()
        self.paths = set()
        self.paths_loaded = False

        ### We use inf here as it can be safely negated without resorting to conditionals,
        # unlike None or float('NaN'), and sorts appropriately.
        self.priority = float('-inf')
        self.is_intermediate = isinstance(s, tuple)
        self._hash = hash((self.s, self.start, self.end))

    def add_family(self, lr0, rule, start, left, right):
        # Record one derivation as a packed node; the set de-duplicates.
        self._children.add(PackedNode(self, lr0, rule, start, left, right))

    def add_path(self, transitive, node):
        self.paths.add((transitive, node))

    def load_paths(self):
        # Expand deferred transitive paths into packed-node families.
        for transitive, node in self.paths:
            if transitive.next_titem is not None:
                vn = SymbolNode(transitive.next_titem.s, transitive.next_titem.start, self.end)
                vn.add_path(transitive.next_titem, node)
                self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, vn)
            else:
                self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, node)
        self.paths_loaded = True

    @property
    def is_ambiguous(self):
        # More than one derivation means this node is ambiguous.
        return len(self.children) > 1

    @property
    def children(self):
        if not self.paths_loaded: self.load_paths()
        return sorted(self._children, key=attrgetter('sort_key'))

    def __iter__(self):
        return iter(self._children)

    def __eq__(self, other):
        if not isinstance(other, SymbolNode):
            return False
        # Compare `end` by value (==) rather than identity (`is`): positions
        # are plain ints, and identity comparison only works reliably for
        # CPython's cached small integers -- equal spans beyond that range
        # would otherwise compare unequal and duplicate forest nodes. This
        # also matches __hash__, which hashes `end` by value.
        return self is other or (type(self.s) == type(other.s) and self.s == other.s and self.start == other.start and self.end == other.end)

    def __hash__(self):
        return self._hash

    def __repr__(self):
        if self.is_intermediate:
            rule = self.s[0]
            ptr = self.s[1]
            before = ( expansion.name for expansion in rule.expansion[:ptr] )
            after = ( expansion.name for expansion in rule.expansion[ptr:] )
            symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after))
        else:
            symbol = self.s.name
        return "({}, {}, {}, {})".format(symbol, self.start, self.end, self.priority)
class PackedNode(ForestNode):
    """
    A Packed Node represents a single derivation in a symbol node.
    """
    __slots__ = ('parent', 's', 'rule', 'start', 'left', 'right', 'priority', '_hash')
    def __init__(self, parent, s, rule, start, left, right):
        self.parent = parent
        self.s = s
        self.start = start
        self.rule = rule
        self.left = left
        self.right = right
        self.priority = float('-inf')
        # Hash by the two children only; __eq__ below matches this.
        self._hash = hash((self.left, self.right))

    @property
    def is_empty(self):
        return self.left is None and self.right is None

    @property
    def sort_key(self):
        """
        Used to sort PackedNode children of SymbolNodes.
        A SymbolNode has multiple PackedNodes if it matched
        ambiguously. Hence, we use the sort order to identify
        the order in which ambiguous children should be considered.
        """
        return self.is_empty, -self.priority, self.rule.order

    def __iter__(self):
        return iter((self.left, self.right))

    def __eq__(self, other):
        if not isinstance(other, PackedNode):
            return False
        return self is other or (self.left == other.left and self.right == other.right)

    def __hash__(self):
        return self._hash

    def __repr__(self):
        if isinstance(self.s, tuple):
            rule, ptr = self.s
            consumed = ' '.join(sym.name for sym in rule.expansion[:ptr])
            remaining = ' '.join(sym.name for sym in rule.expansion[ptr:])
            symbol = "{} ::= {}* {}".format(rule.origin.name, consumed, remaining)
        else:
            symbol = self.s.name
        return "({}, {}, {}, {})".format(symbol, self.start, self.priority, self.rule.order)
class ForestVisitor(object):
    """
    An abstract base class for building forest visitors.
    Use this as a base when you need to walk the forest.
    """
    __slots__ = ['result']

    # Hooks; subclasses override the ones they need. The *_in hooks may
    # return an iterator of child nodes to descend into.
    def visit_token_node(self, node): pass
    def visit_symbol_node_in(self, node): pass
    def visit_symbol_node_out(self, node): pass
    def visit_packed_node_in(self, node): pass
    def visit_packed_node_out(self, node): pass

    def visit(self, root):
        """Iteratively walk the forest from ``root``, dispatching to the
        hooks above, and return whatever a subclass stored in ``self.result``."""
        self.result = None

        # Visiting is a list of IDs of all symbol/intermediate nodes currently in
        # the stack. It serves two purposes: to detect when we 'recurse' in and out
        # of a symbol/intermediate so that we can process both up and down. Also,
        # since the SPPF can have cycles it allows us to detect if we're trying
        # to recurse into a node that's already on the stack (infinite recursion).
        visiting = set()

        # We do not use recursion here to walk the Forest due to the limited
        # stack size in python. Therefore input_stack is essentially our stack.
        input_stack = deque([root])

        # It is much faster to cache these as locals since they are called
        # many times in large parses. Plain attribute access replaces the
        # previous getattr(self, 'constant-string') calls, which were
        # equivalent but unidiomatic.
        vpno = self.visit_packed_node_out
        vpni = self.visit_packed_node_in
        vsno = self.visit_symbol_node_out
        vsni = self.visit_symbol_node_in
        vtn = self.visit_token_node

        while input_stack:
            current = next(reversed(input_stack))
            try:
                next_node = next(current)
            except StopIteration:
                input_stack.pop()
                continue
            except TypeError:
                ### If the current object is not an iterator, pass through to Token/SymbolNode
                pass
            else:
                if next_node is None:
                    continue

                if id(next_node) in visiting:
                    raise ParseError("Infinite recursion in grammar, in rule '%s'!" % next_node.s.name)

                input_stack.append(next_node)
                continue

            if not isinstance(current, ForestNode):
                vtn(current)
                input_stack.pop()
                continue

            current_id = id(current)
            if current_id in visiting:
                # Second visit: we are on the way back up.
                if isinstance(current, PackedNode): vpno(current)
                else: vsno(current)
                input_stack.pop()
                visiting.remove(current_id)
                continue
            else:
                # First visit: descend into whatever the *_in hook returns.
                visiting.add(current_id)
                if isinstance(current, PackedNode): next_node = vpni(current)
                else: next_node = vsni(current)
                if next_node is None:
                    continue

                if id(next_node) in visiting:
                    raise ParseError("Infinite recursion in grammar!")

                input_stack.append(next_node)
                continue

        return self.result
class ForestSumVisitor(ForestVisitor):
    """
    A visitor that scores ambiguous parts of the Forest.

    Used when explicit rule priorities (normal or inverted) are requested.
    It walks the forest (or a subset of it) and cascades priorities upward
    from the leaves, so ambiguous children can later be ordered by preference.

    Doing this during parsing would require touching each Earley item many
    times; a single walk over the (much smaller) final SPPF is the cheaper
    of the two options.
    """
    def visit_packed_node_in(self, node):
        return iter([node.left, node.right])

    def visit_symbol_node_in(self, node):
        return iter(node.children)

    def visit_packed_node_out(self, node):
        # A packed node's priority is its own rule priority (intermediate
        # nodes contribute none) plus whatever its children accumulated.
        if node.parent.is_intermediate or not node.rule.options.priority:
            own = 0
        else:
            own = node.rule.options.priority
        node.priority = own + getattr(node.left, 'priority', 0) + getattr(node.right, 'priority', 0)

    def visit_symbol_node_out(self, node):
        # A symbol node is as good as its best derivation.
        node.priority = max(alternative.priority for alternative in node.children)
class ForestToTreeVisitor(ForestVisitor):
    """
    A Forest visitor which converts an SPPF forest to an unambiguous AST.

    Only the first child of each ambiguous symbol node is walked. When an
    unscored ambiguous node is encountered, the ``forest_sum_visitor``
    (another ForestVisitor implementing some priority mechanism) is run
    first so that the children are sorted into preference order — the first
    child is then the highest-preference derivation.
    """
    __slots__ = ['forest_sum_visitor', 'callbacks', 'output_stack']

    def __init__(self, callbacks, forest_sum_visitor = None):
        assert callbacks
        self.forest_sum_visitor = forest_sum_visitor
        self.callbacks = callbacks

    def visit(self, root):
        self.output_stack = deque()
        return super(ForestToTreeVisitor, self).visit(root)

    def visit_token_node(self, node):
        self.output_stack[-1].append(node)

    def visit_symbol_node_in(self, node):
        # Score an ambiguous subtree on first contact so the preferred
        # derivation sorts first, then descend only into that first child.
        if self.forest_sum_visitor and node.is_ambiguous and isinf(node.priority):
            self.forest_sum_visitor.visit(node)
        return next(iter(node.children))

    def visit_packed_node_in(self, node):
        if not node.parent.is_intermediate:
            self.output_stack.append([])
        return iter([node.left, node.right])

    def visit_packed_node_out(self, node):
        if node.parent.is_intermediate:
            return
        result = self.callbacks[node.rule](self.output_stack.pop())
        if self.output_stack:
            self.output_stack[-1].append(result)
        else:
            self.result = result
class ForestToAmbiguousTreeVisitor(ForestToTreeVisitor):
    """
    A Forest visitor which converts an SPPF forest to an ambiguous AST.
    Because of the fundamental disparity between what can be stored in
    an SPPF and what can be stored in a Tree; this implementation is not
    complete. It correctly deals with ambiguities that occur on symbol nodes only,
    and cannot deal with ambiguities that occur on intermediate nodes.
    Usually, most parsers can be rewritten to avoid intermediate node
    ambiguities. Also, this implementation could be fixed, however
    the code to handle intermediate node ambiguities is messy and
    would not be performant. It is much better not to use this and
    instead to correctly disambiguate the forest and only store unambiguous
    parses in Trees. It is here just to provide some parity with the
    old ambiguity='explicit'.
    This is mainly used by the test framework, to make it simpler to write
    tests ensuring the SPPF contains the right results.
    """
    def __init__(self, callbacks, forest_sum_visitor = ForestSumVisitor):
        # NOTE(review): the default here is the ForestSumVisitor *class*, not
        # an instance; visit_symbol_node_in below calls
        # self.forest_sum_visitor.visit(node), which assumes an instance.
        # Callers in this package pass an instance — confirm the bare-class
        # default is never exercised on an ambiguous tree.
        super(ForestToAmbiguousTreeVisitor, self).__init__(callbacks, forest_sum_visitor)

    def visit_token_node(self, node):
        # Append the token to the children of the tree currently being built.
        self.output_stack[-1].children.append(node)

    def visit_symbol_node_in(self, node):
        # Lazily compute priorities the first time an ambiguous node is seen.
        if self.forest_sum_visitor and node.is_ambiguous and isinf(node.priority):
            self.forest_sum_visitor.visit(node)
        # An ambiguous (non-intermediate) symbol opens an '_ambig' tree that
        # will collect one 'drv' subtree per derivation.
        if not node.is_intermediate and node.is_ambiguous:
            self.output_stack.append(Tree('_ambig', []))
        return iter(node.children)

    def visit_symbol_node_out(self, node):
        # Close the matching '_ambig' tree opened in visit_symbol_node_in.
        if not node.is_intermediate and node.is_ambiguous:
            result = self.output_stack.pop()
            if self.output_stack:
                self.output_stack[-1].children.append(result)
            else:
                self.result = result

    def visit_packed_node_in(self, node):
        # Each derivation builds into its own temporary 'drv' tree.
        if not node.parent.is_intermediate:
            self.output_stack.append(Tree('drv', []))
        return iter([node.left, node.right])

    def visit_packed_node_out(self, node):
        # Reduce the finished 'drv' tree through the rule callback and attach
        # the result to the enclosing tree (or publish it as the final result).
        if not node.parent.is_intermediate:
            result = self.callbacks[node.rule](self.output_stack.pop().children)
            if self.output_stack:
                self.output_stack[-1].children.append(result)
            else:
                self.result = result
class ForestToPyDotVisitor(ForestVisitor):
    """
    A Forest visitor which writes the SPPF to a PNG.

    The SPPF can get really large, really quickly because of the amount of
    meta-data it stores, so this is probably only useful for trivial trees
    and for learning how the SPPF is structured.
    """
    def __init__(self, rankdir="TB"):
        self.pydot = import_module('pydot')
        self.graph = self.pydot.Dot(graph_type='digraph', rankdir=rankdir)

    def visit(self, root, filename):
        super(ForestToPyDotVisitor, self).visit(root)
        self.graph.write_png(filename)

    def visit_token_node(self, node):
        # Tokens render as rounded grey diamonds labelled with their text.
        escaped_value = node.value.replace('"', '\\"')
        token_node = self.pydot.Node(
            str(id(node)),
            style="\"filled,rounded\"",
            fillcolor="#{:06x}".format(0x808080),
            shape="diamond",
            label="\"{}\"".format(escaped_value),
        )
        self.graph.add_node(token_node)

    def visit_packed_node_in(self, node):
        packed_node = self.pydot.Node(
            str(id(node)),
            style="filled",
            fillcolor="#{:06x}".format(0x808080),
            shape="diamond",
            label=repr(node),
        )
        self.graph.add_node(packed_node)
        return iter([node.left, node.right])

    def visit_packed_node_out(self, node):
        parent_node = self.graph.get_node(str(id(node)))[0]
        for child in (node.left, node.right):
            if child is not None:
                child_node = self.graph.get_node(str(id(child)))[0]
                self.graph.add_edge(self.pydot.Edge(parent_node, child_node))
                continue
            #### Try and be above the Python object ID range; probably impl. specific, but maybe this is okay.
            placeholder_id = str(randint(100000000000000000000000000000,123456789012345678901234567890))
            placeholder = self.pydot.Node(placeholder_id, style="invis", label="None")
            self.graph.add_node(placeholder)
            self.graph.add_edge(self.pydot.Edge(parent_node, placeholder, style="invis"))

    def visit_symbol_node_in(self, node):
        # Intermediate nodes render as ellipses, full symbols as rectangles.
        shape = "ellipse" if node.is_intermediate else "rectangle"
        symbol_node = self.pydot.Node(
            str(id(node)),
            style="\"filled\"",
            fillcolor="#{:06x}".format(0x808080),
            shape=shape,
            label=repr(node),
        )
        self.graph.add_node(symbol_node)
        return iter(node.children)

    def visit_symbol_node_out(self, node):
        parent_node = self.graph.get_node(str(id(node)))[0]
        for child in node.children:
            child_node = self.graph.get_node(str(id(child)))[0]
            self.graph.add_edge(self.pydot.Edge(parent_node, child_node))
from copy import deepcopy
from .lalr_analysis import Shift, Reduce
class ParserPuppet:
    """Snapshot of an in-flight LALR parse, handed to the user (via
    UnexpectedToken) so the parse can be inspected, copied and resumed —
    the hook for interactive error recovery."""

    def __init__(self, parser, state_stack, value_stack, start, stream, set_state):
        self.parser = parser
        self._state_stack = state_stack
        self._value_stack = value_stack
        self._start = start
        self._stream = stream
        self._set_state = set_state

        self.result = None

    def feed_token(self, token):
        """Advance the parser state, as if it just received `token` from the lexer
        """
        end_state = self.parser.parse_table.end_states[self._start]
        state_stack = self._state_stack
        value_stack = self._value_stack

        state = state_stack[-1]
        action, arg = self.parser.parse_table.states[state][token.type]
        assert arg != end_state

        # Apply reductions until the table tells us to shift `token`.
        while action is Reduce:
            rule = arg
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.parser.callbacks[rule](s)

            # Goto: push the state reached on the reduced nonterminal.
            _action, new_state = self.parser.parse_table.states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

            if state_stack[-1] == end_state:
                self.result = value_stack[-1]
                return self.result

            state = state_stack[-1]
            action, arg = self.parser.parse_table.states[state][token.type]
            assert arg != end_state

        assert action is Shift
        state_stack.append(arg)
        value_stack.append(token)

    def copy(self):
        """Return an independent puppet: the state stack is copied shallowly,
        the value stack deep-copied, so feeding the copy cannot disturb the
        original."""
        return type(self)(
            self.parser,
            list(self._state_stack),
            deepcopy(self._value_stack),
            self._start,
            self._stream,
            self._set_state,
        )

    def pretty(self):
        """Print a human-readable summary of the current choices and stack.

        BUG FIX: this was declared ``def pretty():`` (missing ``self``) and
        accessed the ``choices`` method as if it were a dict — any call would
        raise. It now takes ``self`` and calls ``self.choices()``.
        """
        print("Puppet choices:")
        for k, v in self.choices().items():
            print('\t-', k, '->', v)
        print('stack size:', len(self._state_stack))

    def choices(self):
        """Return the token-type -> action mapping available in the current state."""
        return self.parser.parse_table.states[self._state_stack[-1]]

    def resume_parse(self):
        """Resume the normal parse from the captured state."""
        return self.parser.parse(self._stream, self._start, self._set_state, self._value_stack, self._state_stack)
from collections import defaultdict
from ..exceptions import UnexpectedCharacters
from ..lexer import Token
from ..grammar import Terminal
from .earley import Parser as BaseParser
from .earley_forest import SymbolNode
class Parser(BaseParser):
    """Scannerless ("X") Earley parser: an Earley variant that matches
    terminals directly against the character stream instead of consuming a
    pre-tokenized input."""

    def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore = (), complete_lex = False, debug=False):
        BaseParser.__init__(self, parser_conf, term_matcher, resolve_ambiguity, debug)
        self.ignore = [Terminal(t) for t in ignore]   # terminals skipped between matches (e.g. whitespace)
        self.complete_lex = complete_lex              # if True, also try proper prefixes of each match

    def _parse(self, stream, columns, to_scan, start_symbol=None):

        def scan(i, to_scan):
            """The core Earley Scanner.
            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""

            node_cache = {}

            # 1) Loop the expectations and ask the lexer to match.
            # Since regexp is forward looking on the input stream, and we only
            # want to process tokens when we hit the point in the stream at which
            # they complete, we push all tokens into a buffer (delayed_matches), to
            # be held possibly for a later parse step when we reach the point in the
            # input stream at which they complete.
            for item in set(to_scan):
                m = match(item.expect, stream, i)
                if m:
                    t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                    delayed_matches[m.end()].append( (item, i, t) )

                    if self.complete_lex:
                        s = m.group(0)
                        for j in range(1, len(s)):
                            # NOTE(review): this re-matches the truncated text
                            # from offset 0 (no position argument, unlike the
                            # call above) — confirm that is the intended anchor.
                            m = match(item.expect, s[:-j])
                            if m:
                                t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                                delayed_matches[i+m.end()].append( (item, i, t) )

                    # Remove any items that successfully matched in this pass from the to_scan buffer.
                    # This ensures we don't carry over tokens that already matched, if we're ignoring below.
                    to_scan.remove(item)

            # 2) Process any ignores. This is typically used for e.g. whitespace.
            # We carry over any unmatched items from the to_scan buffer to be matched again after
            # the ignore. This should allow us to use ignored symbols in non-terminals to implement
            # e.g. mandatory spacing.
            for x in self.ignore:
                m = match(x, stream, i)
                if m:
                    # Carry over any items still in the scan buffer, to past the end of the ignored items.
                    delayed_matches[m.end()].extend([(item, i, None) for item in to_scan ])

                    # If we're ignoring up to the end of the file, carry over the start symbol if it already completed.
                    delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol])

            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            transitives.append({})

            ## 3) Process Tokens from delayed_matches.
            # This is the core of the Earley scanner. Create an SPPF node for each Token,
            # and create the symbol node in the SPPF tree. Advance the item that completed,
            # and add the resulting new item to either the Earley set (for processing by the
            # completer/predictor) or the to_scan buffer for the next parse step.
            for item, start, token in delayed_matches[i+1]:
                if token is not None:
                    token.end_line = text_line
                    token.end_column = text_column + 1
                    token.end_pos = i + 1

                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)
                else:
                    # Carried over unchanged across an ignored region.
                    new_item = item

                if new_item.expect in self.TERMINALS:
                    # add (B ::= Aai+1.B, h, y) to Q'
                    next_to_scan.add(new_item)
                else:
                    # add (B ::= Aa+1.B, h, y) to Ei+1
                    next_set.add(new_item)

            del delayed_matches[i+1]    # No longer needed, so unburden memory

            if not next_set and not delayed_matches and not next_to_scan:
                raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan))

            return next_to_scan

        delayed_matches = defaultdict(list)
        match = self.term_matcher

        # Cache for nodes & tokens created in a particular parse step.
        transitives = [{}]

        # 1-based position tracking for Token line/column metadata.
        text_line = 1
        text_column = 1

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
        i = 0
        for token in stream:
            self.predict_and_complete(i, to_scan, columns, transitives)

            to_scan = scan(i, to_scan)

            if token == '\n':
                text_line += 1
                text_column = 1
            else:
                text_column += 1
            i += 1

        self.predict_and_complete(i, to_scan, columns, transitives)

        ## Column is now the final column in the parse.
        assert i == len(columns)-1
        return to_scan
import logging
from collections import deque
from ..visitors import Transformer_InPlace, v_args
from ..exceptions import UnexpectedEOF, UnexpectedToken
from .grammar_analysis import GrammarAnalyzer
from ..grammar import NonTerminal
from .earley_common import Item, TransitiveItem
from .earley_forest import ForestToTreeVisitor, ForestSumVisitor, SymbolNode, ForestToAmbiguousTreeVisitor
class Parser:
    """SPPF-building Earley parser (E. Scott's algorithm), with Joop Leo's
    transitive-item optimization for right recursion."""

    def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, debug=False):
        analysis = GrammarAnalyzer(parser_conf)
        self.parser_conf = parser_conf
        self.resolve_ambiguity = resolve_ambiguity
        self.debug = debug

        self.FIRST = analysis.FIRST
        self.NULLABLE = analysis.NULLABLE
        self.callbacks = parser_conf.callbacks
        self.predictions = {}   # nonterminal -> list of rules it can expand to

        ## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than
        # the slow 'isupper' in is_terminal.
        self.TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if sym.is_term }
        self.NON_TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if not sym.is_term }

        self.forest_sum_visitor = None
        for rule in parser_conf.rules:
            if rule.origin not in self.predictions:
                self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)]

            ## Detect if any rules have priorities set. If the user specified priority = "none" then
            # the priorities will be stripped from all rules before they reach us, allowing us to
            # skip the extra tree walk. We'll also skip this if the user just didn't specify priorities
            # on any rules.
            if self.forest_sum_visitor is None and rule.options.priority is not None:
                self.forest_sum_visitor = ForestSumVisitor

        self.term_matcher = term_matcher

    def predict_and_complete(self, i, to_scan, columns, transitives):
        """The core Earley Predictor and Completer.
        At each stage of the input, we handling any completed items (things
        that matched on the last cycle) and use those to predict what should
        come next in the input stream. The completions and any predicted
        non-terminals are recursively processed until we reach a set of,
        which can be added to the scan list for the next scanner cycle."""
        # Held Completions (H in E.Scotts paper).
        node_cache = {}
        held_completions = {}

        column = columns[i]
        # R (items) = Ei (column.items)
        items = deque(column)
        while items:
            item = items.pop()    # remove an element, A say, from R

            ### The Earley completer
            if item.is_complete:   ### (item.s == string)
                if item.node is None:
                    label = (item.s, item.start, i)
                    item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    item.node.add_family(item.s, item.rule, item.start, None, None)

                # create_leo_transitives(item.rule.origin, item.start)

                ###R Joop Leo right recursion Completer
                if item.rule.origin in transitives[item.start]:
                    transitive = transitives[item.start][item.s]
                    if transitive.previous in transitives[transitive.column]:
                        root_transitive = transitives[transitive.column][transitive.previous]
                    else:
                        root_transitive = transitive

                    new_item = Item(transitive.rule, transitive.ptr, transitive.start)
                    label = (root_transitive.s, root_transitive.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_path(root_transitive, item.node)
                    if new_item.expect in self.TERMINALS:
                        # Add (B :: aC.B, h, y) to Q
                        to_scan.add(new_item)
                    elif new_item not in column:
                        # Add (B :: aC.B, h, y) to Ei and R
                        column.add(new_item)
                        items.append(new_item)
                ###R Regular Earley completer
                else:
                    # Empty has 0 length. If we complete an empty symbol in a particular
                    # parse step, we need to be able to use that same empty symbol to complete
                    # any predictions that result, that themselves require empty. Avoids
                    # infinite recursion on empty symbols.
                    # held_completions is 'H' in E.Scott's paper.
                    is_empty_item = item.start == i
                    if is_empty_item:
                        held_completions[item.rule.origin] = item.node

                    originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s]
                    for originator in originators:
                        new_item = originator.advance()
                        label = (new_item.s, originator.start, i)
                        new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                        new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node)
                        if new_item.expect in self.TERMINALS:
                            # Add (B :: aC.B, h, y) to Q
                            to_scan.add(new_item)
                        elif new_item not in column:
                            # Add (B :: aC.B, h, y) to Ei and R
                            column.add(new_item)
                            items.append(new_item)

            ### The Earley predictor
            elif item.expect in self.NON_TERMINALS: ### (item.s == lr0)
                new_items = []
                for rule in self.predictions[item.expect]:
                    new_item = Item(rule, 0, i)
                    new_items.append(new_item)

                # Process any held completions (H).
                if item.expect in held_completions:
                    new_item = item.advance()
                    label = (new_item.s, item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect])
                    new_items.append(new_item)

                for new_item in new_items:
                    if new_item.expect in self.TERMINALS:
                        to_scan.add(new_item)
                    elif new_item not in column:
                        column.add(new_item)
                        items.append(new_item)

    def _parse(self, stream, columns, to_scan, start_symbol=None):

        def is_quasi_complete(item):
            # Quasi-complete (Leo): everything right of the dot is nullable,
            # so the item will complete without consuming further input.
            if item.is_complete:
                return True

            quasi = item.advance()
            while not quasi.is_complete:
                if quasi.expect not in self.NULLABLE:
                    return False
                if quasi.rule.origin == start_symbol and quasi.expect == start_symbol:
                    return False
                quasi = quasi.advance()
            return True

        def create_leo_transitives(origin, start):
            visited = set()
            to_create = []
            trule = None
            previous = None

            ### Recursively walk backwards through the Earley sets until we find the
            # first transitive candidate. If this is done continuously, we shouldn't
            # have to walk more than 1 hop.
            while True:
                if origin in transitives[start]:
                    previous = trule = transitives[start][origin]
                    break

                is_empty_rule = not self.FIRST[origin]
                if is_empty_rule:
                    break

                candidates = [ candidate for candidate in columns[start] if candidate.expect is not None and origin == candidate.expect ]
                if len(candidates) != 1:
                    break
                originator = next(iter(candidates))

                if originator is None or originator in visited:
                    break

                visited.add(originator)
                if not is_quasi_complete(originator):
                    break

                trule = originator.advance()
                if originator.start != start:
                    visited.clear()

                to_create.append((origin, start, originator))
                origin = originator.rule.origin
                start = originator.start

            # If a suitable Transitive candidate is not found, bail.
            if trule is None:
                return

            #### Now walk forwards and create Transitive Items in each set we walked through; and link
            # each transitive item to the next set forwards.
            while to_create:
                origin, start, originator = to_create.pop()
                titem = None
                if previous is not None:
                    titem = previous.next_titem = TransitiveItem(origin, trule, originator, previous.column)
                else:
                    titem = TransitiveItem(origin, trule, originator, start)
                previous = transitives[start][origin] = titem

        def scan(i, token, to_scan):
            """The core Earley Scanner.
            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""
            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            transitives.append({})
            node_cache = {}

            for item in set(to_scan):
                if match(item.expect, token):
                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)

                    if new_item.expect in self.TERMINALS:
                        # add (B ::= Aai+1.B, h, y) to Q'
                        next_to_scan.add(new_item)
                    else:
                        # add (B ::= Aa+1.B, h, y) to Ei+1
                        next_set.add(new_item)

            if not next_set and not next_to_scan:
                expect = {i.expect.name for i in to_scan}
                raise UnexpectedToken(token, expect, considered_rules = set(to_scan))

            return next_to_scan

        # Define parser functions
        match = self.term_matcher

        # Cache for nodes & tokens created in a particular parse step.
        transitives = [{}]

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
        i = 0
        for token in stream:
            self.predict_and_complete(i, to_scan, columns, transitives)

            to_scan = scan(i, token, to_scan)
            i += 1

        self.predict_and_complete(i, to_scan, columns, transitives)

        ## Column is now the final column in the parse.
        assert i == len(columns)-1
        return to_scan

    def parse(self, stream, start):
        """Parse the token stream from start symbol `start`; return the tree
        produced by the forest-to-tree visitor (ambiguous or not, depending on
        `resolve_ambiguity`)."""
        assert start, start
        start_symbol = NonTerminal(start)

        columns = [set()]
        to_scan = set()     # The scan buffer. 'Q' in E.Scott's paper.

        ## Predict for the start_symbol.
        # Add predicted items to the first Earley set (for the predictor) if they
        # result in a non-terminal, or the scanner if they result in a terminal.
        for rule in self.predictions[start_symbol]:
            item = Item(rule, 0, 0)
            if item.expect in self.TERMINALS:
                to_scan.add(item)
            else:
                columns[0].add(item)

        to_scan = self._parse(stream, columns, to_scan, start_symbol)

        # If the parse was successful, the start
        # symbol should have been completed in the last step of the Earley cycle, and will be in
        # this column. Find the item for the start_symbol, which is the root of the SPPF tree.
        solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0]
        if self.debug:
            # NOTE(review): if `solutions` is empty and debug is set,
            # `solutions[0]` raises IndexError here before the UnexpectedEOF
            # below gets a chance — confirm whether that is intended.
            from .earley_forest import ForestToPyDotVisitor
            try:
                debug_walker = ForestToPyDotVisitor()
            except ImportError:
                logging.warning("Cannot find dependency 'pydot', will not generate sppf debug image")
            else:
                debug_walker.visit(solutions[0], "sppf.png")

        if not solutions:
            expected_tokens = [t.expect for t in to_scan]
            raise UnexpectedEOF(expected_tokens)
        elif len(solutions) > 1:
            assert False, 'Earley should not generate multiple start symbol items!'

        # Perform our SPPF -> AST conversion using the right ForestVisitor.
        forest_tree_visitor_cls = ForestToTreeVisitor if self.resolve_ambiguity else ForestToAmbiguousTreeVisitor
        forest_tree_visitor = forest_tree_visitor_cls(self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor())

        return forest_tree_visitor.visit(solutions[0])
class ApplyCallbacks(Transformer_InPlace):
    """In-place transformer that replaces every 'drv' node with the result of
    its rule's postprocess callback applied to the node's children."""
    def __init__(self, postprocess):
        self.postprocess = postprocess

    @v_args(meta=True)
    def drv(self, children, meta):
        callback = self.postprocess[meta.rule]
        return callback(children)
# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com
from ..exceptions import UnexpectedToken
from ..lexer import Token
from ..utils import Enumerator, Serialize
from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable
from .lalr_puppet import ParserPuppet
###{standalone
class LALR_Parser(object):
    """Front-end tying LALR table construction to the runtime _Parser engine."""
    def __init__(self, parser_conf, debug=False):
        assert all(r.options.priority is None for r in parser_conf.rules), "LALR doesn't yet support prioritization"
        analyzer = LALR_Analyzer(parser_conf, debug=debug)
        analyzer.compute_lalr()

        self.parser_conf = parser_conf
        self._parse_table = analyzer.parse_table
        self.parser = _Parser(analyzer.parse_table, parser_conf.callbacks, debug)

    @classmethod
    def deserialize(cls, data, memo, callbacks):
        """Alternate constructor: rebuild a parser from serialized tables."""
        inst = cls.__new__(cls)
        inst._parse_table = IntParseTable.deserialize(data, memo)
        inst.parser = _Parser(inst._parse_table, callbacks)
        return inst

    def serialize(self, memo):
        return self._parse_table.serialize(memo)

    def parse(self, *args):
        return self.parser.parse(*args)
class _Parser:
    """Runtime LALR(1) shift/reduce engine driving a precomputed parse table."""
    def __init__(self, parse_table, callbacks, debug=False):
        self.parse_table = parse_table
        self.callbacks = callbacks    # rule -> callable that builds the tree value
        self.debug = debug

    def parse(self, seq, start, set_state=None, value_stack=None, state_stack=None):
        """Parse token sequence `seq` from start symbol `start`.

        `set_state` is called on every shift (contextual-lexer hook);
        `value_stack`/`state_stack` allow a suspended parse to be resumed
        (see ParserPuppet.resume_parse).
        """
        token = None
        stream = iter(seq)
        states = self.parse_table.states
        start_state = self.parse_table.start_states[start]
        end_state = self.parse_table.end_states[start]

        state_stack = state_stack or [start_state]
        value_stack = value_stack or []

        if set_state: set_state(start_state)

        def get_action(token):
            # Table lookup for the current state; on a miss raise
            # UnexpectedToken, attaching a ParserPuppet so the caller can
            # inspect and resume the parse.
            state = state_stack[-1]
            try:
                return states[state][token.type]
            except KeyError:
                expected = [s for s in states[state].keys() if s.isupper()]  # report terminal names only (uppercase by convention)
                try:
                    puppet = ParserPuppet(self, state_stack, value_stack, start, stream, set_state)
                except NameError:
                    # ParserPuppet may not be defined in some builds; tolerate.
                    puppet = None
                raise UnexpectedToken(token, expected, state=state, puppet=puppet)

        def reduce(rule):
            # Pop |rhs| states/values, run the rule callback, then goto on
            # the reduced nonterminal.
            size = len(rule.expansion)
            if size:
                s = value_stack[-size:]
                del state_stack[-size:]
                del value_stack[-size:]
            else:
                s = []

            value = self.callbacks[rule](s)

            _action, new_state = states[state_stack[-1]][rule.origin.name]
            assert _action is Shift
            state_stack.append(new_state)
            value_stack.append(value)

        # Main LALR-parser loop
        try:
            for token in stream:
                while True:
                    action, arg = get_action(token)
                    assert arg != end_state
                    if action is Shift:
                        state_stack.append(arg)
                        value_stack.append(token)
                        if set_state: set_state(arg)
                        break # next token
                    else:
                        reduce(arg)
        except Exception as e:
            if self.debug:
                print("")
                print("STATE STACK DUMP")
                print("----------------")
                for i, s in enumerate(state_stack):
                    print('%d)' % i , s)
                print("")
            raise

        # Input exhausted: feed the $END marker and reduce to completion.
        token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)
        while True:
            _action, arg = get_action(token)
            assert(_action is Reduce)
            reduce(arg)
            if state_stack[-1] == end_state:
                return value_stack[-1]
###}
from functools import wraps
import six
from pyrsistent._pmap import PMap, pmap
from pyrsistent._pset import PSet, pset
from pyrsistent._pvector import PVector, pvector
def freeze(o):
    """
    Recursively convert simple Python containers into pyrsistent versions
    of those containers.
    - list is converted to pvector, recursively
    - dict is converted to pmap, recursively on values (but not keys)
    - set is converted to pset, but not recursively
    - tuple is converted to tuple, recursively.
    Sets and dict keys are not recursively frozen because they do not contain
    mutable data by convention. The main exception to this rule is that
    dict keys and set elements are often instances of mutable objects that
    support hash-by-id, which this function can't convert anyway.
    >>> freeze(set([1, 2]))
    pset([1, 2])
    >>> freeze([1, {'a': 3}])
    pvector([1, pmap({'a': 3})])
    >>> freeze((1, []))
    (1, pvector([]))
    """
    # Dispatch on the exact type; subclasses deliberately fall through
    # unchanged, exactly like the original implementation.
    typ = type(o)
    if typ is tuple:
        return tuple(freeze(element) for element in o)
    if typ is list:
        return pvector(freeze(element) for element in o)
    if typ is dict:
        return pmap({k: freeze(v) for k, v in six.iteritems(o)})
    if typ is set:
        return pset(o)
    return o
def thaw(o):
    """
    Recursively convert pyrsistent containers into simple Python containers.
    - pvector is converted to list, recursively
    - pmap is converted to dict, recursively on values (but not keys)
    - pset is converted to set, but not recursively
    - tuple is converted to tuple, recursively.
    >>> from pyrsistent import s, m, v
    >>> thaw(s(1, 2))
    {1, 2}
    >>> thaw(v(1, m(a=3)))
    [1, {'a': 3}]
    >>> thaw((1, v()))
    (1, [])
    """
    # isinstance checks mirror freeze()'s exact-type checks; the persistent
    # types may be subclassed, so isinstance is used here.
    if isinstance(o, PVector):
        return [thaw(item) for item in o]
    if isinstance(o, PMap):
        return {k: thaw(v) for k, v in o.iteritems()}
    if isinstance(o, PSet):
        return set(o)
    if type(o) is tuple:
        return tuple(thaw(item) for item in o)
    return o
def mutant(fn):
    """
    Convenience decorator to isolate mutation to within the decorated function (with respect
    to the input arguments).
    All arguments to the decorated function will be frozen so that they are guaranteed not to change.
    The return value is also frozen.
    """
    @wraps(fn)
    def inner_f(*args, **kwargs):
        # Freeze every positional and keyword argument before the call, and
        # freeze whatever the wrapped function returns.
        frozen_args = [freeze(arg) for arg in args]
        frozen_kwargs = {freeze(key): freeze(value) for key, value in kwargs.items()}
        return freeze(fn(*frozen_args, **frozen_kwargs))
    return inner_f
import six
import sys
from pyrsistent._checked_types import (
CheckedPMap,
CheckedPSet,
CheckedPVector,
CheckedType,
InvariantException,
_restore_pickle,
get_type,
maybe_parse_user_type,
maybe_parse_many_user_types,
)
from pyrsistent._checked_types import optional as optional_type
from pyrsistent._checked_types import wrap_invariant
import inspect
PY2 = sys.version_info[0] < 3
def set_fields(dct, bases, name):
    """Collect field specifications into ``dct[name]``.

    Merges the field dicts inherited from ``bases`` (later bases override
    earlier ones, matching the original ``dict(sum(...))`` behaviour) and then
    moves any ``_PField`` instances declared directly in the class body from
    ``dct`` into the merged field dict. Called from the record/class
    metaclasses.
    """
    # dict.update in a loop instead of dict(sum(list, [])): same override
    # semantics, but avoids quadratic list concatenation.
    merged = {}
    for base in bases:
        merged.update(base.__dict__.get(name, {}))
    dct[name] = merged
    # Pull field declarations out of the class namespace so they don't end up
    # as plain class attributes.
    for k, v in list(dct.items()):
        if isinstance(v, _PField):
            dct[name][k] = v
            del dct[k]
def check_global_invariants(subject, invariants):
    """Run all global invariants against *subject*.

    Raises InvariantException carrying the error codes of every failing
    invariant; returns None when everything passes.
    """
    failures = []
    for invariant in invariants:
        is_ok, error_code = invariant(subject)
        if not is_ok:
            failures.append(error_code)
    if failures:
        raise InvariantException(tuple(failures), (), 'Global invariant failed')
def serialize(serializer, format, value):
    """Serialize *value*, delegating to the CheckedType's own serializer when
    no explicit serializer was configured for the field."""
    use_builtin = serializer is PFIELD_NO_SERIALIZER and isinstance(value, CheckedType)
    if use_builtin:
        return value.serialize(format)
    return serializer(format, value)
def check_type(destination_cls, field, name, value):
    """Raise PTypeError if *value* matches none of the field's declared types.

    A field with no declared types (empty ``field.type``) accepts anything.
    """
    if not field.type:
        return
    if any(isinstance(value, get_type(t)) for t in field.type):
        return
    actual_type = type(value)
    message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__)
    raise PTypeError(destination_cls, name, field.type, actual_type, message)
def is_type_cls(type_cls, field_type):
    """Return whether the field's (first) declared type is a subclass of *type_cls*.

    NOTE(review): a ``set``-typed field_type is accepted unconditionally here,
    without inspecting its members — confirm this shortcut against callers.
    """
    if type(field_type) is set:
        return True
    types = tuple(field_type)
    if not types:
        return False
    # Only the first declared type is inspected.
    return issubclass(get_type(types[0]), type_cls)
def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
    """Return True when the field's factory accepts an ``ignore_extra`` keyword
    and the ignore_extra flag should therefore be forwarded to it."""
    # ignore_extra param has default False value, for speed purpose no need to propagate False
    if not ignore_extra:
        return False
    if not is_type_cls(type_cls, field.type):
        return False
    # Introspect the factory's signature for an 'ignore_extra' parameter.
    if PY2:
        parameter_names = inspect.getargspec(field.factory).args
    else:
        parameter_names = inspect.signature(field.factory).parameters
    return 'ignore_extra' in parameter_names
class _PField(object):
__slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
def __init__(self, type, invariant, initial, mandatory, factory, serializer):
self.type = type
self.invariant = invariant
self.initial = initial
self.mandatory = mandatory
self._factory = factory
self.serializer = serializer
@property
def factory(self):
# If no factory is specified and the type is another CheckedType use the factory method of that CheckedType
if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
typ = get_type(tuple(self.type)[0])
if issubclass(typ, CheckedType):
return typ.create
return self._factory
# Sentinels distinguishing "not specified" from legitimate user-supplied
# values; they are compared by identity/equality throughout this module.
PFIELD_NO_TYPE = ()
PFIELD_NO_INVARIANT = lambda _: (True, None)  # invariant that always passes
PFIELD_NO_FACTORY = lambda x: x  # identity factory
PFIELD_NO_INITIAL = object()  # unique marker: "no initial value configured"
PFIELD_NO_SERIALIZER = lambda _, value: value  # pass-through serializer
def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
          mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
    """
    Field specification factory for :py:class:`PRecord`.
    :param type: a type or iterable with types that are allowed for this field
    :param invariant: a function specifying an invariant that must hold for the field
    :param initial: value of field if not specified when instantiating the record
    :param mandatory: boolean specifying if the field is mandatory or not
    :param factory: function called when field is set.
    :param serializer: function that returns a serialized version of the field
    """
    # NB: We have to check this predicate separately from the predicates in
    # `maybe_parse_user_type` et al. because this one is related to supporting
    # the argspec for `field`, while those are related to supporting the valid
    # ways to specify types.
    # Multiple types must be passed in one of the following containers. Note
    # that a type that is a subclass of one of these containers, like a
    # `collections.namedtuple`, will work as expected, since we check
    # `isinstance` and not `issubclass`.
    if isinstance(type, (list, set, tuple)):
        types = set(maybe_parse_many_user_types(type))
    else:
        types = set(maybe_parse_user_type(type))
    # Only user-supplied callables are wrapped; the no-op sentinel is kept as-is.
    if invariant != PFIELD_NO_INVARIANT and callable(invariant):
        invariant_function = wrap_invariant(invariant)
    else:
        invariant_function = invariant
    spec = _PField(type=types, invariant=invariant_function, initial=initial,
                   mandatory=mandatory, factory=factory, serializer=serializer)
    _check_field_parameters(spec)
    return spec
def _check_field_parameters(field):
    """Validate a freshly constructed _PField; raise TypeError on a bad spec."""
    for t in field.type:
        # Types may be actual classes or fully qualified name strings
        # (resolved lazily elsewhere).
        if not (isinstance(t, type) or isinstance(t, six.string_types)):
            raise TypeError('Type parameter expected, not {0}'.format(type(t)))
    has_initial = field.initial is not PFIELD_NO_INITIAL
    if has_initial and not callable(field.initial) and \
            field.type and not any(isinstance(field.initial, t) for t in field.type):
        raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))
    for attribute, label in ((field.invariant, 'Invariant'),
                             (field.factory, 'Factory'),
                             (field.serializer, 'Serializer')):
        if not callable(attribute):
            raise TypeError('{0} must be callable'.format(label))
class PTypeError(TypeError):
    """
    Raised when trying to assign a value with a type that doesn't match the declared type.
    Attributes:
    source_class -- The class of the record
    field -- Field name
    expected_types -- Types allowed for the field
    actual_type -- The non matching type
    """
    def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
        super(PTypeError, self).__init__(*args, **kwargs)
        # Keep the full context of the failed assignment for callers that
        # want to build richer error reports.
        self.source_class = source_class
        self.field = field
        self.expected_types = expected_types
        self.actual_type = actual_type
# Suffix appended to the generated class name for each checked sequence base
# (see _make_seq_field_type below).
SEQ_FIELD_TYPE_SUFFIXES = {
    CheckedPVector: "PVector",
    CheckedPSet: "PSet",
}
# Global dictionary to hold auto-generated field types: used for unpickling
_seq_field_types = {}
def _restore_seq_field_pickle(checked_class, item_type, data):
    """Unpickling function for auto-generated PVec/PSet field types."""
    # The generated class cannot be pickled by reference, so look it up (or
    # regenerate it) from the module-level registry.
    generated_type = _seq_field_types[checked_class, item_type]
    return _restore_pickle(generated_type, data)
def _types_to_names(types):
    """Convert a tuple of types to a human-readable string."""
    names = [get_type(typ).__name__.capitalize() for typ in types]
    return "".join(names)
def _make_seq_field_type(checked_class, item_type):
    """Create (or fetch from cache) a subclass of *checked_class* whose items
    are required to be of *item_type*."""
    cached = _seq_field_types.get((checked_class, item_type))
    if cached is not None:
        return cached

    class TheType(checked_class):
        __type__ = item_type

        def __reduce__(self):
            # Generated classes can't be pickled by reference; rebuild them
            # through the module-level helper instead.
            return (_restore_seq_field_pickle,
                    (checked_class, item_type, list(self)))

    TheType.__name__ = _types_to_names(TheType._checked_types) + SEQ_FIELD_TYPE_SUFFIXES[checked_class]
    _seq_field_types[checked_class, item_type] = TheType
    return TheType
def _sequence_field(checked_class, item_type, optional, initial):
    """
    Create checked field for either ``PSet`` or ``PVector``.
    :param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param initial: Initial value to pass to factory.
    :return: A ``field`` containing a checked class.
    """
    TheType = _make_seq_field_type(checked_class, item_type)
    if optional:
        # NOTE: the parameter names of this inner factory are significant --
        # 'ignore_extra' is discovered by signature introspection in
        # is_field_ignore_extra_complaint(), so the factory must mirror the
        # signature of TheType.create.
        def factory(argument, _factory_fields=None, ignore_extra=False):
            if argument is None:
                return None
            else:
                return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
    else:
        factory = TheType.create
    # The initial value is run through the factory once, at field-definition
    # time, so the stored initial is already a checked instance (or None).
    return field(type=optional_type(TheType) if optional else TheType,
                 factory=factory, mandatory=True,
                 initial=factory(initial))
def pset_field(item_type, optional=False, initial=()):
    """
    Create checked ``PSet`` field.
    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` can be used as a value for this field.
    :param initial: Initial value to pass to factory if no value is given
        for the field.
    :return: A ``field`` containing a ``CheckedPSet`` of the given type.
    """
    return _sequence_field(CheckedPSet, item_type, optional, initial)
def pvector_field(item_type, optional=False, initial=()):
    """
    Create checked ``PVector`` field.
    :param item_type: The required type for the items in the vector.
    :param optional: If true, ``None`` can be used as a value for this field.
    :param initial: Initial value to pass to factory if no value is given
        for the field.
    :return: A ``field`` containing a ``CheckedPVector`` of the given type.
    """
    return _sequence_field(CheckedPVector, item_type, optional, initial)
# Invariant that always passes.
# NOTE(review): _valid appears unused within this module -- possibly kept for
# backwards compatibility; confirm before removing.
_valid = lambda item: (True, "")
# Global dictionary to hold auto-generated field types: used for unpickling
_pmap_field_types = {}
def _restore_pmap_field_pickle(key_type, value_type, data):
    """Unpickling function for auto-generated PMap field types."""
    # Look the generated class up in the registry, then rebuild the instance.
    generated_type = _pmap_field_types[key_type, value_type]
    return _restore_pickle(generated_type, data)
def _make_pmap_field_type(key_type, value_type):
    """Create (or fetch from cache) a CheckedPMap subclass with the given key
    and value types."""
    cached = _pmap_field_types.get((key_type, value_type))
    if cached is not None:
        return cached

    class TheMap(CheckedPMap):
        __key_type__ = key_type
        __value_type__ = value_type

        def __reduce__(self):
            # Generated classes can't be pickled by reference; rebuild them
            # through the module-level helper instead.
            return (_restore_pmap_field_pickle,
                    (self.__key_type__, self.__value_type__, dict(self)))

    TheMap.__name__ = "{0}To{1}PMap".format(
        _types_to_names(TheMap._checked_key_types),
        _types_to_names(TheMap._checked_value_types))
    _pmap_field_types[key_type, value_type] = TheMap
    return TheMap
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
    """
    Create a checked ``PMap`` field.
    :param key_type: The required type for the keys of the map.
    :param value_type: The required type for the values of the map.
    :param optional: If true, ``None`` can be used as a value for this field.
    :param invariant: Pass-through to ``field``.
    :return: A ``field`` containing a ``CheckedPMap``.
    """
    TheMap = _make_pmap_field_type(key_type, value_type)
    if optional:
        def factory(argument):
            return None if argument is None else TheMap.create(argument)
    else:
        factory = TheMap.create
    return field(mandatory=True, initial=TheMap(),
                 type=optional_type(TheMap) if optional else TheMap,
                 factory=factory, invariant=invariant)
import six
from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants)
from pyrsistent._field_common import (
set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
)
from pyrsistent._transformations import transform
def _is_pclass(bases):
return len(bases) == 1 and bases[0] == CheckedType
class PClassMeta(type):
    """Metaclass collecting field specifications and invariants declared on
    PClass subclasses, and building the matching __slots__."""
    def __new__(mcs, name, bases, dct):
        set_fields(dct, bases, name='_pclass_fields')
        store_invariants(dct, bases, '_pclass_invariants', '__invariant__')
        # One slot per declared field, plus the freeze flag set at the end of
        # PClass.__new__.
        dct['__slots__'] = ('_pclass_frozen',) + tuple(dct['_pclass_fields'])
        # There must only be one __weakref__ entry in the inheritance hierarchy,
        # lets put it on the top level class.
        if _is_pclass(bases):
            dct['__slots__'] += ('__weakref__',)
        return super(PClassMeta, mcs).__new__(mcs, name, bases, dct)
# Unique sentinel distinguishing "attribute not set" from a stored None.
_MISSING_VALUE = object()
def _check_and_set_attr(cls, field, name, value, result, invariant_errors):
    """Type-check *value*, run the field invariant, and either set the
    attribute on *result* or record the error code in *invariant_errors*."""
    check_type(cls, field, name, value)
    is_ok, error_code = field.invariant(value)
    if is_ok:
        setattr(result, name, value)
    else:
        invariant_errors.append(error_code)
@six.add_metaclass(PClassMeta)
class PClass(CheckedType):
    """
    A PClass is a python class with a fixed set of specified fields. PClasses are declared as python classes inheriting
    from PClass. It is defined the same way that PRecords are and behaves like a PRecord in all aspects except that it
    is not a PMap and hence not a collection but rather a plain Python object.
    More documentation and examples of PClass usage is available at https://github.com/tobgu/pyrsistent
    """
    def __new__(cls, **kwargs):    # Support *args?
        result = super(PClass, cls).__new__(cls)
        # _factory_fields limits which fields are run through their factory;
        # it is supplied internally (by set()/create()/the evolver) to avoid
        # re-running factories on values that already passed through them.
        factory_fields = kwargs.pop('_factory_fields', None)
        ignore_extra = kwargs.pop('ignore_extra', None)
        missing_fields = []
        invariant_errors = []
        for name, field in cls._pclass_fields.items():
            if name in kwargs:
                if factory_fields is None or name in factory_fields:
                    # Forward ignore_extra only to factories that accept it.
                    if is_field_ignore_extra_complaint(PClass, field, ignore_extra):
                        value = field.factory(kwargs[name], ignore_extra=ignore_extra)
                    else:
                        value = field.factory(kwargs[name])
                else:
                    value = kwargs[name]
                _check_and_set_attr(cls, field, name, value, result, invariant_errors)
                del kwargs[name]
            elif field.initial is not PFIELD_NO_INITIAL:
                # Callable initials are evaluated once per instance.
                initial = field.initial() if callable(field.initial) else field.initial
                _check_and_set_attr(
                    cls, field, name, initial, result, invariant_errors)
            elif field.mandatory:
                missing_fields.append('{0}.{1}'.format(cls.__name__, name))
        if invariant_errors or missing_fields:
            raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed')
        if kwargs:
            # Anything left over does not correspond to a declared field.
            raise AttributeError("'{0}' are not among the specified fields for {1}".format(
                ', '.join(kwargs), cls.__name__))
        check_global_invariants(result, cls._pclass_invariants)
        # From this point on the instance is immutable (enforced by __setattr__).
        result._pclass_frozen = True
        return result

    def set(self, *args, **kwargs):
        """
        Set a field in the instance. Returns a new instance with the updated value. The original instance remains
        unmodified. Accepts key-value pairs or single string representing the field name and a value.
        >>> from pyrsistent import PClass, field
        >>> class AClass(PClass):
        ...     x = field()
        ...
        >>> a = AClass(x=1)
        >>> a2 = a.set(x=2)
        >>> a3 = a.set('x', 3)
        >>> a
        AClass(x=1)
        >>> a2
        AClass(x=2)
        >>> a3
        AClass(x=3)
        """
        if args:
            kwargs[args[0]] = args[1]
        # Only the newly supplied fields are run through their factories;
        # existing values are copied over untouched.
        factory_fields = set(kwargs)
        for key in self._pclass_fields:
            if key not in kwargs:
                value = getattr(self, key, _MISSING_VALUE)
                if value is not _MISSING_VALUE:
                    kwargs[key] = value
        return self.__class__(_factory_fields=factory_fields, **kwargs)

    @classmethod
    def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
        """
        Factory method. Will create a new PClass of the current type and assign the values
        specified in kwargs.
        :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
        in the set of fields on the PClass.
        """
        if isinstance(kwargs, cls):
            return kwargs
        if ignore_extra:
            # Drop unknown keys up front so __new__ doesn't complain.
            kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs}
        return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs)

    def serialize(self, format=None):
        """
        Serialize the current PClass using custom serializer functions for fields where
        such have been supplied.
        """
        result = {}
        for name in self._pclass_fields:
            value = getattr(self, name, _MISSING_VALUE)
            if value is not _MISSING_VALUE:
                result[name] = serialize(self._pclass_fields[name].serializer, format, value)
        return result

    def transform(self, *transformations):
        """
        Apply transformations to the currency PClass. For more details on transformations see
        the documentation for PMap. Transformations on PClasses do not support key matching
        since the PClass is not a collection. Apart from that the transformations available
        for other persistent types work as expected.
        """
        return transform(self, transformations)

    def __eq__(self, other):
        # Field-wise comparison; unset fields compare equal to unset fields.
        if isinstance(other, self.__class__):
            for name in self._pclass_fields:
                if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE):
                    return False
            return True
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # May want to optimize this by caching the hash somehow
        return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields))

    def __setattr__(self, key, value):
        # Once _pclass_frozen is set (end of __new__) the instance is immutable.
        if getattr(self, '_pclass_frozen', False):
            raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value))
        super(PClass, self).__setattr__(key, value)

    def __delattr__(self, key):
        raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key))

    def _to_dict(self):
        # Snapshot of all currently-set fields as a plain dict.
        result = {}
        for key in self._pclass_fields:
            value = getattr(self, key, _MISSING_VALUE)
            if value is not _MISSING_VALUE:
                result[key] = value
        return result

    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__,
                                 ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items()))

    def __reduce__(self):
        # Pickling support
        data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key))
        return _restore_pickle, (self.__class__, data,)

    def evolver(self):
        """
        Returns an evolver for this object.
        """
        return _PClassEvolver(self, self._to_dict())

    def remove(self, name):
        """
        Remove attribute given by name from the current instance. Raises AttributeError if the
        attribute doesn't exist.
        """
        evolver = self.evolver()
        del evolver[name]
        return evolver.persistent()
class _PClassEvolver(object):
    """Mutable helper used to batch several updates to a PClass and then
    produce a new frozen instance via persistent()."""
    __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')

    def __init__(self, original, initial_dict):
        self._pclass_evolver_original = original
        self._pclass_evolver_data = initial_dict
        self._pclass_evolver_data_is_dirty = False
        # Names set through the evolver; only these are re-run through field
        # factories when persistent() rebuilds the instance.
        self._factory_fields = set()

    def __getitem__(self, item):
        return self._pclass_evolver_data[item]

    def set(self, key, value):
        # Identity check: setting the very same object is a no-op and does
        # not mark the evolver dirty.
        if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
            self._pclass_evolver_data[key] = value
            self._factory_fields.add(key)
            self._pclass_evolver_data_is_dirty = True
        return self

    def __setitem__(self, key, value):
        self.set(key, value)

    def remove(self, item):
        if item in self._pclass_evolver_data:
            del self._pclass_evolver_data[item]
            self._factory_fields.discard(item)
            self._pclass_evolver_data_is_dirty = True
            return self
        raise AttributeError(item)

    def __delitem__(self, item):
        self.remove(item)

    def persistent(self):
        # Rebuild only if something actually changed; otherwise hand back the
        # original instance.
        if self._pclass_evolver_data_is_dirty:
            return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
                                                           **self._pclass_evolver_data)
        return self._pclass_evolver_original

    def __setattr__(self, key, value):
        # Attribute assignment outside the evolver's own slots is routed to
        # set(), so `ev.x = 1` behaves like `ev['x'] = 1`.
        if key not in self.__slots__:
            self.set(key, value)
        else:
            super(_PClassEvolver, self).__setattr__(key, value)

    def __getattr__(self, item):
        return self[item]
import re
import six
try:
from inspect import Parameter, signature
except ImportError:
signature = None
try:
from inspect import getfullargspec as getargspec
except ImportError:
from inspect import getargspec
_EMPTY_SENTINEL = object()
def inc(x):
    """Transform function: return *x* incremented by one."""
    return x + 1
def dec(x):
    """Transform function: return *x* decremented by one."""
    return x - 1
def discard(evolver, key):
    """ Discard the element and returns a structure without the discarded elements """
    # EAFP: deleting a missing key is a silent no-op rather than an error.
    try:
        del evolver[key]
    except KeyError:
        pass
# Matchers
def rex(expr):
    """ Regular expression matcher to use together with transform functions """
    pattern = re.compile(expr)

    def matcher(key):
        # Only string keys can match; anything else is rejected outright.
        return isinstance(key, six.string_types) and pattern.match(key)

    return matcher
def ny(_):
    """ Matcher that matches any value """
    # The argument is deliberately ignored.
    return True
# Support functions
def _chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def transform(structure, transformations):
    """Apply a flat sequence of (path, command) pairs to *structure* and
    return the resulting (new) structure."""
    result = structure
    for path, command in _chunks(transformations, 2):
        result = _do_to_path(result, path, command)
    return result
def _do_to_path(structure, path, command):
    """Recursively descend along *path* and apply *command* at the leaves."""
    if not path:
        # End of the path: a callable command is applied to the current value,
        # anything else is used as a plain replacement value.
        return command(structure) if callable(command) else command
    matches = _get_keys_and_values(structure, path[0])
    return _update_structure(structure, matches, path[1:], command)
def _items(structure):
    """Return (key, value) pairs for mappings, (index, value) pairs otherwise."""
    try:
        return structure.items()
    except AttributeError:
        # Support wider range of structures by adding a transform_items() or similar?
        return list(enumerate(structure))
def _get(structure, key, default):
try:
if hasattr(structure, '__getitem__'):
return structure[key]
return getattr(structure, key)
except (IndexError, KeyError):
return default
def _get_keys_and_values(structure, key_spec):
    """Resolve one path element to a list of matching (key, value) pairs."""
    if not callable(key_spec):
        # Non-callables are used as-is as a key; a missing key yields the
        # sentinel so the structure can be expanded later on.
        return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
    # Support predicates as callable objects in the path.
    arity = _get_arity(key_spec)
    if arity == 1:
        # Unary predicates are called with the "key" of the path
        # - eg a key in a mapping, an index in a sequence.
        return [(k, v) for k, v in _items(structure) if key_spec(k)]
    if arity == 2:
        # Binary predicates are called with the key and the corresponding
        # value.
        return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
    # Other arities are an error.
    raise ValueError(
        "callable in transform path must take 1 or 2 arguments"
    )
if signature is None:
    # Python 2 fallback: arity from getargspec (positional args minus defaults).
    def _get_arity(f):
        argspec = getargspec(f)
        return len(argspec.args) - len(argspec.defaults or ())
else:
    # Python 3: count required positional parameters only (no defaults,
    # positional-only or positional-or-keyword kinds).
    def _get_arity(f):
        return sum(
            1
            for p
            in signature(f).parameters.values()
            if p.default is Parameter.empty
            and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
        )
def _update_structure(structure, kvs, path, command):
    """Apply *command* under *path* for every matched (key, value) pair and
    return the resulting persistent structure."""
    from pyrsistent._pmap import pmap
    evolver = structure.evolver()
    if not path and command is discard:
        # Do this in reverse to avoid index problems with vectors. See #92.
        for key, _ in reversed(kvs):
            discard(evolver, key)
        return evolver.persistent()
    for key, value in kvs:
        added_empty = value is _EMPTY_SENTINEL
        if added_empty:
            # Allow expansion of structure but make sure to cover the case
            # when an empty pmap is added as leaf node. See #154.
            value = pmap()
        replacement = _do_to_path(value, path, command)
        if replacement is not value or added_empty:
            evolver[key] = replacement
    return evolver.persistent()
from ._compat import Container, Iterable, Sized, Hashable
from functools import reduce
from pyrsistent._pmap import pmap
def _add_to_counters(counters, element):
    """Return a new counters map with *element*'s occurrence count incremented."""
    current = counters.get(element, 0)
    return counters.set(element, current + 1)
class PBag(object):
    """
    A persistent bag/multiset type.
    Requires elements to be hashable, and allows duplicates, but has no
    ordering. Bags are hashable.
    Do not instantiate directly, instead use the factory functions :py:func:`b`
    or :py:func:`pbag` to create an instance.
    Some examples:
    >>> s = pbag([1, 2, 3, 1])
    >>> s2 = s.add(4)
    >>> s3 = s2.remove(1)
    >>> s
    pbag([1, 1, 2, 3])
    >>> s2
    pbag([1, 1, 2, 3, 4])
    >>> s3
    pbag([1, 2, 3, 4])
    """
    __slots__ = ('_counts', '__weakref__')

    def __init__(self, counts):
        # counts: persistent map of element -> occurrence count.
        self._counts = counts

    def add(self, element):
        """
        Add an element to the bag.
        >>> s = pbag([1])
        >>> s2 = s.add(1)
        >>> s3 = s.add(2)
        >>> s2
        pbag([1, 1])
        >>> s3
        pbag([1, 2])
        """
        return PBag(_add_to_counters(self._counts, element))

    def update(self, iterable):
        """
        Update bag with all elements in iterable.
        >>> s = pbag([1])
        >>> s.update([1, 2])
        pbag([1, 1, 2])
        """
        if iterable:
            return PBag(reduce(_add_to_counters, iterable, self._counts))
        return self

    def remove(self, element):
        """
        Remove an element from the bag.
        >>> s = pbag([1, 1, 2])
        >>> s2 = s.remove(1)
        >>> s3 = s.remove(2)
        >>> s2
        pbag([1, 2])
        >>> s3
        pbag([1, 1])
        """
        if element not in self._counts:
            raise KeyError(element)
        elif self._counts[element] == 1:
            # Last occurrence: drop the key entirely so counts stay > 0.
            newc = self._counts.remove(element)
        else:
            newc = self._counts.set(element, self._counts[element] - 1)
        return PBag(newc)

    def count(self, element):
        """
        Return the number of times an element appears.
        >>> pbag([]).count('non-existent')
        0
        >>> pbag([1, 1, 2]).count(1)
        2
        """
        return self._counts.get(element, 0)

    def __len__(self):
        """
        Return the length including duplicates.
        >>> len(pbag([1, 1, 2]))
        3
        """
        return sum(self._counts.itervalues())

    def __iter__(self):
        """
        Return an iterator of all elements, including duplicates.
        >>> list(pbag([1, 1, 2]))
        [1, 1, 2]
        >>> list(pbag([1, 2]))
        [1, 2]
        """
        for elt, count in self._counts.iteritems():
            for i in range(count):
                yield elt

    def __contains__(self, elt):
        """
        Check if an element is in the bag.
        >>> 1 in pbag([1, 1, 2])
        True
        >>> 0 in pbag([1, 2])
        False
        """
        return elt in self._counts

    def __repr__(self):
        return "pbag({0})".format(list(self))

    def __eq__(self, other):
        """
        Check if two bags are equivalent, honoring the number of duplicates,
        and ignoring insertion order.
        >>> pbag([1, 1, 2]) == pbag([1, 2])
        False
        >>> pbag([2, 1, 0]) == pbag([0, 1, 2])
        True
        """
        # Deliberately raises instead of returning NotImplemented.
        if type(other) is not PBag:
            raise TypeError("Can only compare PBag with PBags")
        return self._counts == other._counts

    def __lt__(self, other):
        raise TypeError('PBags are not orderable')

    # Bags have no total order; all ordering comparisons raise.
    __le__ = __lt__
    __gt__ = __lt__
    __ge__ = __lt__

    # Multiset-style operations similar to collections.Counter

    def __add__(self, other):
        """
        Combine elements from two PBags.
        >>> pbag([1, 2, 2]) + pbag([2, 3, 3])
        pbag([1, 2, 2, 2, 3, 3])
        """
        if not isinstance(other, PBag):
            return NotImplemented
        result = self._counts.evolver()
        # Counts are summed per element.
        for elem, other_count in other._counts.iteritems():
            result[elem] = self.count(elem) + other_count
        return PBag(result.persistent())

    def __sub__(self, other):
        """
        Remove elements from one PBag that are present in another.
        >>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4])
        pbag([1, 2, 2])
        """
        if not isinstance(other, PBag):
            return NotImplemented
        result = self._counts.evolver()
        for elem, other_count in other._counts.iteritems():
            newcount = self.count(elem) - other_count
            if newcount > 0:
                result[elem] = newcount
            elif elem in self:
                # Count dropped to zero or below: remove the key entirely.
                result.remove(elem)
        return PBag(result.persistent())

    def __or__(self, other):
        """
        Union: Keep elements that are present in either of two PBags.
        >>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3])
        pbag([1, 2, 2, 2, 3, 3])
        """
        if not isinstance(other, PBag):
            return NotImplemented
        result = self._counts.evolver()
        # Union takes the maximum count per element.
        for elem, other_count in other._counts.iteritems():
            count = self.count(elem)
            newcount = max(count, other_count)
            result[elem] = newcount
        return PBag(result.persistent())

    def __and__(self, other):
        """
        Intersection: Only keep elements that are present in both PBags.
        >>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3])
        pbag([2])
        """
        if not isinstance(other, PBag):
            return NotImplemented
        # Built from scratch: only elements present in both survive.
        result = pmap().evolver()
        for elem, count in self._counts.iteritems():
            newcount = min(count, other.count(elem))
            if newcount > 0:
                result[elem] = newcount
        return PBag(result.persistent())

    def __hash__(self):
        """
        Hash based on value of elements.
        >>> m = pmap({pbag([1, 2]): "it's here!"})
        >>> m[pbag([2, 1])]
        "it's here!"
        >>> pbag([1, 1, 2]) in m
        False
        """
        return hash(self._counts)
# Register PBag with the collection ABCs so isinstance() checks succeed
# without actual inheritance.
Container.register(PBag)
Iterable.register(PBag)
Sized.register(PBag)
Hashable.register(PBag)
def b(*elements):
    """
    Construct a persistent bag.
    Takes an arbitrary number of arguments to insert into the new persistent
    bag.
    >>> b(1, 2, 3, 2)
    pbag([1, 2, 2, 3])
    """
    # Varargs convenience wrapper around pbag().
    return pbag(elements)
def pbag(elements):
    """
    Convert an iterable to a persistent bag.
    Takes an iterable with elements to insert.
    >>> pbag([1, 2, 3, 2])
    pbag([1, 2, 2, 3])
    """
    if not elements:
        # Share a single instance for the empty bag.
        return _EMPTY_PBAG
    counts = pmap()
    for element in elements:
        counts = _add_to_counters(counts, element)
    return PBag(counts)
_EMPTY_PBAG = PBag(pmap())
from ._compat import Iterable
import six
from pyrsistent._compat import Enum, string_types
from pyrsistent._pmap import PMap, pmap
from pyrsistent._pset import PSet, pset
from pyrsistent._pvector import PythonPVector, python_pvector
class CheckedType(object):
    """
    Marker class to enable creation and serialization of checked object graphs.
    """
    __slots__ = ()

    @classmethod
    def create(cls, source_data, _factory_fields=None):
        # Subclasses must provide a factory building an instance from plain data.
        raise NotImplementedError()

    def serialize(self, format=None):
        # Subclasses must provide the inverse of create().
        raise NotImplementedError()
def _restore_pickle(cls, data):
    """Pickle support: rebuild an instance through the class's create() factory."""
    # An empty _factory_fields set means no field is re-run through its factory.
    return cls.create(data, _factory_fields=set())
class InvariantException(Exception):
    """
    Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory
    field is missing.
    Contains two fields of interest:
    invariant_errors, a tuple of error data for the failing invariants
    missing_fields, a tuple of strings specifying the missing names
    """
    def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs):
        # Error codes may be lazy (callables); resolve them eagerly here.
        self.invariant_errors = tuple(code() if callable(code) else code for code in error_codes)
        self.missing_fields = missing_fields
        super(InvariantException, self).__init__(*args, **kwargs)

    def __str__(self):
        base_message = super(InvariantException, self).__str__()
        error_text = ', '.join(str(error) for error in self.invariant_errors)
        missing_text = ', '.join(self.missing_fields)
        return '{0}, invariant_errors=[{1}], missing_fields=[{2}]'.format(
            base_message, error_text, missing_text)
# Consulted by maybe_parse_user_type below.
_preserved_iterable_types = (
    Enum,
)
"""Some types are themselves iterable, but we want to use the type itself and
not its members for the type specification. This defines a set of such types
that we explicitly preserve.
Note that strings are not such types because the string inputs we pass in are
values, not types.
"""
def maybe_parse_user_type(t):
    """Try to coerce a user-supplied type directive into a list of types.
    This function should be used in all places where a user specifies a type,
    for consistency.
    The policy for what defines valid user input should be clear from the implementation.
    """
    # Branch order matters: preserved iterable types (e.g. Enum) and strings
    # are kept whole even though they are iterable.
    if isinstance(t, type) and issubclass(t, _preserved_iterable_types):
        return [t]
    if isinstance(t, string_types):
        return [t]
    if isinstance(t, type) and not isinstance(t, Iterable):
        return [t]
    if isinstance(t, Iterable):
        # Recur to validate contained types as well.
        return tuple(parsed for element in t for parsed in maybe_parse_user_type(element))
    # If this raises because `t` cannot be formatted, so be it.
    raise TypeError(
        'Type specifications must be types or strings. Input: {}'.format(t)
    )
def maybe_parse_many_user_types(ts):
    """Parse an iterable of user-supplied type directives."""
    # Just a different name to communicate that you're parsing multiple user
    # inputs. `maybe_parse_user_type` handles the iterable case anyway.
    return maybe_parse_user_type(ts)
def _store_types(dct, bases, destination_name, source_name):
    """Collect type declarations named *source_name* from the class body and
    all bases, parse them, and store the result under *destination_name*."""
    namespaces = [dct] + [b.__dict__ for b in bases]
    declarations = [ns[source_name] for ns in namespaces if source_name in ns]
    dct[destination_name] = maybe_parse_many_user_types(declarations)
def _merge_invariant_results(result):
verdict = True
data = []
for verd, dat in result:
if not verd:
verdict = False
data.append(dat)
return verdict, tuple(data)
def wrap_invariant(invariant):
    """Wrap an invariant so multi-test results are merged into a single one."""
    # Invariant functions may return the outcome of several tests
    # In those cases the results have to be merged before being passed
    # back to the client.
    def wrapped(*args, **kwargs):
        outcome = invariant(*args, **kwargs)
        # A single (bool, data) pair passes through untouched.
        if isinstance(outcome[0], bool):
            return outcome
        return _merge_invariant_results(outcome)
    return wrapped
def _all_dicts(bases, seen=None):
"""
Yield each class in ``bases`` and each of their base classes.
"""
if seen is None:
seen = set()
for cls in bases:
if cls in seen:
continue
seen.add(cls)
yield cls.__dict__
for b in _all_dicts(cls.__bases__, seen):
yield b
def store_invariants(dct, bases, destination_name, source_name):
    """Gather every callable named ``source_name`` from the class dict and
    all ancestor class dicts (invariants are inherited), wrap each one,
    and store the wrapped tuple under ``destination_name`` in ``dct``.

    Raises TypeError if any declared invariant is not callable.
    """
    invariants = [
        ns[source_name]
        for ns in [dct] + list(_all_dicts(bases))
        if source_name in ns
    ]

    if not all(callable(inv) for inv in invariants):
        raise TypeError('Invariants must be callable')
    dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants)
class _CheckedTypeMeta(type):
    """Metaclass backing checked collection types.

    At class-creation time it folds the ``__type__`` and ``__invariant__``
    declarations of the class and all its bases into the class-level
    ``_checked_types`` / ``_checked_invariants`` tuples, installs a default
    ``__serializer__`` when none is declared, and forces ``__slots__ = ()``.
    """

    def __new__(mcs, name, bases, dct):
        def default_serializer(self, _, value):
            # Nested checked values serialize themselves; everything else
            # passes through untouched.
            if isinstance(value, CheckedType):
                return value.serialize()
            return value

        _store_types(dct, bases, '_checked_types', '__type__')
        store_invariants(dct, bases, '_checked_invariants', '__invariant__')
        if '__serializer__' not in dct:
            dct['__serializer__'] = default_serializer
        dct['__slots__'] = ()
        return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct)
class CheckedTypeError(TypeError):
    """Base error for type mismatches in checked collections.

    Attributes:
        source_class -- the checked collection class that was being built
        expected_types -- the types the collection allows
        actual_type -- the offending type
        actual_value -- the offending value
    """

    def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs):
        super(CheckedTypeError, self).__init__(*args, **kwargs)
        self.actual_value = actual_value
        self.actual_type = actual_type
        self.expected_types = expected_types
        self.source_class = source_class
class CheckedKeyTypeError(CheckedTypeError):
    """
    Raised when trying to set a value using a key with a type that doesn't match the declared key type.

    Attributes:
    source_class -- The class of the collection
    expected_types -- Allowed types
    actual_type -- The non matching type
    actual_value -- Value of the variable with the non matching type
    """
    pass
class CheckedValueTypeError(CheckedTypeError):
    """
    Raised when trying to set a value with a type that doesn't match the declared value type.

    Attributes:
    source_class -- The class of the collection
    expected_types -- Allowed types
    actual_type -- The non matching type
    actual_value -- Value of the variable with the non matching type
    """
    pass
def _get_class(type_name):
module_name, class_name = type_name.rsplit('.', 1)
module = __import__(module_name, fromlist=[class_name])
return getattr(module, class_name)
def get_type(typ):
    """Return ``typ`` unchanged if it is already a class; otherwise treat
    it as a dotted-path string and import the class it names."""
    return typ if isinstance(typ, type) else _get_class(typ)
def get_types(typs):
    """Resolve every entry (class object or dotted string) to a class."""
    return list(map(get_type, typs))
def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError):
    """Verify every element of ``it`` is an instance of at least one of
    ``expected_types``; raise ``exception_type`` on the first mismatch.

    A no-op when ``expected_types`` is empty. Types are resolved lazily
    via get_type so dotted-string specs are only imported when needed.
    """
    if not expected_types:
        return

    for elem in it:
        if any(isinstance(elem, get_type(t)) for t in expected_types):
            continue
        actual = type(elem)
        message = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format(
            source_class=source_class.__name__,
            expected_types=tuple(get_type(et).__name__ for et in expected_types),
            actual_type=actual.__name__)
        raise exception_type(source_class, expected_types, actual, elem, message)
def _invariant_errors(elem, invariants):
return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid]
def _invariant_errors_iterable(it, invariants):
    """Run every invariant against every element of ``it`` and return a
    flat list of the data payloads of all failures.
    """
    # A nested comprehension flattens in O(total) time; the previous
    # sum(list_of_lists, []) pattern re-copies the accumulator on every
    # step and is quadratic in the number of elements.
    return [error
            for elem in it
            for error in _invariant_errors(elem, invariants)]
def optional(*typs):
    """Return a type specification allowing any of ``typs`` or ``None``.

    Convenience helper for checked-type declarations.
    """
    return typs + (type(None),)
def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False):
    """Factory installed as ``create`` on checked sequence/set types.

    Instances of ``cls`` pass straight through. Otherwise, when one of the
    declared element types is itself a CheckedType, elements that do not
    already match a declared type are recursively built via that type's
    own ``create``; all other data is handed to the constructor as-is.
    """
    if isinstance(source_data, cls):
        return source_data

    types = get_types(cls._checked_types)
    checked_type = next((t for t in types if issubclass(t, CheckedType)), None)
    if checked_type is None:
        return cls(source_data)

    def coerce(data):
        if any(isinstance(data, t) for t in types):
            return data
        return checked_type.create(data, ignore_extra=ignore_extra)

    return cls([coerce(data) for data in source_data])
@six.add_metaclass(_CheckedTypeMeta)
class CheckedPVector(PythonPVector, CheckedType):
    """
    A CheckedPVector is a PVector which allows specifying type and invariant checks.

    >>> class Positives(CheckedPVector):
    ...     __type__ = (int, float)
    ...     __invariant__ = lambda n: (n >= 0, 'Negative')
    ...
    >>> Positives([1, 2, 3])
    Positives([1, 2, 3])
    """

    __slots__ = ()

    def __new__(cls, initial=()):
        # Fast path: adopt the internals of an existing plain PythonPVector
        # directly instead of re-inserting (and re-checking) every element.
        if type(initial) == PythonPVector:
            return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail)

        return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent()

    def set(self, key, value):
        # Type/invariant checking happens inside the evolver.
        return self.evolver().set(key, value).persistent()

    def append(self, val):
        return self.evolver().append(val).persistent()

    def extend(self, it):
        return self.evolver().extend(it).persistent()

    create = classmethod(_checked_type_create)

    def serialize(self, format=None):
        # Apply the class serializer to every element; result is a plain list.
        serializer = self.__serializer__
        return list(serializer(format, v) for v in self)

    def __reduce__(self):
        # Pickling support
        return _restore_pickle, (self.__class__, list(self),)

    class Evolver(PythonPVector.Evolver):
        # Evolver that checks types eagerly (raising immediately) and
        # accumulates invariant failures until persistent() is called.
        __slots__ = ('_destination_class', '_invariant_errors')

        def __init__(self, destination_class, vector):
            super(CheckedPVector.Evolver, self).__init__(vector)
            self._destination_class = destination_class
            self._invariant_errors = []

        def _check(self, it):
            # Type mismatch raises here; invariant failures are deferred.
            _check_types(it, self._destination_class._checked_types, self._destination_class)
            error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
            self._invariant_errors.extend(error_data)

        def __setitem__(self, key, value):
            self._check([value])
            return super(CheckedPVector.Evolver, self).__setitem__(key, value)

        def append(self, elem):
            self._check([elem])
            return super(CheckedPVector.Evolver, self).append(elem)

        def extend(self, it):
            # Materialize so the data is checked before any insertion.
            it = list(it)
            self._check(it)
            return super(CheckedPVector.Evolver, self).extend(it)

        def persistent(self):
            if self._invariant_errors:
                raise InvariantException(error_codes=self._invariant_errors)

            result = self._orig_pvector
            if self.is_dirty() or (self._destination_class != type(self._orig_pvector)):
                pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail)
                result = self._destination_class(pv)
                self._reset(result)

            return result

    def __repr__(self):
        return self.__class__.__name__ + "({0})".format(self.tolist())

    __str__ = __repr__

    def evolver(self):
        return CheckedPVector.Evolver(self.__class__, self)
@six.add_metaclass(_CheckedTypeMeta)
class CheckedPSet(PSet, CheckedType):
    """
    A CheckedPSet is a PSet which allows specifying type and invariant checks.

    >>> class Positives(CheckedPSet):
    ...     __type__ = (int, float)
    ...     __invariant__ = lambda n: (n >= 0, 'Negative')
    ...
    >>> Positives([1, 2, 3])
    Positives([1, 2, 3])
    """

    __slots__ = ()

    def __new__(cls, initial=()):
        # PSet is implemented on top of a PMap; when handed one directly,
        # adopt it as-is (internal fast path, elements already checked).
        if type(initial) is PMap:
            return super(CheckedPSet, cls).__new__(cls, initial)

        evolver = CheckedPSet.Evolver(cls, pset())
        for e in initial:
            evolver.add(e)

        return evolver.persistent()

    def __repr__(self):
        # Reuse PSet's repr but substitute the subclass name for the
        # leading 'pset' (4 characters).
        return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:]

    def __str__(self):
        return self.__repr__()

    def serialize(self, format=None):
        # Apply the class serializer to every element; result is a plain set.
        serializer = self.__serializer__
        return set(serializer(format, v) for v in self)

    create = classmethod(_checked_type_create)

    def __reduce__(self):
        # Pickling support
        return _restore_pickle, (self.__class__, list(self),)

    def evolver(self):
        return CheckedPSet.Evolver(self.__class__, self)

    class Evolver(PSet._Evolver):
        # Checks element types eagerly; invariant failures accumulate and
        # are raised when persistent() is called.
        __slots__ = ('_destination_class', '_invariant_errors')

        def __init__(self, destination_class, original_set):
            super(CheckedPSet.Evolver, self).__init__(original_set)
            self._destination_class = destination_class
            self._invariant_errors = []

        def _check(self, it):
            _check_types(it, self._destination_class._checked_types, self._destination_class)
            error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
            self._invariant_errors.extend(error_data)

        def add(self, element):
            self._check([element])
            # Membership is represented as element -> True in the backing map.
            self._pmap_evolver[element] = True
            return self

        def persistent(self):
            if self._invariant_errors:
                raise InvariantException(error_codes=self._invariant_errors)

            if self.is_dirty() or self._destination_class != type(self._original_pset):
                return self._destination_class(self._pmap_evolver.persistent())

            return self._original_pset
class _CheckedMapTypeMeta(type):
    """Metaclass for checked mappings.

    Collects the declared (and inherited) ``__key_type__``,
    ``__value_type__`` and ``__invariant__`` specifications into
    class-level tuples, installs a default pair serializer when none is
    declared, and forces ``__slots__ = ()``.
    """

    def __new__(mcs, name, bases, dct):
        def default_serializer(self, _, key, value):
            # Recursively serialize checked keys/values; pass others through.
            serialized_key = key.serialize() if isinstance(key, CheckedType) else key
            serialized_value = value.serialize() if isinstance(value, CheckedType) else value
            return serialized_key, serialized_value

        _store_types(dct, bases, '_checked_key_types', '__key_type__')
        _store_types(dct, bases, '_checked_value_types', '__value_type__')
        store_invariants(dct, bases, '_checked_invariants', '__invariant__')
        if '__serializer__' not in dct:
            dct['__serializer__'] = default_serializer
        dct['__slots__'] = ()
        return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct)
# Sentinel used by CheckedPMap.__new__ to tell "size argument omitted"
# apart from any real caller-supplied value (a fresh object() cannot
# collide with user data).
_UNDEFINED_CHECKED_PMAP_SIZE = object()
@six.add_metaclass(_CheckedMapTypeMeta)
class CheckedPMap(PMap, CheckedType):
    """
    A CheckedPMap is a PMap which allows specifying type and invariant checks.

    >>> class IntToFloatMap(CheckedPMap):
    ...     __key_type__ = int
    ...     __value_type__ = float
    ...     __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping')
    ...
    >>> IntToFloatMap({1: 1.5, 2: 2.25})
    IntToFloatMap({1: 1.5, 2: 2.25})
    """

    __slots__ = ()

    def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE):
        # When size is supplied, `initial` is a pre-built bucket vector
        # (internal fast path, already checked); otherwise every pair goes
        # through the checking evolver.
        if size is not _UNDEFINED_CHECKED_PMAP_SIZE:
            return super(CheckedPMap, cls).__new__(cls, size, initial)

        evolver = CheckedPMap.Evolver(cls, pmap())
        for k, v in initial.items():
            evolver.set(k, v)

        return evolver.persistent()

    def evolver(self):
        return CheckedPMap.Evolver(self.__class__, self)

    def __repr__(self):
        return self.__class__.__name__ + "({0})".format(str(dict(self)))

    __str__ = __repr__

    def serialize(self, format=None):
        # Apply the class serializer to every pair; result is a plain dict.
        serializer = self.__serializer__
        return dict(serializer(format, k, v) for k, v in self.items())

    @classmethod
    def create(cls, source_data, _factory_fields=None):
        if isinstance(source_data, cls):
            return source_data

        # Recursively apply create methods of checked types if the types of the supplied data
        # does not match any of the valid types.
        key_types = get_types(cls._checked_key_types)
        checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None)
        value_types = get_types(cls._checked_value_types)
        checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None)

        if checked_key_type or checked_value_type:
            return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key,
                             checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value)
                            for key, value in source_data.items()))

        return cls(source_data)

    def __reduce__(self):
        # Pickling support
        return _restore_pickle, (self.__class__, dict(self),)

    class Evolver(PMap._Evolver):
        # Checks key and value types eagerly (with distinct error types);
        # invariant failures accumulate until persistent() is called.
        __slots__ = ('_destination_class', '_invariant_errors')

        def __init__(self, destination_class, original_map):
            super(CheckedPMap.Evolver, self).__init__(original_map)
            self._destination_class = destination_class
            self._invariant_errors = []

        def set(self, key, value):
            # Key mismatches raise CheckedKeyTypeError, value mismatches
            # the default CheckedValueTypeError.
            _check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError)
            _check_types([value], self._destination_class._checked_value_types, self._destination_class)
            self._invariant_errors.extend(data for valid, data in (invariant(key, value)
                                                                   for invariant in self._destination_class._checked_invariants)
                                          if not valid)

            return super(CheckedPMap.Evolver, self).set(key, value)

        def persistent(self):
            if self._invariant_errors:
                raise InvariantException(error_codes=self._invariant_errors)

            if self.is_dirty() or type(self._original_pmap) != self._destination_class:
                return self._destination_class(self._buckets_evolver.persistent(), self._size)

            return self._original_pmap
import sys
import six
def immutable(members='', name='Immutable', verbose=False):
    """
    Produces a class that either can be used standalone or as a base class for persistent classes.

    This is a thin wrapper around a named tuple.

    Constructing a type and using it to instantiate objects:

    >>> Point = immutable('x, y', name='Point')
    >>> p = Point(1, 2)
    >>> p2 = p.set(x=3)
    >>> p
    Point(x=1, y=2)
    >>> p2
    Point(x=3, y=2)

    Inheriting from a constructed type. In this case no type name needs to be supplied:

    >>> class PositivePoint(immutable('x, y')):
    ...     __slots__ = tuple()
    ...     def __new__(cls, x, y):
    ...         if x > 0 and y > 0:
    ...             return super(PositivePoint, cls).__new__(cls, x, y)
    ...         raise Exception('Coordinates must be positive!')
    ...
    >>> p = PositivePoint(1, 2)
    >>> p.set(x=3)
    PositivePoint(x=3, y=2)
    >>> p.set(y=-3)
    Traceback (most recent call last):
    Exception: Coordinates must be positive!

    The persistent class also supports the notion of frozen members. The value of a frozen member
    cannot be updated. For example it could be used to implement an ID that should remain the same
    over time. A frozen member is denoted by a trailing underscore.

    >>> Point = immutable('x, y, id_', name='Point')
    >>> p = Point(1, 2, id_=17)
    >>> p.set(x=3)
    Point(x=3, y=2, id_=17)
    >>> p.set(id_=18)
    Traceback (most recent call last):
    AttributeError: Cannot set frozen members id_
    """
    if isinstance(members, six.string_types):
        members = members.replace(',', ' ').split()

    def frozen_member_test():
        # Build the source snippet that rejects assignment to frozen
        # (trailing-underscore) members; empty string when there are none.
        frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
        if frozen_members:
            return """
        frozen_fields = fields_to_modify & set([{frozen_members}])
        if frozen_fields:
            raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
    """.format(frozen_members=', '.join(frozen_members))

        return ''

    verbose_string = ""
    if sys.version_info < (3, 7):
        # Verbose is no longer supported in Python 3.7
        verbose_string = ", verbose={verbose}".format(verbose=verbose)

    quoted_members = ', '.join("'%s'" % m for m in members)
    template = """
class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})):
    __slots__ = tuple()

    def __repr__(self):
        return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)

    def set(self, **kwargs):
        if not kwargs:
            return self

        fields_to_modify = set(kwargs.keys())
        if not fields_to_modify <= {member_set}:
            raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
        {frozen_member_test}
        return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
""".format(quoted_members=quoted_members,
           member_set="set([%s])" % quoted_members if quoted_members else 'set()',
           frozen_member_test=frozen_member_test(),
           verbose_string=verbose_string,
           class_name=name)

    if verbose:
        print(template)

    from collections import namedtuple
    namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
    try:
        six.exec_(template, namespace)
    except SyntaxError as e:
        # str(e) instead of e.message: SyntaxError has no .message on
        # Python 3, which previously masked the real error with an
        # AttributeError.
        raise SyntaxError(str(e) + ':\n' + template)

    return namespace[name]
from ._compat import Sequence, Hashable
from itertools import islice, chain
from numbers import Integral
from pyrsistent._plist import plist
class PDeque(object):
    """
    Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented
    using two persistent lists.

    A maximum length can be specified to create a bounded queue.

    Fully supports the Sequence and Hashable protocols including indexing and slicing but
    if you need fast random access go for the PVector instead.

    Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
    create an instance.

    Some examples:

    >>> x = pdeque([1, 2, 3])
    >>> x.left
    1
    >>> x.right
    3
    >>> x[0] == x.left
    True
    >>> x[-1] == x.right
    True
    >>> x.pop()
    pdeque([1, 2])
    >>> x.pop() == x[:-1]
    True
    >>> x.popleft()
    pdeque([2, 3])
    >>> x.append(4)
    pdeque([1, 2, 3, 4])
    >>> x.appendleft(4)
    pdeque([4, 1, 2, 3])

    >>> y = pdeque([1, 2, 3], maxlen=3)
    >>> y.append(4)
    pdeque([2, 3, 4], maxlen=3)
    >>> y.appendleft(4)
    pdeque([4, 1, 2], maxlen=3)
    """
    # _left_list holds the left half head-first; _right_list holds the
    # right half reversed (its head is the rightmost element). _length
    # caches the element count so len() is O(1).
    __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')

    def __new__(cls, left_list, right_list, length, maxlen=None):
        instance = super(PDeque, cls).__new__(cls)
        instance._left_list = left_list
        instance._right_list = right_list
        instance._length = length

        if maxlen is not None:
            if not isinstance(maxlen, Integral):
                raise TypeError('An integer is required as maxlen')

            if maxlen < 0:
                raise ValueError("maxlen must be non-negative")

        instance._maxlen = maxlen
        return instance

    @property
    def right(self):
        """
        Rightmost element in deque.
        """
        return PDeque._tip_from_lists(self._right_list, self._left_list)

    @property
    def left(self):
        """
        Leftmost element in deque.
        """
        return PDeque._tip_from_lists(self._left_list, self._right_list)

    @staticmethod
    def _tip_from_lists(primary_list, secondary_list):
        # The tip is the head of the primary list; when that side is
        # empty, it is the far end of the (reversed) secondary list.
        if primary_list:
            return primary_list.first

        if secondary_list:
            return secondary_list[-1]

        raise IndexError('No elements in empty deque')

    def __iter__(self):
        # Left half in order, then the right half un-reversed.
        return chain(self._left_list, self._right_list.reverse())

    def __repr__(self):
        return "pdeque({0}{1})".format(list(self),
                                       ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
    __str__ = __repr__

    @property
    def maxlen(self):
        """
        Maximum length of the queue.
        """
        return self._maxlen

    def pop(self, count=1):
        """
        Return new deque with rightmost element removed. Popping the empty queue
        will return the empty queue. A optional count can be given to indicate the
        number of elements to pop. Popping with a negative index is the same as
        popleft. Executes in amortized O(k) where k is the number of elements to pop.

        >>> pdeque([1, 2]).pop()
        pdeque([1])
        >>> pdeque([1, 2]).pop(2)
        pdeque([])
        >>> pdeque([1, 2]).pop(-1)
        pdeque([2])
        """
        if count < 0:
            return self.popleft(-count)

        new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
        return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)

    def popleft(self, count=1):
        """
        Return new deque with leftmost element removed. Otherwise functionally
        equivalent to pop().

        >>> pdeque([1, 2]).popleft()
        pdeque([2])
        """
        if count < 0:
            return self.pop(-count)

        new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
        return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)

    @staticmethod
    def _pop_lists(primary_list, secondary_list, count):
        # Remove `count` elements from the primary end, rebalancing from
        # the secondary (reversed) list when the primary side runs dry.
        new_primary_list = primary_list
        new_secondary_list = secondary_list

        while count > 0 and (new_primary_list or new_secondary_list):
            count -= 1
            if new_primary_list.rest:
                new_primary_list = new_primary_list.rest
            elif new_primary_list:
                # Last element on the primary side: flip the secondary
                # list over to become the new primary.
                new_primary_list = new_secondary_list.reverse()
                new_secondary_list = plist()
            else:
                new_primary_list = new_secondary_list.reverse().rest
                new_secondary_list = plist()

        return new_primary_list, new_secondary_list

    def _is_empty(self):
        return not self._left_list and not self._right_list

    def __lt__(self, other):
        if not isinstance(other, PDeque):
            return NotImplemented

        return tuple(self) < tuple(other)

    def __eq__(self, other):
        if not isinstance(other, PDeque):
            return NotImplemented

        if tuple(self) == tuple(other):
            # Sanity check of the length value since it is redundant (there for performance)
            assert len(self) == len(other)
            return True

        return False

    def __hash__(self):
        return hash(tuple(self))

    def __len__(self):
        return self._length

    def append(self, elem):
        """
        Return new deque with elem as the rightmost element.

        >>> pdeque([1, 2]).append(3)
        pdeque([1, 2, 3])
        """
        new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
        return PDeque(new_left_list, new_right_list, new_length, self._maxlen)

    def appendleft(self, elem):
        """
        Return new deque with elem as the leftmost element.

        >>> pdeque([1, 2]).appendleft(3)
        pdeque([3, 1, 2])
        """
        new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
        return PDeque(new_left_list, new_right_list, new_length, self._maxlen)

    def _append(self, primary_list, secondary_list, elem):
        # When bounded and already full, drop one element from the
        # opposite end before attaching the new one.
        if self._maxlen is not None and self._length == self._maxlen:
            if self._maxlen == 0:
                return primary_list, secondary_list, 0
            new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
            return new_primary_list, new_secondary_list.cons(elem), self._length

        return primary_list, secondary_list.cons(elem), self._length + 1

    @staticmethod
    def _extend_list(the_list, iterable):
        # Cons every element onto the_list, also counting how many were added.
        count = 0
        for elem in iterable:
            the_list = the_list.cons(elem)
            count += 1

        return the_list, count

    def _extend(self, primary_list, secondary_list, iterable):
        new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
        new_secondary_list = secondary_list
        current_len = self._length + extend_count
        if self._maxlen is not None and current_len > self._maxlen:
            # Trim overflow from the opposite end to respect maxlen.
            pop_len = current_len - self._maxlen
            new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
            extend_count -= pop_len

        return new_primary_list, new_secondary_list, extend_count

    def extend(self, iterable):
        """
        Return new deque with all elements of iterable appended to the right.

        >>> pdeque([1, 2]).extend([3, 4])
        pdeque([1, 2, 3, 4])
        """
        new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
        return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)

    def extendleft(self, iterable):
        """
        Return new deque with all elements of iterable appended to the left.

        NB! The elements will be inserted in reverse order compared to the order in the iterable.

        >>> pdeque([1, 2]).extendleft([3, 4])
        pdeque([4, 3, 1, 2])
        """
        new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
        return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)

    def count(self, elem):
        """
        Return the number of elements equal to elem present in the queue

        >>> pdeque([1, 2, 1]).count(1)
        2
        """
        return self._left_list.count(elem) + self._right_list.count(elem)

    def remove(self, elem):
        """
        Return new deque with first element from left equal to elem removed. If no such element is found
        a ValueError is raised.

        >>> pdeque([2, 1, 2]).remove(2)
        pdeque([1, 2])
        """
        # NOTE(review): maxlen is not propagated to the result here —
        # appears to match upstream behavior; confirm before relying on it.
        try:
            return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
        except ValueError:
            # Value not found in left list, try the right list
            try:
                # This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
                return PDeque(self._left_list,
                              self._right_list.reverse().remove(elem).reverse(), self._length - 1)
            except ValueError:
                raise ValueError('{0} not found in PDeque'.format(elem))

    def reverse(self):
        """
        Return reversed deque.

        >>> pdeque([1, 2, 3]).reverse()
        pdeque([3, 2, 1])

        Also supports the standard python reverse function.

        >>> reversed(pdeque([1, 2, 3]))
        pdeque([3, 2, 1])
        """
        # Reversal is O(1): just swap the two internal lists.
        # NOTE(review): maxlen is dropped here as well — confirm intended.
        return PDeque(self._right_list, self._left_list, self._length)
    __reversed__ = reverse

    def rotate(self, steps):
        """
        Return deque with elements rotated steps steps.

        >>> x = pdeque([1, 2, 3])
        >>> x.rotate(1)
        pdeque([3, 1, 2])
        >>> x.rotate(-2)
        pdeque([3, 1, 2])
        """
        popped_deque = self.pop(steps)
        if steps >= 0:
            return popped_deque.extendleft(islice(self.reverse(), steps))

        return popped_deque.extend(islice(self, -steps))

    def __reduce__(self):
        # Pickling support
        return pdeque, (list(self), self._maxlen)

    def __getitem__(self, index):
        if isinstance(index, slice):
            if index.step is not None and index.step != 1:
                # Too difficult, no structural sharing possible
                return pdeque(tuple(self)[index], maxlen=self._maxlen)

            result = self
            if index.start is not None:
                result = result.popleft(index.start % self._length)
            if index.stop is not None:
                result = result.pop(self._length - (index.stop % self._length))

            return result

        if not isinstance(index, Integral):
            raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)

        if index >= 0:
            return self.popleft(index).left

        shifted = len(self) + index
        if shifted < 0:
            raise IndexError(
                "pdeque index {0} out of range {1}".format(index, len(self)),
            )

        return self.popleft(shifted).left

    index = Sequence.index
# Register with the ABCs so isinstance(x, Sequence) / Hashable checks
# succeed even though PDeque implements the protocols without inheriting.
Sequence.register(PDeque)
Hashable.register(PDeque)
def pdeque(iterable=(), maxlen=None):
    """
    Return deque containing the elements of iterable. If maxlen is specified then
    len(iterable) - maxlen elements are discarded from the left to if len(iterable) > maxlen.

    >>> pdeque([1, 2, 3])
    pdeque([1, 2, 3])
    >>> pdeque([1, 2, 3, 4], maxlen=2)
    pdeque([3, 4], maxlen=2)
    """
    t = tuple(iterable)
    if maxlen is not None:
        # t[-maxlen:] alone would keep *everything* when maxlen == 0
        # (because -0 == 0), so a zero bound must be handled explicitly
        # to yield an empty deque (matching collections.deque semantics).
        t = t[-maxlen:] if maxlen > 0 else ()
    length = len(t)
    # Split roughly in half: left half stored head-first, right half
    # stored reversed so both ends are cheap to reach.
    pivot = int(length / 2)
    left = plist(t[:pivot])
    right = plist(t[pivot:], reverse=True)
    return PDeque(left, right, length, maxlen)
def dq(*elements):
    """
    Return deque containing all arguments.

    >>> dq(1, 2, 3)
    pdeque([1, 2, 3])
    """
    # Fixed: the final line previously carried a fused extraction artifact
    # that made it invalid Python.
    return pdeque(elements)
from ._compat import Sequence, Hashable
from numbers import Integral
from functools import reduce
class _PListBuilder(object):
    """
    Accumulates elements front-to-back so a plist can be produced in
    input order without a final reverse.

    Works by mutating the ``rest`` pointer of the current tail node,
    which is safe only because the nodes are not shared with anyone
    until build() is called.
    """
    __slots__ = ('_head', '_tail')

    def __init__(self):
        self._head = _EMPTY_PLIST
        self._tail = _EMPTY_PLIST

    def _append(self, elem, constructor):
        node = constructor(elem)
        if not self._tail:
            # First node: it is both head and tail.
            self._head = node
            self._tail = node
        else:
            self._tail.rest = node
            self._tail = node
        return self._head

    def append_elem(self, elem):
        """Attach a single element wrapped in a fresh node."""
        return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))

    def append_plist(self, pl):
        """Attach an existing plist as the tail (terminal operation)."""
        return self._append(pl, lambda l: l)

    def build(self):
        return self._head
class _PListBase(object):
    # Shared implementation for PList nodes and the empty-list sentinel.
    __slots__ = ('__weakref__',)

    # Selected implementations can be taken straight from the Sequence
    # class, other are less suitable. Especially those that work with
    # index lookups.
    count = Sequence.count
    index = Sequence.index

    def __reduce__(self):
        # Pickling support
        return plist, (list(self),)

    def __len__(self):
        """
        Return the length of the list, computed by traversing it.

        This is obviously O(n) but with the current implementation
        where a list is also a node the overhead of storing the length
        in every node would be quite significant.
        """
        return sum(1 for _ in self)

    def __repr__(self):
        return "plist({0})".format(list(self))
    __str__ = __repr__

    def cons(self, elem):
        """
        Return a new list with elem inserted as new head.

        >>> plist([1, 2]).cons(3)
        plist([3, 1, 2])
        """
        return PList(elem, self)

    def mcons(self, iterable):
        """
        Return a new list with all elements of iterable repeatedly cons:ed to the current list.
        NB! The elements will be inserted in the reverse order of the iterable.
        Runs in O(len(iterable)).

        >>> plist([1, 2]).mcons([3, 4])
        plist([4, 3, 1, 2])
        """
        head = self
        for elem in iterable:
            head = head.cons(elem)

        return head

    def reverse(self):
        """
        Return a reversed version of list. Runs in O(n) where n is the length of the list.

        >>> plist([1, 2, 3]).reverse()
        plist([3, 2, 1])

        Also supports the standard reversed function.

        >>> reversed(plist([1, 2, 3]))
        plist([3, 2, 1])
        """
        result = plist()
        head = self
        while head:
            result = result.cons(head.first)
            head = head.rest

        return result
    __reversed__ = reverse

    def split(self, index):
        """
        Split the list at position specified by index. Returns a tuple containing the
        list up until index and the list after the index. Runs in O(index).

        >>> plist([1, 2, 3, 4]).split(2)
        (plist([1, 2]), plist([3, 4]))
        """
        lb = _PListBuilder()
        right_list = self
        i = 0
        while right_list and i < index:
            lb.append_elem(right_list.first)
            right_list = right_list.rest
            i += 1

        if not right_list:
            # Just a small optimization in the cases where no split occurred
            return self, _EMPTY_PLIST

        return lb.build(), right_list

    def __iter__(self):
        li = self
        while li:
            yield li.first
            li = li.rest

    def __lt__(self, other):
        if not isinstance(other, _PListBase):
            return NotImplemented

        return tuple(self) < tuple(other)

    def __eq__(self, other):
        """
        Traverses the lists, checking equality of elements.

        This is an O(n) operation, but preserves the standard semantics of list equality.
        """
        if not isinstance(other, _PListBase):
            return NotImplemented

        self_head = self
        other_head = other
        while self_head and other_head:
            if not self_head.first == other_head.first:
                return False
            self_head = self_head.rest
            other_head = other_head.rest

        # Equal only if both lists were exhausted simultaneously.
        return not self_head and not other_head

    def __getitem__(self, index):
        # Don't use this data structure if you plan to do a lot of indexing, it is
        # very inefficient! Use a PVector instead!
        if isinstance(index, slice):
            if index.start is not None and index.stop is None and (index.step is None or index.step == 1):
                # Tail slices share structure with the original list.
                return self._drop(index.start)

            # Take the easy way out for all other slicing cases, not much structural reuse possible anyway
            return plist(tuple(self)[index])

        if not isinstance(index, Integral):
            raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)

        if index < 0:
            # NB: O(n)!
            index += len(self)

        try:
            return self._drop(index).first
        except AttributeError:
            # Walked off the end onto the empty sentinel, which has no first.
            raise IndexError("PList index out of range")

    def _drop(self, count):
        if count < 0:
            raise IndexError("PList index out of range")

        head = self
        while count > 0:
            head = head.rest
            count -= 1

        return head

    def __hash__(self):
        return hash(tuple(self))

    def remove(self, elem):
        """
        Return new list with first element equal to elem removed. O(k) where k is the position
        of the element that is removed.

        Raises ValueError if no matching element is found.

        >>> plist([1, 2, 1]).remove(1)
        plist([2, 1])
        """
        builder = _PListBuilder()
        head = self
        while head:
            if head.first == elem:
                # Share the untouched tail; only the prefix is rebuilt.
                return builder.append_plist(head.rest)

            builder.append_elem(head.first)
            head = head.rest

        raise ValueError('{0} not found in PList'.format(elem))
class PList(_PListBase):
    """
    Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
    Element access is O(k) where k is the position of the element in the list. Taking the
    length of the list is O(n).

    Fully supports the Sequence and Hashable protocols including indexing and slicing but
    if you need fast random access go for the PVector instead.

    Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
    create an instance.

    Some examples:

    >>> x = plist([1, 2])
    >>> y = x.cons(3)
    >>> x
    plist([1, 2])
    >>> y
    plist([3, 1, 2])
    >>> y.first
    3
    >>> y.rest == x
    True
    >>> y[:2]
    plist([3, 1])
    """
    # Each node carries its element (first) and the remainder (rest).
    __slots__ = ('first', 'rest')

    def __new__(cls, first, rest):
        instance = super(PList, cls).__new__(cls)
        instance.first = first
        instance.rest = rest
        return instance

    def __bool__(self):
        # A real node is always truthy; only the empty sentinel is falsy.
        return True
    __nonzero__ = __bool__  # Python 2 name for the truthiness hook
# Register with the ABCs so isinstance(x, Sequence) / Hashable checks
# succeed even though PList implements the protocols without inheriting.
Sequence.register(PList)
Hashable.register(PList)
class _EmptyPList(_PListBase):
    # Sentinel for the empty list: falsy, has no first element, and is
    # its own rest so traversals terminate naturally.
    __slots__ = ()

    def __bool__(self):
        return False
    __nonzero__ = __bool__  # Python 2 name for the truthiness hook

    @property
    def first(self):
        raise AttributeError("Empty PList has no first")

    @property
    def rest(self):
        return self
Sequence.register(_EmptyPList)
Hashable.register(_EmptyPList)

# Single shared empty-list instance used throughout the module.
_EMPTY_PLIST = _EmptyPList()
def plist(iterable=(), reverse=False):
    """
    Creates a new persistent list containing all elements of iterable.
    Optional parameter reverse specifies if the elements should be inserted in
    reverse order or not.

    >>> plist([1, 2, 3])
    plist([1, 2, 3])
    >>> plist([1, 2, 3], reverse=True)
    plist([3, 2, 1])
    """
    if not reverse:
        # Cons-ing naturally reverses, so pre-reverse to preserve order.
        iterable = list(iterable)
        iterable.reverse()

    result = _EMPTY_PLIST
    for elem in iterable:
        result = result.cons(elem)
    return result
def l(*elements):
    """
    Creates a new persistent list containing all arguments.

    >>> l(1, 2, 3)
    plist([1, 2, 3])
    """
    # Fixed: the final line previously carried a fused extraction artifact
    # that made it invalid Python.
    return plist(elements)
from ._compat import Mapping, Hashable
from itertools import chain
import six
from pyrsistent._pvector import pvector
from pyrsistent._transformations import transform
class PMap(object):
    """
    Persistent map/dict. Tries to follow the same naming conventions as the built in dict where feasible.

    Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to
    create an instance.

    Was originally written as a very close copy of the Clojure equivalent but was later rewritten to closer
    re-assemble the python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are
    hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of
    the containing vectors size the map is reallocated to a vector of double the size. This is done to avoid
    excessive hash collisions.

    This structure corresponds most closely to the built in dict type and is intended as a replacement. Where the
    semantics are the same (more or less) the same function names have been used but for some cases it is not possible,
    for example assignments and deletion of values.

    PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for
    element access.

    Random access and insert is log32(n) where n is the size of the map.

    The following are examples of some common operations on persistent maps

    >>> m1 = m(a=1, b=3)
    >>> m2 = m1.set('c', 3)
    >>> m3 = m2.remove('a')
    >>> m1
    pmap({'b': 3, 'a': 1})
    >>> m2
    pmap({'c': 3, 'b': 3, 'a': 1})
    >>> m3
    pmap({'c': 3, 'b': 3})
    >>> m3['c']
    3
    >>> m3.c
    3
    """

    # _cached_hash is created lazily by __hash__; __weakref__ keeps weak
    # references possible despite __slots__.
    __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash')

    def __new__(cls, size, buckets):
        self = super(PMap, cls).__new__(cls)
        self._size = size
        self._buckets = buckets
        return self

    @staticmethod
    def _get_bucket(buckets, key):
        # Locate the bucket for *key*: hash modulo the bucket-vector length.
        # Returns (index, bucket) where bucket may be None for an empty slot.
        index = hash(key) % len(buckets)
        bucket = buckets[index]
        return index, bucket

    @staticmethod
    def _getitem(buckets, key):
        _, bucket = PMap._get_bucket(buckets, key)
        if bucket:
            # Linear scan within the bucket resolves hash collisions.
            for k, v in bucket:
                if k == key:
                    return v

        raise KeyError(key)

    def __getitem__(self, key):
        return PMap._getitem(self._buckets, key)

    @staticmethod
    def _contains(buckets, key):
        _, bucket = PMap._get_bucket(buckets, key)
        if bucket:
            for k, _ in bucket:
                if k == key:
                    return True

            return False

        return False

    def __contains__(self, key):
        return self._contains(self._buckets, key)

    get = Mapping.get  # default-aware lookup borrowed from the ABC

    def __iter__(self):
        return self.iterkeys()

    def __getattr__(self, key):
        # Dot-notation access: pmap.a is equivalent to pmap['a'].
        try:
            return self[key]
        except KeyError:
            raise AttributeError(
                "{0} has no attribute '{1}'".format(type(self).__name__, key)
            )

    def iterkeys(self):
        for k, _ in self.iteritems():
            yield k

    # These are more efficient implementations compared to the original
    # methods that are based on the keys iterator and then calls the
    # accessor functions to access the value for the corresponding key
    def itervalues(self):
        for _, v in self.iteritems():
            yield v

    def iteritems(self):
        for bucket in self._buckets:
            if bucket:
                for k, v in bucket:
                    yield k, v

    def values(self):
        return pvector(self.itervalues())

    def keys(self):
        return pvector(self.iterkeys())

    def items(self):
        return pvector(self.iteritems())

    def __len__(self):
        return self._size

    def __repr__(self):
        return 'pmap({0})'.format(str(dict(self)))

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, Mapping):
            return NotImplemented
        if len(self) != len(other):
            return False
        if isinstance(other, PMap):
            # Differing cached hashes prove inequality without comparing
            # contents; equal bucket vectors prove equality the same way.
            if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash')
                    and self._cached_hash != other._cached_hash):
                return False
            if self._buckets == other._buckets:
                return True
            return dict(self.iteritems()) == dict(other.iteritems())
        elif isinstance(other, dict):
            return dict(self.iteritems()) == other
        return dict(self.iteritems()) == dict(six.iteritems(other))

    __ne__ = Mapping.__ne__

    def __lt__(self, other):
        # PMaps deliberately define no ordering.
        raise TypeError('PMaps are not orderable')

    __le__ = __lt__
    __gt__ = __lt__
    __ge__ = __lt__

    def __str__(self):
        return self.__repr__()

    def __hash__(self):
        # Safe to cache because the map is immutable.
        if not hasattr(self, '_cached_hash'):
            self._cached_hash = hash(frozenset(self.iteritems()))
        return self._cached_hash

    def set(self, key, val):
        """
        Return a new PMap with key and val inserted.

        >>> m1 = m(a=1, b=2)
        >>> m2 = m1.set('a', 3)
        >>> m3 = m1.set('c' ,4)
        >>> m1
        pmap({'b': 2, 'a': 1})
        >>> m2
        pmap({'b': 2, 'a': 3})
        >>> m3
        pmap({'c': 4, 'b': 2, 'a': 1})
        """
        return self.evolver().set(key, val).persistent()

    def remove(self, key):
        """
        Return a new PMap without the element specified by key. Raises KeyError if the element
        is not present.

        >>> m1 = m(a=1, b=2)
        >>> m1.remove('a')
        pmap({'b': 2})
        """
        return self.evolver().remove(key).persistent()

    def discard(self, key):
        """
        Return a new PMap without the element specified by key. Returns reference to itself
        if element is not present.

        >>> m1 = m(a=1, b=2)
        >>> m1.discard('a')
        pmap({'b': 2})
        >>> m1 is m1.discard('c')
        True
        """
        try:
            return self.remove(key)
        except KeyError:
            return self

    def update(self, *maps):
        """
        Return a new PMap with the items in Mappings inserted. If the same key is present in multiple
        maps the rightmost (last) value is inserted.

        >>> m1 = m(a=1, b=2)
        >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35})
        pmap({'c': 3, 'b': 2, 'a': 17, 'd': 35})
        """
        return self.update_with(lambda l, r: r, *maps)

    def update_with(self, update_fn, *maps):
        """
        Return a new PMap with the items in Mappings maps inserted. If the same key is present in multiple
        maps the values will be merged using update_fn going from left to right.

        >>> from operator import add
        >>> m1 = m(a=1, b=2)
        >>> m1.update_with(add, m(a=2))
        pmap({'b': 2, 'a': 3})

        The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost.

        >>> m1 = m(a=1)
        >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3})
        pmap({'a': 1})
        """
        evolver = self.evolver()
        for map in maps:
            for key, value in map.items():
                evolver.set(key, update_fn(evolver[key], value) if key in evolver else value)

        return evolver.persistent()

    def __add__(self, other):
        return self.update(other)

    def __reduce__(self):
        # Pickling support
        return pmap, (dict(self),)

    def transform(self, *transformations):
        """
        Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
        consists of two parts. One match expression that specifies which elements to transform
        and one transformation function that performs the actual transformation.

        >>> from pyrsistent import freeze, ny
        >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
        ...                                   {'author': 'Steve', 'content': 'A slightly longer article'}],
        ...                      'weather': {'temperature': '11C', 'wind': '5m/s'}})
        >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
        >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
        >>> very_short_news.articles[0].content
        'A short article'
        >>> very_short_news.articles[1].content
        'A slightly long...'

        When nothing has been transformed the original data structure is kept

        >>> short_news is news_paper
        True
        >>> very_short_news is news_paper
        False
        >>> very_short_news.articles[0] is news_paper.articles[0]
        True
        """
        return transform(self, transformations)

    def copy(self):
        # Immutable, so a "copy" is the object itself.
        return self

    class _Evolver(object):
        # Mutable view over a PMap used to batch updates efficiently.
        __slots__ = ('_buckets_evolver', '_size', '_original_pmap')

        def __init__(self, original_pmap):
            self._original_pmap = original_pmap
            self._buckets_evolver = original_pmap._buckets.evolver()
            self._size = original_pmap._size

        def __getitem__(self, key):
            return PMap._getitem(self._buckets_evolver, key)

        def __setitem__(self, key, val):
            self.set(key, val)

        def set(self, key, val):
            # Grow the bucket vector when the load factor would exceed ~2/3.
            if len(self._buckets_evolver) < 0.67 * self._size:
                self._reallocate(2 * len(self._buckets_evolver))

            kv = (key, val)
            index, bucket = PMap._get_bucket(self._buckets_evolver, key)
            if bucket:
                for k, v in bucket:
                    if k == key:
                        # Only rebuild the bucket if the value actually changes.
                        if v is not val:
                            new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket]
                            self._buckets_evolver[index] = new_bucket

                        return self

                # Key not in the bucket: prepend the new pair.
                new_bucket = [kv]
                new_bucket.extend(bucket)
                self._buckets_evolver[index] = new_bucket
                self._size += 1
            else:
                self._buckets_evolver[index] = [kv]
                self._size += 1

            return self

        def _reallocate(self, new_size):
            new_list = new_size * [None]
            buckets = self._buckets_evolver.persistent()
            for k, v in chain.from_iterable(x for x in buckets if x):
                index = hash(k) % new_size
                if new_list[index]:
                    new_list[index].append((k, v))
                else:
                    new_list[index] = [(k, v)]

            # A reallocation should always result in a dirty buckets evolver to avoid
            # possible loss of elements when doing the reallocation.
            self._buckets_evolver = pvector().evolver()
            self._buckets_evolver.extend(new_list)

        def is_dirty(self):
            return self._buckets_evolver.is_dirty()

        def persistent(self):
            if self.is_dirty():
                self._original_pmap = PMap(self._size, self._buckets_evolver.persistent())

            return self._original_pmap

        def __len__(self):
            return self._size

        def __contains__(self, key):
            return PMap._contains(self._buckets_evolver, key)

        def __delitem__(self, key):
            self.remove(key)

        def remove(self, key):
            index, bucket = PMap._get_bucket(self._buckets_evolver, key)

            if bucket:
                new_bucket = [(k, v) for (k, v) in bucket if k != key]
                if len(bucket) > len(new_bucket):
                    self._buckets_evolver[index] = new_bucket if new_bucket else None
                    self._size -= 1
                    return self

            raise KeyError('{0}'.format(key))

    def evolver(self):
        """
        Create a new evolver for this pmap. For a discussion on evolvers in general see the
        documentation for the pvector evolver.

        Create the evolver and perform various mutating updates to it:

        >>> m1 = m(a=1, b=2)
        >>> e = m1.evolver()
        >>> e['c'] = 3
        >>> len(e)
        3
        >>> del e['a']

        The underlying pmap remains the same:

        >>> m1
        pmap({'b': 2, 'a': 1})

        The changes are kept in the evolver. An updated pmap can be created using the
        persistent() function on the evolver.

        >>> m2 = e.persistent()
        >>> m2
        pmap({'c': 3, 'b': 2})

        The new pmap will share data with the original pmap in the same way that would have
        been done if only using operations on the pmap.
        """
        return self._Evolver(self)
# Register PMap as a virtual subclass of the Mapping/Hashable ABCs.
Mapping.register(PMap)
Hashable.register(PMap)
def _turbo_mapping(initial, pre_size):
    """
    Build a PMap from *initial* in a single pass.

    The bucket vector is sized up front (from *pre_size*, or twice the input
    length) so that no reallocation is needed while inserting.
    """
    if pre_size:
        size = pre_size
    else:
        try:
            size = 2 * len(initial) or 8
        except Exception:
            # Guess we can't figure out the length. Give up on length hinting,
            # we can always reallocate later.
            size = 8

    buckets = [None] * size

    if not isinstance(initial, Mapping):
        # Normalize to a dict first: duplicate keys collapse here, so the
        # insertion loop below can assume collision-free keys.
        initial = dict(initial)

    for k, v in six.iteritems(initial):
        index = hash(k) % size
        existing = buckets[index]
        if existing:
            existing.append((k, v))
        else:
            buckets[index] = [(k, v)]

    return PMap(len(initial), pvector().extend(buckets))
_EMPTY_PMAP = _turbo_mapping({}, 0)
def pmap(initial={}, pre_size=0):
    """
    Create new persistent map, inserts all elements in initial into the newly created map.
    The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This
    may have a positive performance impact in the cases where you know beforehand that a large number of elements
    will be inserted into the map eventually since it will reduce the number of reallocations required.

    >>> pmap({'a': 13, 'b': 14})
    pmap({'b': 14, 'a': 13})
    """
    # NOTE(review): the mutable {} default is never mutated here, so it is
    # safe, though unconventional.
    # Empty input always maps to the shared empty-map singleton.
    return _EMPTY_PMAP if not initial else _turbo_mapping(initial, pre_size)
def m(**kwargs):
    """
    Creates a new persistent map. Inserts all key value arguments into the newly created map.

    >>> m(a=13, b=14)
    pmap({'b': 14, 'a': 13})
    """
    # Keyword arguments arrive as a plain dict; hand them to the factory.
    return pmap(initial=kwargs)
from __future__ import absolute_import, division, print_function
from ._make import NOTHING, Factory, pipe
# Public API of ``attr.converters`` (``pipe`` is re-exported from ._make).
__all__ = [
    "pipe",
    "optional",
    "default_if_none",
]
def optional(converter):
    """
    A converter that allows an attribute to be optional. An optional attribute
    is one which can be set to ``None``.

    :param callable converter: the converter that is used for non-``None``
        values.

    .. versionadded:: 17.1.0
    """

    def optional_converter(val):
        # None passes through untouched; everything else is converted.
        return None if val is None else converter(val)

    return optional_converter
def default_if_none(default=NOTHING, factory=None):
    """
    A converter that allows to replace ``None`` values by *default* or the
    result of *factory*.

    :param default: Value to be used if ``None`` is passed. Passing an instance
        of `attr.Factory` is supported, however the ``takes_self`` option
        is *not*.
    :param callable factory: A callable that takes no parameters whose result
        is used if ``None`` is passed.

    :raises TypeError: If **neither** *default* nor *factory* is passed.
    :raises TypeError: If **both** *default* and *factory* are passed.
    :raises ValueError: If an instance of `attr.Factory` is passed with
        ``takes_self=True``.

    .. versionadded:: 18.2.0
    """
    if default is NOTHING and factory is None:
        raise TypeError("Must pass either `default` or `factory`.")

    if default is not NOTHING and factory is not None:
        raise TypeError(
            "Must pass either `default` or `factory` but not both."
        )

    if factory is not None:
        default = Factory(factory)

    if isinstance(default, Factory):
        if default.takes_self:
            raise ValueError(
                "`takes_self` is not supported by default_if_none."
            )

        def default_if_none_converter(val):
            # Build a fresh default on every None via the factory.
            return val if val is not None else default.factory()

    else:

        def default_if_none_converter(val):
            return val if val is not None else default

    return default_if_none_converter
from __future__ import absolute_import, division, print_function
import re
from ._make import _AndValidator, and_, attrib, attrs
from .exceptions import NotCallableError
# Public API of ``attr.validators`` (``and_`` is re-exported from ._make).
__all__ = [
    "and_",
    "deep_iterable",
    "deep_mapping",
    "in_",
    "instance_of",
    "is_callable",
    "matches_re",
    "optional",
    "provides",
]
@attrs(repr=False, slots=True, hash=True)
class _InstanceOfValidator(object):
    # The type (or tuple of types) values must be an instance of.
    type = attrib()

    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not isinstance(value, self.type):
            # The extra positional args (attr, expected type, value) are
            # attached to the exception for programmatic inspection.
            raise TypeError(
                "'{name}' must be {type!r} (got {value!r} that is a "
                "{actual!r}).".format(
                    name=attr.name,
                    type=self.type,
                    actual=value.__class__,
                    value=value,
                ),
                attr,
                self.type,
                value,
            )

    def __repr__(self):
        return "<instance_of validator for type {type!r}>".format(
            type=self.type
        )
def instance_of(type):
    """
    A validator that raises a `TypeError` if the initializer is called
    with a wrong type for this particular attribute (checks are performed using
    `isinstance` therefore it's also valid to pass a tuple of types).

    :param type: The type to check for.
    :type type: type or tuple of types

    :raises TypeError: With a human readable error message, the attribute
        (of type `attr.Attribute`), the expected type, and the value it
        got.
    """
    # Thin factory around the validator class above.
    return _InstanceOfValidator(type)
@attrs(repr=False, frozen=True, slots=True)
class _MatchesReValidator(object):
    # regex is the compiled pattern; match_func is the bound pattern method
    # (match/search/fullmatch) selected by matches_re() below.
    regex = attrib()
    flags = attrib()
    match_func = attrib()

    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not self.match_func(value):
            raise ValueError(
                "'{name}' must match regex {regex!r}"
                " ({value!r} doesn't)".format(
                    name=attr.name, regex=self.regex.pattern, value=value
                ),
                attr,
                self.regex,
                value,
            )

    def __repr__(self):
        return "<matches_re validator for pattern {regex!r}>".format(
            regex=self.regex
        )
def matches_re(regex, flags=0, func=None):
    r"""
    A validator that raises `ValueError` if the initializer is called
    with a string that doesn't match *regex*.

    :param str regex: a regex string to match against
    :param int flags: flags that will be passed to the underlying re function
        (default 0)
    :param callable func: which underlying `re` function to call (options
        are `re.fullmatch`, `re.search`, `re.match`, default
        is ``None`` which means either `re.fullmatch` or an emulation of
        it on Python 2). For performance reasons, they won't be used directly
        but on a pre-`re.compile`\ ed pattern.

    .. versionadded:: 19.2.0
    """
    # re.fullmatch does not exist on Python 2; may be None here.
    fullmatch = getattr(re, "fullmatch", None)
    valid_funcs = (fullmatch, None, re.search, re.match)
    if func not in valid_funcs:
        raise ValueError(
            "'func' must be one of %s."
            % (
                ", ".join(
                    sorted(
                        e and e.__name__ or "None" for e in set(valid_funcs)
                    )
                ),
            )
        )

    pattern = re.compile(regex, flags)
    if func is re.match:
        match_func = pattern.match
    elif func is re.search:
        match_func = pattern.search
    else:
        if fullmatch:
            match_func = pattern.fullmatch
        else:
            # Emulate fullmatch on Python 2 by anchoring the pattern with \Z.
            pattern = re.compile(r"(?:{})\Z".format(regex), flags)
            match_func = pattern.match

    return _MatchesReValidator(pattern, flags, match_func)
@attrs(repr=False, slots=True, hash=True)
class _ProvidesValidator(object):
    # A zope.interface Interface the value must provide.
    interface = attrib()

    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not self.interface.providedBy(value):
            raise TypeError(
                "'{name}' must provide {interface!r} which {value!r} "
                "doesn't.".format(
                    name=attr.name, interface=self.interface, value=value
                ),
                attr,
                self.interface,
                value,
            )

    def __repr__(self):
        return "<provides validator for interface {interface!r}>".format(
            interface=self.interface
        )
def provides(interface):
    """
    A validator that raises a `TypeError` if the initializer is called
    with an object that does not provide the requested *interface* (checks are
    performed using ``interface.providedBy(value)`` (see `zope.interface
    <https://zopeinterface.readthedocs.io/en/latest/>`_).

    :param interface: The interface to check for.
    :type interface: ``zope.interface.Interface``

    :raises TypeError: With a human readable error message, the attribute
        (of type `attr.Attribute`), the expected interface, and the
        value it got.
    """
    # Thin factory around the validator class above.
    return _ProvidesValidator(interface)
@attrs(repr=False, slots=True, hash=True)
class _OptionalValidator(object):
    """Wraps another validator, letting ``None`` pass unchecked."""

    validator = attrib()

    def __call__(self, inst, attr, value):
        # None short-circuits; anything else is delegated to the wrapped
        # validator.
        if value is not None:
            self.validator(inst, attr, value)

    def __repr__(self):
        return "<optional validator for {what} or None>".format(
            what=repr(self.validator)
        )
def optional(validator):
    """
    A validator that makes an attribute optional. An optional attribute is one
    which can be set to ``None`` in addition to satisfying the requirements of
    the sub-validator.

    :param validator: A validator (or a list of validators) that is used for
        non-``None`` values.
    :type validator: callable or `list` of callables.

    .. versionadded:: 15.1.0
    .. versionchanged:: 17.1.0 *validator* can be a list of validators.
    """
    # A list of validators is combined with and_ semantics before wrapping.
    inner = _AndValidator(validator) if isinstance(validator, list) else validator
    return _OptionalValidator(inner)
@attrs(repr=False, slots=True, hash=True)
class _InValidator(object):
    # The collection (list, tuple, Enum, ...) of allowed values.
    options = attrib()

    def __call__(self, inst, attr, value):
        try:
            in_options = value in self.options
        except TypeError:  # e.g. `1 in "abc"`
            # An un-answerable membership test counts as "not a member".
            in_options = False

        if not in_options:
            raise ValueError(
                "'{name}' must be in {options!r} (got {value!r})".format(
                    name=attr.name, options=self.options, value=value
                )
            )

    def __repr__(self):
        return "<in_ validator with options {options!r}>".format(
            options=self.options
        )
def in_(options):
    """
    A validator that raises a `ValueError` if the initializer is called
    with a value that does not belong in the options provided. The check is
    performed using ``value in options``.

    :param options: Allowed options.
    :type options: list, tuple, `enum.Enum`, ...

    :raises ValueError: With a human readable error message, the attribute (of
        type `attr.Attribute`), the expected options, and the value it
        got.

    .. versionadded:: 17.1.0
    """
    # Thin factory around the validator class above.
    return _InValidator(options)
# NOTE(review): slots=False here, unlike the sibling validators — presumably
# intentional; confirm against upstream attrs.
@attrs(repr=False, slots=False, hash=True)
class _IsCallableValidator(object):
    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if not callable(value):
            message = (
                "'{name}' must be callable "
                "(got {value!r} that is a {actual!r})."
            )
            raise NotCallableError(
                msg=message.format(
                    name=attr.name, value=value, actual=value.__class__
                ),
                value=value,
            )

    def __repr__(self):
        return "<is_callable validator>"
def is_callable():
    """
    A validator that raises a `attr.exceptions.NotCallableError` if the
    initializer is called with a value for this particular attribute
    that is not callable.

    .. versionadded:: 19.1.0

    :raises `attr.exceptions.NotCallableError`: With a human readable error
        message containing the attribute (`attr.Attribute`) name,
        and the value it got.
    """
    # Stateless, so every call returns a fresh but equivalent instance.
    return _IsCallableValidator()
@attrs(repr=False, slots=True, hash=True)
class _DeepIterable(object):
    # Validator applied to every member; optionally one for the iterable
    # itself (checked first).
    member_validator = attrib(validator=is_callable())
    iterable_validator = attrib(
        default=None, validator=optional(is_callable())
    )

    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if self.iterable_validator is not None:
            self.iterable_validator(inst, attr, value)

        for member in value:
            self.member_validator(inst, attr, member)

    def __repr__(self):
        iterable_identifier = (
            ""
            if self.iterable_validator is None
            else " {iterable!r}".format(iterable=self.iterable_validator)
        )
        return (
            "<deep_iterable validator for{iterable_identifier}"
            " iterables of {member!r}>"
        ).format(
            iterable_identifier=iterable_identifier,
            member=self.member_validator,
        )
def deep_iterable(member_validator, iterable_validator=None):
    """
    A validator that performs deep validation of an iterable.

    :param member_validator: Validator to apply to iterable members
    :param iterable_validator: Validator to apply to iterable itself
        (optional)

    .. versionadded:: 19.1.0

    :raises TypeError: if any sub-validators fail
    """
    # Thin factory around the validator class above.
    return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, hash=True)
class _DeepMapping(object):
    # Validators for keys and values of every entry; optionally one for the
    # mapping itself (checked first).
    key_validator = attrib(validator=is_callable())
    value_validator = attrib(validator=is_callable())
    mapping_validator = attrib(default=None, validator=optional(is_callable()))

    def __call__(self, inst, attr, value):
        """
        We use a callable class to be able to change the ``__repr__``.
        """
        if self.mapping_validator is not None:
            self.mapping_validator(inst, attr, value)

        for key in value:
            self.key_validator(inst, attr, key)
            self.value_validator(inst, attr, value[key])

    def __repr__(self):
        return (
            "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
        ).format(key=self.key_validator, value=self.value_validator)
def deep_mapping(key_validator, value_validator, mapping_validator=None):
    """
    A validator that performs deep validation of a dictionary.

    :param key_validator: Validator to apply to dictionary keys
    :param value_validator: Validator to apply to dictionary values
    :param mapping_validator: Validator to apply to top-level mapping
        attribute (optional)

    .. versionadded:: 19.1.0

    :raises TypeError: if any sub-validators fail
    """
    # Thin factory around the validator class above.
    return _DeepMapping(key_validator, value_validator, mapping_validator)
from __future__ import absolute_import, division, print_function
from functools import total_ordering
from ._funcs import astuple
from ._make import attrib, attrs
@total_ordering
@attrs(eq=False, order=False, slots=True, frozen=True)
class VersionInfo(object):
    """
    A version object that can be compared to tuple of length 1--4:

    >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
    True
    >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
    True
    >>> vi = attr.VersionInfo(19, 2, 0, "final")
    >>> vi < (19, 1, 1)
    False
    >>> vi < (19,)
    False
    >>> vi == (19, 2,)
    True
    >>> vi == (19, 2, 1)
    False

    .. versionadded:: 19.2
    """

    year = attrib(type=int)
    minor = attrib(type=int)
    micro = attrib(type=int)
    releaselevel = attrib(type=str)

    @classmethod
    def _from_version_string(cls, s):
        """
        Parse *s* and return a _VersionInfo.
        """
        v = s.split(".")
        if len(v) == 3:
            # No release level given; treat the version as final.
            v.append("final")

        return cls(
            year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
        )

    def _ensure_tuple(self, other):
        """
        Ensure *other* is a tuple of a valid length.

        Returns a possibly transformed *other* and ourselves as a tuple of
        the same length as *other*.
        """
        if self.__class__ is other.__class__:
            other = astuple(other)

        # NotImplementedError is used as an internal signal here; __eq__ and
        # __lt__ catch it and turn it into NotImplemented.
        if not isinstance(other, tuple):
            raise NotImplementedError

        if not (1 <= len(other) <= 4):
            raise NotImplementedError

        return astuple(self)[: len(other)], other

    def __eq__(self, other):
        try:
            us, them = self._ensure_tuple(other)
        except NotImplementedError:
            return NotImplemented

        return us == them

    def __lt__(self, other):
        try:
            us, them = self._ensure_tuple(other)
        except NotImplementedError:
            return NotImplemented

        # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
        # have to do anything special with releaselevel for now.
        return us < them
from __future__ import absolute_import, division, print_function
import copy
from ._compat import iteritems
from ._make import NOTHING, _obj_setattr, fields
from .exceptions import AttrsAttributeNotFoundError
def asdict(
    inst,
    recurse=True,
    filter=None,
    dict_factory=dict,
    retain_collection_types=False,
    value_serializer=None,
):
    """
    Return the ``attrs`` attribute values of *inst* as a dict.

    Optionally recurse into other ``attrs``-decorated classes.

    :param inst: Instance of an ``attrs``-decorated class.
    :param bool recurse: Recurse into classes that are also
        ``attrs``-decorated.
    :param callable filter: A callable whose return code determines whether an
        attribute or element is included (``True``) or dropped (``False``). Is
        called with the `attr.Attribute` as the first argument and the
        value as the second argument.
    :param callable dict_factory: A callable to produce dictionaries from. For
        example, to produce ordered dictionaries instead of normal Python
        dictionaries, pass in ``collections.OrderedDict``.
    :param bool retain_collection_types: Do not convert to ``list`` when
        encountering an attribute whose type is ``tuple`` or ``set``. Only
        meaningful if ``recurse`` is ``True``.
    :param Optional[callable] value_serializer: A hook that is called for every
        attribute or dict key/value. It receives the current instance, field
        and value and must return the (updated) value. The hook is run *after*
        the optional *filter* has been applied.

    :rtype: return type of *dict_factory*

    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    .. versionadded:: 16.0.0 *dict_factory*
    .. versionadded:: 16.1.0 *retain_collection_types*
    .. versionadded:: 20.3.0 *value_serializer*
    """
    attrs = fields(inst.__class__)
    rv = dict_factory()
    for a in attrs:
        v = getattr(inst, a.name)
        if filter is not None and not filter(a, v):
            continue

        if value_serializer is not None:
            v = value_serializer(inst, a, v)

        if recurse is True:
            # Recurse into nested attrs instances, collections, and dicts;
            # anything else is stored as-is.
            if has(v.__class__):
                rv[a.name] = asdict(
                    v,
                    True,
                    filter,
                    dict_factory,
                    retain_collection_types,
                    value_serializer,
                )
            elif isinstance(v, (tuple, list, set, frozenset)):
                cf = v.__class__ if retain_collection_types is True else list
                rv[a.name] = cf(
                    [
                        _asdict_anything(
                            i,
                            filter,
                            dict_factory,
                            retain_collection_types,
                            value_serializer,
                        )
                        for i in v
                    ]
                )
            elif isinstance(v, dict):
                df = dict_factory
                # Both keys and values are converted recursively.
                rv[a.name] = df(
                    (
                        _asdict_anything(
                            kk,
                            filter,
                            df,
                            retain_collection_types,
                            value_serializer,
                        ),
                        _asdict_anything(
                            vv,
                            filter,
                            df,
                            retain_collection_types,
                            value_serializer,
                        ),
                    )
                    for kk, vv in iteritems(v)
                )
            else:
                rv[a.name] = v
        else:
            rv[a.name] = v

    return rv
def _asdict_anything(
    val,
    filter,
    dict_factory,
    retain_collection_types,
    value_serializer,
):
    """
    ``asdict`` only works on attrs instances, this works on anything.
    """
    if getattr(val.__class__, "__attrs_attrs__", None) is not None:
        # Attrs class.
        rv = asdict(
            val,
            True,
            filter,
            dict_factory,
            retain_collection_types,
            value_serializer,
        )
    elif isinstance(val, (tuple, list, set, frozenset)):
        cf = val.__class__ if retain_collection_types is True else list
        rv = cf(
            [
                _asdict_anything(
                    i,
                    filter,
                    dict_factory,
                    retain_collection_types,
                    value_serializer,
                )
                for i in val
            ]
        )
    elif isinstance(val, dict):
        df = dict_factory
        rv = df(
            (
                _asdict_anything(
                    kk, filter, df, retain_collection_types, value_serializer
                ),
                _asdict_anything(
                    vv, filter, df, retain_collection_types, value_serializer
                ),
            )
            for kk, vv in iteritems(val)
        )
    else:
        # Scalar / unrecognized value: returned unchanged (modulo serializer).
        rv = val

    if value_serializer is not None:
        rv = value_serializer(None, None, rv)

    return rv
def astuple(
    inst,
    recurse=True,
    filter=None,
    tuple_factory=tuple,
    retain_collection_types=False,
):
    """
    Return the ``attrs`` attribute values of *inst* as a tuple.

    Optionally recurse into other ``attrs``-decorated classes.

    :param inst: Instance of an ``attrs``-decorated class.
    :param bool recurse: Recurse into classes that are also
        ``attrs``-decorated.
    :param callable filter: A callable whose return code determines whether an
        attribute or element is included (``True``) or dropped (``False``). Is
        called with the `attr.Attribute` as the first argument and the
        value as the second argument.
    :param callable tuple_factory: A callable to produce tuples from. For
        example, to produce lists instead of tuples.
    :param bool retain_collection_types: Do not convert to ``list``
        or ``dict`` when encountering an attribute which type is
        ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
        ``True``.

    :rtype: return type of *tuple_factory*

    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    .. versionadded:: 16.2.0
    """
    attrs = fields(inst.__class__)
    rv = []
    retain = retain_collection_types  # Very long. :/
    for a in attrs:
        v = getattr(inst, a.name)
        if filter is not None and not filter(a, v):
            continue

        if recurse is True:
            # Recurse into nested attrs instances, collections, and dicts;
            # anything else is appended as-is.
            if has(v.__class__):
                rv.append(
                    astuple(
                        v,
                        recurse=True,
                        filter=filter,
                        tuple_factory=tuple_factory,
                        retain_collection_types=retain,
                    )
                )
            elif isinstance(v, (tuple, list, set, frozenset)):
                cf = v.__class__ if retain is True else list
                rv.append(
                    cf(
                        [
                            astuple(
                                j,
                                recurse=True,
                                filter=filter,
                                tuple_factory=tuple_factory,
                                retain_collection_types=retain,
                            )
                            if has(j.__class__)
                            else j
                            for j in v
                        ]
                    )
                )
            elif isinstance(v, dict):
                df = v.__class__ if retain is True else dict
                rv.append(
                    df(
                        (
                            astuple(
                                kk,
                                tuple_factory=tuple_factory,
                                retain_collection_types=retain,
                            )
                            if has(kk.__class__)
                            else kk,
                            astuple(
                                vv,
                                tuple_factory=tuple_factory,
                                retain_collection_types=retain,
                            )
                            if has(vv.__class__)
                            else vv,
                        )
                        for kk, vv in iteritems(v)
                    )
                )
            else:
                rv.append(v)
        else:
            rv.append(v)

    # A list factory gets the working list directly; anything else wraps it.
    return rv if tuple_factory is list else tuple_factory(rv)
def has(cls):
    """
    Check whether *cls* is a class with ``attrs`` attributes.

    :param type cls: Class to introspect.
    :raise TypeError: If *cls* is not a class.

    :rtype: bool
    """
    # attrs-decorated classes carry an ``__attrs_attrs__`` attribute.
    attrs_marker = getattr(cls, "__attrs_attrs__", None)
    return attrs_marker is not None
def assoc(inst, **changes):
    """
    Copy *inst* and apply *changes*.

    :param inst: Instance of a class with ``attrs`` attributes.
    :param changes: Keyword changes in the new copy.

    :return: A copy of inst with *changes* incorporated.

    :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
        be found on *cls*.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    .. deprecated:: 17.1.0
        Use `evolve` instead.
    """
    # Imported locally so the warning machinery is only loaded when this
    # deprecated function is actually used.
    import warnings

    warnings.warn(
        "assoc is deprecated and will be removed after 2018/01.",
        DeprecationWarning,
        stacklevel=2,
    )
    new = copy.copy(inst)
    attrs = fields(inst.__class__)
    for k, v in iteritems(changes):
        a = getattr(attrs, k, NOTHING)
        if a is NOTHING:
            raise AttrsAttributeNotFoundError(
                "{k} is not an attrs attribute on {cl}.".format(
                    k=k, cl=new.__class__
                )
            )
        # _obj_setattr bypasses potential frozen-class restrictions.
        _obj_setattr(new, k, v)
    return new
def evolve(inst, **changes):
    """
    Create a new instance, based on *inst* with *changes* applied.

    :param inst: Instance of a class with ``attrs`` attributes.
    :param changes: Keyword changes in the new copy.

    :return: A copy of inst with *changes* incorporated.

    :raise TypeError: If *attr_name* couldn't be found in the class
        ``__init__``.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.

    .. versionadded:: 17.1.0
    """
    cls = inst.__class__
    attrs = fields(cls)
    for a in attrs:
        # Attributes not taken by __init__ cannot be passed along.
        if not a.init:
            continue
        attr_name = a.name  # To deal with private attributes.
        # A private attribute ``_x`` is exposed as init argument ``x``.
        init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
        if init_name not in changes:
            # Copy unchanged values from the original instance.
            changes[init_name] = getattr(inst, attr_name)

    return cls(**changes)
def resolve_types(cls, globalns=None, localns=None):
    """
    Resolve any strings and forward annotations in type annotations.

    This is only required if you need concrete types in `Attribute`'s *type*
    field. In other words, you don't need to resolve your types if you only
    use them for static type checking.

    With no arguments, names will be looked up in the module in which the
    class was created. If this is not what you want, e.g. if the name only
    exists inside a method, pass *globalns* or *localns* to specify other
    dictionaries in which to look up these names. See the docs of
    `typing.get_type_hints` for more details.

    :param type cls: Class to resolve.
    :param Optional[dict] globalns: Dictionary containing global variables.
    :param Optional[dict] localns: Dictionary containing local variables.

    :raise TypeError: If *cls* is not a class.
    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
        class.
    :raise NameError: If types cannot be resolved because of missing variables.

    :returns: *cls* so you can use this function also as a class decorator.
        Please note that you have to apply it **after** `attr.s`. That means
        the decorator has to come in the line **before** `attr.s`.

    .. versionadded:: 20.1.0
    """
    try:
        # Calling get_type_hints is expensive, so cache whether we've
        # already done it via a marker attribute.
        cls.__attrs_types_resolved__
    except AttributeError:
        import typing

        resolved = typing.get_type_hints(cls, globalns=globalns, localns=localns)
        for attrib in fields(cls):
            if attrib.name in resolved:
                # Attribute instances are frozen; write through the slot.
                _obj_setattr(attrib, "type", resolved[attrib.name])
        cls.__attrs_types_resolved__ = True
    # Return the class so this also works as a decorator.
    return cls
from __future__ import division
from warnings import warn
import contextlib
import json
import numbers
from six import add_metaclass
from jsonschema import (
_legacy_validators,
_types,
_utils,
_validators,
exceptions,
)
from jsonschema.compat import (
Sequence,
int_types,
iteritems,
lru_cache,
str_types,
unquote,
urldefrag,
urljoin,
urlopen,
urlsplit,
)
# Sigh. https://gitlab.com/pycqa/flake8/issues/280
# https://github.com/pyga/ebb-lint/issues/7
# Imported for backwards compatibility.
from jsonschema.exceptions import ErrorTree
ErrorTree
class _DontDoThat(Exception):
    """
    Raised when a validator with a non-default type checker is misused.
    Asking one for DEFAULT_TYPES doesn't make sense, since type checkers
    exist for the unrepresentable cases where DEFAULT_TYPES can't
    represent the type relationship.
    """
    def __str__(self):
        return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers"
# Registry of validator classes keyed by version name; filled by `validates`.
validators = {}
# Maps meta-schema URIs to the validator classes that registered them.
meta_schemas = _utils.URIDict()
def _generate_legacy_type_checks(types=()):
    """
    Generate newer-style type checks out of JSON-type-name-to-type mappings.

    Arguments:

        types (dict):

            A mapping of type names to their Python types

    Returns:

        A dictionary of definitions to pass to `TypeChecker`
    """
    def _make_check(pytypes):
        flattened = _utils.flatten(pytypes)

        def type_check(checker, instance):
            # bool is a subclass of int, so only let booleans match when
            # bool itself was explicitly listed.
            if isinstance(instance, bool) and bool not in flattened:
                return False
            return isinstance(instance, flattened)

        return type_check

    return {
        typename: _make_check(pytypes)
        for typename, pytypes in iteritems(dict(types))
    }
# Pre-TypeChecker mapping of JSON type names to Python types, retained only
# for the deprecated ``default_types``/``DEFAULT_TYPES`` machinery.
_DEPRECATED_DEFAULT_TYPES = {
    u"array": list,
    u"boolean": bool,
    u"integer": int_types,
    u"null": type(None),
    u"number": numbers.Number,
    u"object": dict,
    u"string": str_types,
}
# TypeChecker equivalent of the mapping above, used when a validator class is
# created without an explicit type checker.
_TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker(
    type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES),
)
def validates(version):
    """
    Register the decorated validator for a ``version`` of the specification.

    Registered validators and their meta schemas will be considered when
    parsing ``$schema`` properties' URIs.

    Arguments:

        version (str):

            An identifier to use as the version's name

    Returns:

        collections.Callable:

            a class decorator to decorate the validator with the version
    """
    def _validates(cls):
        validators[version] = cls
        # Also index the class by its meta schema's URI, if it has one.
        schema_id = cls.ID_OF(cls.META_SCHEMA)
        if schema_id:
            meta_schemas[schema_id] = cls
        return cls

    return _validates
def _DEFAULT_TYPES(self):
if self._CREATED_WITH_DEFAULT_TYPES is None:
raise _DontDoThat()
warn(
(
"The DEFAULT_TYPES attribute is deprecated. "
"See the type checker attached to this validator instead."
),
DeprecationWarning,
stacklevel=2,
)
return self._DEFAULT_TYPES
class _DefaultTypesDeprecatingMetaClass(type):
    # Expose DEFAULT_TYPES on validator *classes* (not just instances) so the
    # deprecation warning also fires for class-level attribute access.
    DEFAULT_TYPES = property(_DEFAULT_TYPES)
def _id_of(schema):
if schema is True or schema is False:
return u""
return schema.get(u"$id", u"")
def create(
    meta_schema,
    validators=(),
    version=None,
    default_types=None,
    type_checker=None,
    id_of=_id_of,
):
    """
    Create a new validator class.
    Arguments:
        meta_schema (collections.Mapping):
            the meta schema for the new validator class
        validators (collections.Mapping):
            a mapping from names to callables, where each callable will
            validate the schema property with the given name.
            Each callable should take 4 arguments:
                1. a validator instance,
                2. the value of the property being validated within the
                   instance
                3. the instance
                4. the schema
        version (str):
            an identifier for the version that this validator class will
            validate. If provided, the returned validator class will
            have its ``__name__`` set to include the version, and also
            will have `jsonschema.validators.validates` automatically
            called for the given version.
        type_checker (jsonschema.TypeChecker):
            a type checker, used when applying the :validator:`type` validator.
            If unprovided, a `jsonschema.TypeChecker` will be created
            with a set of default types typical of JSON Schema drafts.
        default_types (collections.Mapping):
            .. deprecated:: 3.0.0
                Please use the type_checker argument instead.
            If set, it provides mappings of JSON types to Python types
            that will be converted to functions and redefined in this
            object's `jsonschema.TypeChecker`.
        id_of (collections.Callable):
            A function that given a schema, returns its ID.
    Returns:
        a new `jsonschema.IValidator` class
    """
    # default_types and type_checker are mutually exclusive: the former is the
    # deprecated spelling of the latter. _created_with_default_types records
    # which path was taken (None means a custom type checker, for which
    # DEFAULT_TYPES access raises _DontDoThat).
    if default_types is not None:
        if type_checker is not None:
            raise TypeError(
                "Do not specify default_types when providing a type checker.",
            )
        _created_with_default_types = True
        warn(
            (
                "The default_types argument is deprecated. "
                "Use the type_checker argument instead."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        type_checker = _types.TypeChecker(
            type_checkers=_generate_legacy_type_checks(default_types),
        )
    else:
        default_types = _DEPRECATED_DEFAULT_TYPES
        if type_checker is None:
            _created_with_default_types = False
            type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES
        elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES:
            _created_with_default_types = False
        else:
            _created_with_default_types = None
    # The metaclass makes class-level DEFAULT_TYPES access emit the same
    # deprecation warning as instance-level access.
    @add_metaclass(_DefaultTypesDeprecatingMetaClass)
    class Validator(object):
        VALIDATORS = dict(validators)
        META_SCHEMA = dict(meta_schema)
        TYPE_CHECKER = type_checker
        ID_OF = staticmethod(id_of)
        DEFAULT_TYPES = property(_DEFAULT_TYPES)
        _DEFAULT_TYPES = dict(default_types)
        _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types
        def __init__(
            self,
            schema,
            types=(),
            resolver=None,
            format_checker=None,
        ):
            if types:
                warn(
                    (
                        "The types argument is deprecated. Provide "
                        "a type_checker to jsonschema.validators.extend "
                        "instead."
                    ),
                    DeprecationWarning,
                    stacklevel=2,
                )
                # Overlay the legacy types onto this instance's checker only.
                self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many(
                    _generate_legacy_type_checks(types),
                )
            if resolver is None:
                resolver = RefResolver.from_schema(schema, id_of=id_of)
            self.resolver = resolver
            self.format_checker = format_checker
            self.schema = schema
        @classmethod
        def check_schema(cls, schema):
            # A validator validates its own meta schema against the schema.
            for error in cls(cls.META_SCHEMA).iter_errors(schema):
                raise exceptions.SchemaError.create_from(error)
        def iter_errors(self, instance, _schema=None):
            if _schema is None:
                _schema = self.schema
            # Boolean schemas: True allows everything, False allows nothing.
            if _schema is True:
                return
            elif _schema is False:
                yield exceptions.ValidationError(
                    "False schema does not allow %r" % (instance,),
                    validator=None,
                    validator_value=None,
                    instance=instance,
                    schema=_schema,
                )
                return
            scope = id_of(_schema)
            if scope:
                self.resolver.push_scope(scope)
            try:
                # A $ref overrides all sibling keywords in these drafts.
                ref = _schema.get(u"$ref")
                if ref is not None:
                    validators = [(u"$ref", ref)]
                else:
                    validators = iteritems(_schema)
                for k, v in validators:
                    validator = self.VALIDATORS.get(k)
                    if validator is None:
                        continue
                    errors = validator(self, v, instance, _schema) or ()
                    for error in errors:
                        # set details if not already set by the called fn
                        error._set(
                            validator=k,
                            validator_value=v,
                            instance=instance,
                            schema=_schema,
                        )
                        if k != u"$ref":
                            error.schema_path.appendleft(k)
                        yield error
            finally:
                if scope:
                    self.resolver.pop_scope()
        def descend(self, instance, schema, path=None, schema_path=None):
            # Like iter_errors, but records where in the instance/schema the
            # recursion happened so error paths stay accurate.
            for error in self.iter_errors(instance, schema):
                if path is not None:
                    error.path.appendleft(path)
                if schema_path is not None:
                    error.schema_path.appendleft(schema_path)
                yield error
        def validate(self, *args, **kwargs):
            # Raise the first error encountered, if any.
            for error in self.iter_errors(*args, **kwargs):
                raise error
        def is_type(self, instance, type):
            try:
                return self.TYPE_CHECKER.is_type(instance, type)
            except exceptions.UndefinedTypeCheck:
                raise exceptions.UnknownType(type, instance, self.schema)
        def is_valid(self, instance, _schema=None):
            error = next(self.iter_errors(instance, _schema), None)
            return error is None
    if version is not None:
        # Register the class for $schema lookup and give it a nice name,
        # e.g. "Draft7Validator".
        Validator = validates(version)(Validator)
        Validator.__name__ = version.title().replace(" ", "") + "Validator"
    return Validator
def extend(validator, validators=(), version=None, type_checker=None):
    """
    Create a new validator class by extending an existing one.
    Arguments:
        validator (jsonschema.IValidator):
            an existing validator class
        validators (collections.Mapping):
            a mapping of new validator callables to extend with, whose
            structure is as in `create`.
            .. note::
                Any validator callables with the same name as an
                existing one will (silently) replace the old validator
                callable entirely, effectively overriding any validation
                done in the "parent" validator class.
                If you wish to instead extend the behavior of a parent's
                validator callable, delegate and call it directly in
                the new validator function by retrieving it using
                ``OldValidator.VALIDATORS["validator_name"]``.
        version (str):
            a version for the new validator class
        type_checker (jsonschema.TypeChecker):
            a type checker, used when applying the :validator:`type` validator.
            If unprovided, the type checker of the extended
            `jsonschema.IValidator` will be carried along.`
    Returns:
        a new `jsonschema.IValidator` class extending the one provided
    .. note:: Meta Schemas
        The new validator class will have its parent's meta schema.
        If you wish to change or extend the meta schema in the new
        validator class, modify ``META_SCHEMA`` directly on the returned
        class. Note that no implicit copying is done, so a copy should
        likely be made before modifying it, in order to not affect the
        old validator.
    """
    # New validator callables silently shadow same-named parent ones.
    all_validators = dict(validator.VALIDATORS)
    all_validators.update(validators)
    if type_checker is None:
        # Inherit the parent's type checker.
        type_checker = validator.TYPE_CHECKER
    elif validator._CREATED_WITH_DEFAULT_TYPES:
        # Mixing the deprecated default_types machinery with an explicit
        # type checker is ambiguous, so refuse it.
        raise TypeError(
            "Cannot extend a validator created with default_types "
            "with a type_checker. Update the validator to use a "
            "type_checker when created."
        )
    return create(
        meta_schema=validator.META_SCHEMA,
        validators=all_validators,
        version=version,
        type_checker=type_checker,
        id_of=validator.ID_OF,
    )
# Validator for JSON Schema draft 3. Note draft 3/4 use "id" (not "$id") and
# several keywords with pre-draft-6 semantics, hence the _legacy_validators.
Draft3Validator = create(
    meta_schema=_utils.load_schema("draft3"),
    validators={
        u"$ref": _validators.ref,
        u"additionalItems": _validators.additionalItems,
        u"additionalProperties": _validators.additionalProperties,
        u"dependencies": _legacy_validators.dependencies_draft3,
        u"disallow": _legacy_validators.disallow_draft3,
        u"divisibleBy": _validators.multipleOf,
        u"enum": _validators.enum,
        u"extends": _legacy_validators.extends_draft3,
        u"format": _validators.format,
        u"items": _legacy_validators.items_draft3_draft4,
        u"maxItems": _validators.maxItems,
        u"maxLength": _validators.maxLength,
        u"maximum": _legacy_validators.maximum_draft3_draft4,
        u"minItems": _validators.minItems,
        u"minLength": _validators.minLength,
        u"minimum": _legacy_validators.minimum_draft3_draft4,
        u"pattern": _validators.pattern,
        u"patternProperties": _validators.patternProperties,
        u"properties": _legacy_validators.properties_draft3,
        u"type": _legacy_validators.type_draft3,
        u"uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft3_type_checker,
    version="draft3",
    id_of=lambda schema: schema.get(u"id", ""),
)
# Validator for JSON Schema draft 4.
Draft4Validator = create(
    meta_schema=_utils.load_schema("draft4"),
    validators={
        u"$ref": _validators.ref,
        u"additionalItems": _validators.additionalItems,
        u"additionalProperties": _validators.additionalProperties,
        u"allOf": _validators.allOf,
        u"anyOf": _validators.anyOf,
        u"dependencies": _validators.dependencies,
        u"enum": _validators.enum,
        u"format": _validators.format,
        u"items": _legacy_validators.items_draft3_draft4,
        u"maxItems": _validators.maxItems,
        u"maxLength": _validators.maxLength,
        u"maxProperties": _validators.maxProperties,
        u"maximum": _legacy_validators.maximum_draft3_draft4,
        u"minItems": _validators.minItems,
        u"minLength": _validators.minLength,
        u"minProperties": _validators.minProperties,
        u"minimum": _legacy_validators.minimum_draft3_draft4,
        u"multipleOf": _validators.multipleOf,
        u"not": _validators.not_,
        u"oneOf": _validators.oneOf,
        u"pattern": _validators.pattern,
        u"patternProperties": _validators.patternProperties,
        u"properties": _validators.properties,
        u"required": _validators.required,
        u"type": _validators.type,
        u"uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft4_type_checker,
    version="draft4",
    id_of=lambda schema: schema.get(u"id", ""),
)
# Validator for JSON Schema draft 6 (introduces const/contains/propertyNames
# and boolean exclusiveMinimum/Maximum become numbers).
Draft6Validator = create(
    meta_schema=_utils.load_schema("draft6"),
    validators={
        u"$ref": _validators.ref,
        u"additionalItems": _validators.additionalItems,
        u"additionalProperties": _validators.additionalProperties,
        u"allOf": _validators.allOf,
        u"anyOf": _validators.anyOf,
        u"const": _validators.const,
        u"contains": _validators.contains,
        u"dependencies": _validators.dependencies,
        u"enum": _validators.enum,
        u"exclusiveMaximum": _validators.exclusiveMaximum,
        u"exclusiveMinimum": _validators.exclusiveMinimum,
        u"format": _validators.format,
        u"items": _validators.items,
        u"maxItems": _validators.maxItems,
        u"maxLength": _validators.maxLength,
        u"maxProperties": _validators.maxProperties,
        u"maximum": _validators.maximum,
        u"minItems": _validators.minItems,
        u"minLength": _validators.minLength,
        u"minProperties": _validators.minProperties,
        u"minimum": _validators.minimum,
        u"multipleOf": _validators.multipleOf,
        u"not": _validators.not_,
        u"oneOf": _validators.oneOf,
        u"pattern": _validators.pattern,
        u"patternProperties": _validators.patternProperties,
        u"properties": _validators.properties,
        u"propertyNames": _validators.propertyNames,
        u"required": _validators.required,
        u"type": _validators.type,
        u"uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft6_type_checker,
    version="draft6",
)
# Validator for JSON Schema draft 7 (adds if/then/else).
Draft7Validator = create(
    meta_schema=_utils.load_schema("draft7"),
    validators={
        u"$ref": _validators.ref,
        u"additionalItems": _validators.additionalItems,
        u"additionalProperties": _validators.additionalProperties,
        u"allOf": _validators.allOf,
        u"anyOf": _validators.anyOf,
        u"const": _validators.const,
        u"contains": _validators.contains,
        u"dependencies": _validators.dependencies,
        u"enum": _validators.enum,
        u"exclusiveMaximum": _validators.exclusiveMaximum,
        u"exclusiveMinimum": _validators.exclusiveMinimum,
        u"format": _validators.format,
        u"if": _validators.if_,
        u"items": _validators.items,
        u"maxItems": _validators.maxItems,
        u"maxLength": _validators.maxLength,
        u"maxProperties": _validators.maxProperties,
        u"maximum": _validators.maximum,
        u"minItems": _validators.minItems,
        u"minLength": _validators.minLength,
        u"minProperties": _validators.minProperties,
        u"minimum": _validators.minimum,
        u"multipleOf": _validators.multipleOf,
        u"oneOf": _validators.oneOf,
        u"not": _validators.not_,
        u"pattern": _validators.pattern,
        u"patternProperties": _validators.patternProperties,
        u"properties": _validators.properties,
        u"propertyNames": _validators.propertyNames,
        u"required": _validators.required,
        u"type": _validators.type,
        u"uniqueItems": _validators.uniqueItems,
    },
    type_checker=_types.draft7_type_checker,
    version="draft7",
)
# The default used when a schema declares no recognized $schema.
_LATEST_VERSION = Draft7Validator
class RefResolver(object):
    """
    Resolve JSON References.
    Arguments:
        base_uri (str):
            The URI of the referring document
        referrer:
            The actual referring document
        store (dict):
            A mapping from URIs to documents to cache
        cache_remote (bool):
            Whether remote refs should be cached after first resolution
        handlers (dict):
            A mapping from URI schemes to functions that should be used
            to retrieve them
        urljoin_cache (:func:`functools.lru_cache`):
            A cache that will be used for caching the results of joining
            the resolution scope to subscopes.
        remote_cache (:func:`functools.lru_cache`):
            A cache that will be used for caching the results of
            resolved remote URLs.
    Attributes:
        cache_remote (bool):
            Whether remote refs should be cached after first resolution
    """
    def __init__(
        self,
        base_uri,
        referrer,
        store=(),
        cache_remote=True,
        handlers=(),
        urljoin_cache=None,
        remote_cache=None,
    ):
        if urljoin_cache is None:
            urljoin_cache = lru_cache(1024)(urljoin)
        if remote_cache is None:
            remote_cache = lru_cache(1024)(self.resolve_from_url)
        self.referrer = referrer
        self.cache_remote = cache_remote
        self.handlers = dict(handlers)
        # Stack of resolution scopes; the base URI stays at the bottom.
        self._scopes_stack = [base_uri]
        # Pre-seed the store with every registered meta schema.
        self.store = _utils.URIDict(
            (id, validator.META_SCHEMA)
            for id, validator in iteritems(meta_schemas)
        )
        self.store.update(store)
        self.store[base_uri] = referrer
        self._urljoin_cache = urljoin_cache
        self._remote_cache = remote_cache
    @classmethod
    def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
        """
        Construct a resolver from a JSON schema object.
        Arguments:
            schema:
                the referring schema
        Returns:
            `RefResolver`
        """
        return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs)
    def push_scope(self, scope):
        """
        Enter a given sub-scope.
        Treats further dereferences as being performed underneath the
        given scope.
        """
        # Scopes are resolved relative to the current one (RFC 3986 join).
        self._scopes_stack.append(
            self._urljoin_cache(self.resolution_scope, scope),
        )
    def pop_scope(self):
        """
        Exit the most recent entered scope.
        Treats further dereferences as being performed underneath the
        original scope.
        Don't call this method more times than `push_scope` has been
        called.
        """
        try:
            self._scopes_stack.pop()
        except IndexError:
            raise exceptions.RefResolutionError(
                "Failed to pop the scope from an empty stack. "
                "`pop_scope()` should only be called once for every "
                "`push_scope()`"
            )
    @property
    def resolution_scope(self):
        """
        Retrieve the current resolution scope.
        """
        return self._scopes_stack[-1]
    @property
    def base_uri(self):
        """
        Retrieve the current base URI, not including any fragment.
        """
        uri, _ = urldefrag(self.resolution_scope)
        return uri
    @contextlib.contextmanager
    def in_scope(self, scope):
        """
        Temporarily enter the given scope for the duration of the context.
        """
        self.push_scope(scope)
        try:
            yield
        finally:
            self.pop_scope()
    @contextlib.contextmanager
    def resolving(self, ref):
        """
        Resolve the given ``ref`` and enter its resolution scope.
        Exits the scope on exit of this context manager.
        Arguments:
            ref (str):
                The reference to resolve
        """
        url, resolved = self.resolve(ref)
        self.push_scope(url)
        try:
            yield resolved
        finally:
            self.pop_scope()
    def resolve(self, ref):
        """
        Resolve the given reference.
        """
        url = self._urljoin_cache(self.resolution_scope, ref)
        return url, self._remote_cache(url)
    def resolve_from_url(self, url):
        """
        Resolve the given remote URL.
        """
        # Split off the fragment: the document part is fetched/cached, the
        # fragment part is a JSON pointer resolved within it.
        url, fragment = urldefrag(url)
        try:
            document = self.store[url]
        except KeyError:
            try:
                document = self.resolve_remote(url)
            except Exception as exc:
                raise exceptions.RefResolutionError(exc)
        return self.resolve_fragment(document, fragment)
    def resolve_fragment(self, document, fragment):
        """
        Resolve a ``fragment`` within the referenced ``document``.
        Arguments:
            document:
                The referent document
            fragment (str):
                a URI fragment to resolve within it
        """
        fragment = fragment.lstrip(u"/")
        parts = unquote(fragment).split(u"/") if fragment else []
        for part in parts:
            # JSON pointer escaping: ~1 is "/", ~0 is "~" (RFC 6901).
            part = part.replace(u"~1", u"/").replace(u"~0", u"~")
            if isinstance(document, Sequence):
                # Array indexes should be turned into integers
                try:
                    part = int(part)
                except ValueError:
                    pass
            try:
                document = document[part]
            except (TypeError, LookupError):
                raise exceptions.RefResolutionError(
                    "Unresolvable JSON pointer: %r" % fragment
                )
        return document
    def resolve_remote(self, uri):
        """
        Resolve a remote ``uri``.
        If called directly, does not check the store first, but after
        retrieving the document at the specified URI it will be saved in
        the store if :attr:`cache_remote` is True.
        .. note::
            If the requests_ library is present, ``jsonschema`` will use it to
            request the remote ``uri``, so that the correct encoding is
            detected and used.
            If it isn't, or if the scheme of the ``uri`` is not ``http`` or
            ``https``, UTF-8 is assumed.
        Arguments:
            uri (str):
                The URI to resolve
        Returns:
            The retrieved document
        .. _requests: https://pypi.org/project/requests/
        """
        try:
            import requests
        except ImportError:
            requests = None
        scheme = urlsplit(uri).scheme
        # Custom per-scheme handlers take precedence over any default.
        if scheme in self.handlers:
            result = self.handlers[scheme](uri)
        elif scheme in [u"http", u"https"] and requests:
            # Requests has support for detecting the correct encoding of
            # json over http
            result = requests.get(uri).json()
        else:
            # Otherwise, pass off to urllib and assume utf-8
            with urlopen(uri) as url:
                result = json.loads(url.read().decode("utf-8"))
        if self.cache_remote:
            self.store[uri] = result
        return result
def validate(instance, schema, cls=None, *args, **kwargs):
    """
    Validate an instance under the given schema.
        >>> validate([2, 3, 4], {"maxItems": 2})
        Traceback (most recent call last):
            ...
        ValidationError: [2, 3, 4] is too long
    :func:`validate` will first verify that the provided schema is
    itself valid, since not doing so can lead to less obvious error
    messages and fail in less obvious or consistent ways.
    If you know you have a valid schema already, especially if you
    intend to validate multiple instances with the same schema, you
    likely would prefer using the `IValidator.validate` method directly
    on a specific validator (e.g. ``Draft7Validator.validate``).
    Arguments:
        instance:
            The instance to validate
        schema:
            The schema to validate with
        cls (IValidator):
            The class that will be used to validate the instance.
    If the ``cls`` argument is not provided, two things will happen
    in accordance with the specification. First, if the schema has a
    :validator:`$schema` property containing a known meta-schema [#]_
    then the proper validator will be used. The specification recommends
    that all schemas contain :validator:`$schema` properties for this
    reason. If no :validator:`$schema` property is found, the default
    validator class is the latest released draft.
    Any other provided positional and keyword arguments will be passed
    on when instantiating the ``cls``.
    Raises:
        `jsonschema.exceptions.ValidationError` if the instance
            is invalid
        `jsonschema.exceptions.SchemaError` if the schema itself
            is invalid
    .. rubric:: Footnotes
    .. [#] known by a validator registered with
        `jsonschema.validators.validates`
    """
    if cls is None:
        # Pick a validator class from the schema's $schema, if any.
        cls = validator_for(schema)
    # Fail early with a SchemaError if the schema itself is invalid.
    cls.check_schema(schema)
    validator = cls(schema, *args, **kwargs)
    # Raise only the single most relevant error, not the first arbitrary one.
    error = exceptions.best_match(validator.iter_errors(instance))
    if error is not None:
        raise error
def validator_for(schema, default=_LATEST_VERSION):
    """
    Retrieve the validator class appropriate for validating the given schema.
    Uses the :validator:`$schema` property that should be present in the
    given schema to look up the appropriate validator class.
    Arguments:
        schema (collections.Mapping or bool):
            the schema to look at
        default:
            the default to return if the appropriate validator class
            cannot be determined.
            If unprovided, the default is to return the latest supported
            draft.
    """
    # Boolean schemas and schemas without $schema fall back to the default.
    if schema is True or schema is False or u"$schema" not in schema:
        return default
    if schema[u"$schema"] not in meta_schemas:
        # Unknown meta schema: warn, then fall back to the latest draft.
        warn(
            (
                "The metaschema specified by $schema was not found. "
                "Using the latest draft to validate, but this will raise "
                "an error in the future."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
    return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
import re
from jsonschema._utils import (
ensure_list,
equal,
extras_msg,
find_additional_properties,
types_msg,
unbool,
uniq,
)
from jsonschema.exceptions import FormatError, ValidationError
from jsonschema.compat import iteritems
def patternProperties(validator, patternProperties, instance, schema):
    """Apply each regex-keyed subschema to every matching property."""
    if not validator.is_type(instance, "object"):
        return

    for regex, subschema in iteritems(patternProperties):
        for key, value in iteritems(instance):
            if not re.search(regex, key):
                continue
            for error in validator.descend(
                value, subschema, path=key, schema_path=regex,
            ):
                yield error
def propertyNames(validator, propertyNames, instance, schema):
    """Validate each property *name* of the instance against the subschema."""
    if not validator.is_type(instance, "object"):
        return

    for name in instance:
        for error in validator.descend(instance=name, schema=propertyNames):
            yield error
def additionalProperties(validator, aP, instance, schema):
    """
    Validate properties not matched by ``properties``/``patternProperties``.

    ``aP`` may be a schema (applied to each extra property) or a boolean
    (``False`` forbids any extras).
    """
    if not validator.is_type(instance, "object"):
        return

    extras = set(find_additional_properties(instance, schema))

    if validator.is_type(aP, "object"):
        # Schema form: each extra property must match it.
        for extra in extras:
            for error in validator.descend(instance[extra], aP, path=extra):
                yield error
        return

    if aP or not extras:
        return

    if "patternProperties" in schema:
        patterns = sorted(schema["patternProperties"])
        verb = "does" if len(extras) == 1 else "do"
        error = "%s %s not match any of the regexes: %s" % (
            ", ".join(map(repr, sorted(extras))),
            verb,
            ", ".join(map(repr, patterns)),
        )
        yield ValidationError(error)
    else:
        error = "Additional properties are not allowed (%s %s unexpected)"
        yield ValidationError(error % extras_msg(extras))
def items(validator, items, instance, schema):
    """Validate array elements against "items" (single schema or tuple)."""
    if not validator.is_type(instance, "array"):
        return

    if validator.is_type(items, "array"):
        # Tuple form: each position has its own schema; extra items are
        # ignored here (additionalItems handles them).
        for (index, element), item_schema in zip(enumerate(instance), items):
            for error in validator.descend(
                element, item_schema, path=index, schema_path=index,
            ):
                yield error
    else:
        # Single-schema form: every element must match it.
        for index, element in enumerate(instance):
            for error in validator.descend(element, items, path=index):
                yield error
def additionalItems(validator, aI, instance, schema):
    """Validate array items beyond those covered by a tuple-form "items"."""
    if not validator.is_type(instance, "array"):
        return
    if validator.is_type(schema.get("items", {}), "object"):
        # A single "items" schema already applies to every position, so
        # nothing is "additional".
        return

    len_items = len(schema.get("items", []))
    if validator.is_type(aI, "object"):
        for index in range(len_items, len(instance)):
            for error in validator.descend(instance[index], aI, path=index):
                yield error
    elif not aI and len(instance) > len_items:
        error = "Additional items are not allowed (%s %s unexpected)"
        yield ValidationError(
            error %
            extras_msg(instance[len_items:])
        )
def const(validator, const, instance, schema):
    """Validate the draft 6 "const" keyword (exact-value equality)."""
    if equal(instance, const):
        return
    yield ValidationError("%r was expected" % (const,))
def contains(validator, contains, instance, schema):
    """Require at least one array element to match the subschema."""
    if not validator.is_type(instance, "array"):
        return

    for element in instance:
        if validator.is_valid(element, contains):
            # One match is enough; stop early like any() would.
            return
    yield ValidationError(
        "None of %r are valid under the given schema" % (instance,)
    )
def exclusiveMinimum(validator, minimum, instance, schema):
    """Validate the draft 6 numeric "exclusiveMinimum" keyword."""
    if not validator.is_type(instance, "number"):
        return

    # Keep the original comparison direction so NaN (which compares False
    # either way) is not reported.
    failed = instance <= minimum
    if failed:
        yield ValidationError(
            "%r is less than or equal to the minimum of %r" % (
                instance, minimum,
            ),
        )
def exclusiveMaximum(validator, maximum, instance, schema):
    """Validate the draft 6 numeric "exclusiveMaximum" keyword."""
    if not validator.is_type(instance, "number"):
        return

    # Original comparison preserved so NaN never triggers a failure.
    failed = instance >= maximum
    if failed:
        yield ValidationError(
            "%r is greater than or equal to the maximum of %r" % (
                instance, maximum,
            ),
        )
def minimum(validator, minimum, instance, schema):
    """Validate the (inclusive) "minimum" keyword."""
    if not validator.is_type(instance, "number"):
        return

    failed = instance < minimum
    if failed:
        yield ValidationError(
            "%r is less than the minimum of %r" % (instance, minimum)
        )
def maximum(validator, maximum, instance, schema):
    """Validate the (inclusive) "maximum" keyword."""
    if not validator.is_type(instance, "number"):
        return

    failed = instance > maximum
    if failed:
        yield ValidationError(
            "%r is greater than the maximum of %r" % (instance, maximum)
        )
def multipleOf(validator, dB, instance, schema):
    """
    Validate the "multipleOf" keyword.

    For float divisors, divisibility is decided by checking whether the
    quotient is integral; for int divisors, by the modulo operator.
    """
    if not validator.is_type(instance, "number"):
        return
    if isinstance(dB, float):
        quotient = instance / dB
        try:
            failed = int(quotient) != quotient
        except (OverflowError, ValueError):
            # int() raises OverflowError when the quotient is infinite (a
            # huge instance divided by a tiny divisor) and ValueError for
            # NaN. Neither is a multiple, so report failure instead of
            # crashing the whole validation run.
            failed = True
    else:
        failed = instance % dB
    if failed:
        yield ValidationError("%r is not a multiple of %r" % (instance, dB))
def minItems(validator, mI, instance, schema):
    """Validate the "minItems" keyword."""
    if not validator.is_type(instance, "array"):
        return
    if len(instance) < mI:
        yield ValidationError("%r is too short" % (instance,))
def maxItems(validator, mI, instance, schema):
    """Validate the "maxItems" keyword."""
    if not validator.is_type(instance, "array"):
        return
    if len(instance) > mI:
        yield ValidationError("%r is too long" % (instance,))
def uniqueItems(validator, uI, instance, schema):
    """Validate the "uniqueItems" keyword (only active when truthy)."""
    # Preserve the original short-circuit order: the validator is never
    # consulted when the keyword's value is falsy.
    if not uI:
        return
    if not validator.is_type(instance, "array"):
        return
    if not uniq(instance):
        yield ValidationError("%r has non-unique elements" % (instance,))
def pattern(validator, patrn, instance, schema):
    """Validate the "pattern" keyword (unanchored regex search)."""
    if not validator.is_type(instance, "string"):
        return
    if re.search(patrn, instance):
        return
    yield ValidationError("%r does not match %r" % (instance, patrn))
def format(validator, format, instance, schema):
    """Validate the "format" keyword via the validator's format checker."""
    checker = validator.format_checker
    if checker is None:
        # Format checking is opt-in; no checker means no validation.
        return
    try:
        checker.check(instance, format)
    except FormatError as error:
        yield ValidationError(error.message, cause=error.cause)
def minLength(validator, mL, instance, schema):
    """Validate the "minLength" keyword."""
    if not validator.is_type(instance, "string"):
        return
    if len(instance) < mL:
        yield ValidationError("%r is too short" % (instance,))
def maxLength(validator, mL, instance, schema):
    """Validate the "maxLength" keyword."""
    if not validator.is_type(instance, "string"):
        return
    if len(instance) > mL:
        yield ValidationError("%r is too long" % (instance,))
def dependencies(validator, dependencies, instance, schema):
    """Validate "dependencies" (property lists or schema dependencies)."""
    if not validator.is_type(instance, "object"):
        return

    for name, dependency in iteritems(dependencies):
        if name not in instance:
            continue

        if validator.is_type(dependency, "array"):
            # Property dependency: every listed name must also be present.
            for needed in dependency:
                if needed not in instance:
                    message = "%r is a dependency of %r"
                    yield ValidationError(message % (needed, name))
        else:
            # Schema dependency: the whole instance must match the schema.
            for error in validator.descend(
                instance, dependency, schema_path=name,
            ):
                yield error
def enum(validator, enums, instance, schema):
    """Validate the "enum" keyword with bool-vs-0/1 disambiguation."""
    if instance == 0 or instance == 1:
        # bool compares equal to 0/1, so wrap both sides with unbool() to
        # keep e.g. True from matching an enum that only contains 1.
        unbooled = unbool(instance)
        matched = any(unbooled == unbool(choice) for choice in enums)
    else:
        matched = instance in enums
    if not matched:
        yield ValidationError("%r is not one of %r" % (instance, enums))
def ref(validator, ref, instance, schema):
    """Validate against the schema a "$ref" points to."""
    resolve = getattr(validator.resolver, "resolve", None)
    if resolve is None:
        # Legacy resolver: the context manager pushes/pops the scope.
        with validator.resolver.resolving(ref) as resolved:
            for error in validator.descend(instance, resolved):
                yield error
        return

    scope, resolved = resolve(ref)
    validator.resolver.push_scope(scope)
    try:
        for error in validator.descend(instance, resolved):
            yield error
    finally:
        # Always restore the previous scope, even on error.
        validator.resolver.pop_scope()
def type(validator, types, instance, schema):
    """Validate the "type" keyword (a name or a list of names)."""
    types = ensure_list(types)
    if any(validator.is_type(instance, candidate) for candidate in types):
        return
    yield ValidationError(types_msg(instance, types))
def properties(validator, properties, instance, schema):
    """Validate each named property against its subschema."""
    if not validator.is_type(instance, "object"):
        return

    for name, subschema in iteritems(properties):
        if name not in instance:
            continue
        for error in validator.descend(
            instance[name],
            subschema,
            path=name,
            schema_path=name,
        ):
            yield error
def required(validator, required, instance, schema):
    """Validate the "required" keyword (a list of property names)."""
    if not validator.is_type(instance, "object"):
        return
    for name in required:
        if name not in instance:
            yield ValidationError("%r is a required property" % name)
def minProperties(validator, mP, instance, schema):
    """Validate the "minProperties" keyword."""
    if not validator.is_type(instance, "object"):
        return
    if len(instance) < mP:
        yield ValidationError(
            "%r does not have enough properties" % (instance,)
        )
def maxProperties(validator, mP, instance, schema):
    """Validate that an object has at most ``mP`` properties.

    Non-objects pass trivially; the ``type`` validator is responsible for
    them.  (The original body re-checked ``is_type`` after the guard; the
    duplicate check was redundant and has been removed.)
    """
    if not validator.is_type(instance, "object"):
        return
    if len(instance) > mP:
        yield ValidationError("%r has too many properties" % (instance,))
def allOf(validator, allOf, instance, schema):
    """Validate *instance* against every subschema in ``allOf``."""
    for position, subschema in enumerate(allOf):
        for error in validator.descend(instance, subschema, schema_path=position):
            yield error
def anyOf(validator, anyOf, instance, schema):
    """Validate that *instance* matches at least one subschema in ``anyOf``."""
    collected = []
    matched = False
    for position, subschema in enumerate(anyOf):
        errors = list(validator.descend(instance, subschema, schema_path=position))
        if not errors:
            matched = True
            break
        collected.extend(errors)
    if not matched:
        yield ValidationError(
            "%r is not valid under any of the given schemas" % (instance,),
            context=collected,
        )
def oneOf(validator, oneOf, instance, schema):
    """
    Validate that *instance* is valid under exactly one subschema.

    Yields an error when no subschema matches, and a second kind of error
    (listing every matching schema) when more than one does.
    """
    subschemas = enumerate(oneOf)
    all_errors = []
    for index, subschema in subschemas:
        errs = list(validator.descend(instance, subschema, schema_path=index))
        if not errs:
            first_valid = subschema
            break
        all_errors.extend(errs)
    else:
        # Loop exhausted without a break: nothing matched at all.
        yield ValidationError(
            "%r is not valid under any of the given schemas" % (instance,),
            context=all_errors,
        )
    # NOTE: ``subschemas`` is a partially-consumed iterator, so this only
    # inspects schemas *after* the first valid one found above.  If the loop
    # exhausted the iterator (no match), this list is empty and
    # ``first_valid`` is never referenced.
    more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
    if more_valid:
        more_valid.append(first_valid)
        reprs = ", ".join(repr(schema) for schema in more_valid)
        yield ValidationError(
            "%r is valid under each of %s" % (instance, reprs)
        )
def not_(validator, not_schema, instance, schema):
    """Validate that *instance* does NOT match ``not_schema``."""
    if not validator.is_valid(instance, not_schema):
        return
    yield ValidationError(
        "%r is not allowed for %r" % (not_schema, instance)
    )
def if_(validator, if_schema, instance, schema):
    """Apply the ``then`` or ``else`` subschema depending on ``if``."""
    branch = None
    if validator.is_valid(instance, if_schema):
        if u"then" in schema:
            branch = u"then"
    elif u"else" in schema:
        branch = u"else"
    if branch is not None:
        for error in validator.descend(instance, schema[branch], schema_path=branch):
            yield error
import numbers
from pyrsistent import pmap
import attr
from jsonschema.compat import int_types, str_types
from jsonschema.exceptions import UndefinedTypeCheck
def is_array(checker, instance):
    """True for JSON arrays (Python lists)."""
    return isinstance(instance, list)


def is_bool(checker, instance):
    """True for JSON booleans."""
    return isinstance(instance, bool)


def is_integer(checker, instance):
    """True for integers; booleans are excluded although bool subclasses int."""
    return not isinstance(instance, bool) and isinstance(instance, int_types)


def is_null(checker, instance):
    """True only for ``None``."""
    return instance is None


def is_number(checker, instance):
    """True for numbers; booleans are excluded although bool subclasses int."""
    return not isinstance(instance, bool) and isinstance(instance, numbers.Number)


def is_object(checker, instance):
    """True for JSON objects (Python dicts)."""
    return isinstance(instance, dict)


def is_string(checker, instance):
    """True for strings."""
    return isinstance(instance, str_types)


def is_any(checker, instance):
    """Accept anything (draft 3's ``any`` type)."""
    return True
@attr.s(frozen=True)
class TypeChecker(object):
    """
    A ``type`` property checker.
    A `TypeChecker` performs type checking for an `IValidator`. Type
    checks to perform are updated using `TypeChecker.redefine` or
    `TypeChecker.redefine_many` and removed via `TypeChecker.remove`.
    Each of these return a new `TypeChecker` object.
    Arguments:
        type_checkers (dict):
            The initial mapping of types to their checking functions.
    """
    # Stored as an immutable pyrsistent map; the class is frozen, so every
    # "mutation" below produces a new TypeChecker via attr.evolve.
    _type_checkers = attr.ib(default=pmap(), converter=pmap)
    def is_type(self, instance, type):
        """
        Check if the instance is of the appropriate type.
        Arguments:
            instance (object):
                The instance to check
            type (str):
                The name of the type that is expected.
        Returns:
            bool: Whether it conformed.
        Raises:
            `jsonschema.exceptions.UndefinedTypeCheck`:
                if type is unknown to this object.
        """
        try:
            fn = self._type_checkers[type]
        except KeyError:
            raise UndefinedTypeCheck(type)
        return fn(self, instance)
    def redefine(self, type, fn):
        """
        Produce a new checker with the given type redefined.
        Arguments:
            type (str):
                The name of the type to check.
            fn (collections.Callable):
                A function taking exactly two parameters - the type
                checker calling the function and the instance to check.
                The function should return true if instance is of this
                type and false otherwise.
        Returns:
            A new `TypeChecker` instance.
        """
        return self.redefine_many({type: fn})
    def redefine_many(self, definitions=()):
        """
        Produce a new checker with the given types redefined.
        Arguments:
            definitions (dict):
                A dictionary mapping types to their checking functions.
        Returns:
            A new `TypeChecker` instance.
        """
        # pmap.update returns a new persistent map; self is left untouched.
        return attr.evolve(
            self, type_checkers=self._type_checkers.update(definitions),
        )
    def remove(self, *types):
        """
        Produce a new checker with the given types forgotten.
        Arguments:
            types (~collections.Iterable):
                the names of the types to remove.
        Returns:
            A new `TypeChecker` instance
        Raises:
            `jsonschema.exceptions.UndefinedTypeCheck`:
                if any given type is unknown to this object
        """
        checkers = self._type_checkers
        for each in types:
            try:
                checkers = checkers.remove(each)
            except KeyError:
                raise UndefinedTypeCheck(each)
        return attr.evolve(self, type_checkers=checkers)
# Default type checkers per draft.  Draft 3 additionally recognizes "any";
# draft 6+ also treats floats with an integral value as "integer".
draft3_type_checker = TypeChecker(
    {
        u"any": is_any,
        u"array": is_array,
        u"boolean": is_bool,
        u"integer": is_integer,
        u"object": is_object,
        u"null": is_null,
        u"number": is_number,
        u"string": is_string,
    },
)
draft4_type_checker = draft3_type_checker.remove(u"any")
draft6_type_checker = draft4_type_checker.redefine(
    u"integer",
    lambda checker, instance: (
        is_integer(checker, instance) or
        isinstance(instance, float) and instance.is_integer()
    ),
)
draft7_type_checker = draft6_type_checker
import itertools
import json
import os
import re
from jsonschema.compat import MutableMapping, str_types, urlsplit
class URIDict(MutableMapping):
    """
    Dictionary which uses normalized URIs as keys.
    """
    def normalize(self, uri):
        # Canonical storage key: round-trip through urlsplit().geturl().
        return urlsplit(uri).geturl()
    def __init__(self, *args, **kwargs):
        # NOTE(review): initial items bypass __setitem__ here and are stored
        # with UN-normalized keys — confirm this is intentional.
        self.store = dict()
        self.store.update(*args, **kwargs)
    def __getitem__(self, uri):
        return self.store[self.normalize(uri)]
    def __setitem__(self, uri, value):
        self.store[self.normalize(uri)] = value
    def __delitem__(self, uri):
        del self.store[self.normalize(uri)]
    def __iter__(self):
        return iter(self.store)
    def __len__(self):
        return len(self.store)
    def __repr__(self):
        return repr(self.store)
class Unset(object):
    """
    An as-of-yet unset attribute or unprovided default parameter.

    Used as a sentinel distinct from ``None`` (which may be a valid value).
    """
    def __repr__(self):
        return "<unset>"
def load_schema(name):
    """
    Load a schema from ./schemas/``name``.json and return it.
    """
    schema_path = os.path.join(
        os.path.dirname(__file__), "schemas", "{0}.json".format(name)
    )
    with open(schema_path) as f:
        return json.loads(f.read())
def indent(string, times=1):
    """
    Indent every line of *string* by ``4 * times`` spaces
    (a dumb stand-in for Python 3.3's `textwrap.indent`).
    """
    prefix = " " * (4 * times)
    return "\n".join(prefix + line for line in string.splitlines())
def format_as_index(indices):
    """
    Render *indices* as chained subscript operations.

    For example, [1, 2, "foo"] -> [1][2]['foo'].  Empty input yields "".
    """
    if not indices:
        return ""
    return "".join("[%r]" % (index,) for index in indices)
def find_additional_properties(instance, schema):
    """
    Yield the properties of *instance* not covered by ``properties`` or
    ``patternProperties`` in *schema*.

    Assumes *instance* is dict-like already.
    """
    known = schema.get("properties", {})
    pattern = "|".join(schema.get("patternProperties", {}))
    for name in instance:
        if name in known:
            continue
        if pattern and re.search(pattern, name):
            continue
        yield name
def extras_msg(extras):
    """
    Build the (joined reprs, verb) pair for an extra items/properties error.
    """
    verb = "was" if len(extras) == 1 else "were"
    return ", ".join(repr(extra) for extra in extras), verb
def types_msg(instance, types):
    """
    Build the error message for a failed ``type`` validation.

    A type entry that is a mapping with a "name" key (draft-3 style object
    description) is shown by that name; anything else by its repr.
    """
    shown = []
    for candidate in types:
        try:
            shown.append(repr(candidate["name"]))
        except Exception:
            shown.append(repr(candidate))
    return "%r is not of type %s" % (instance, ", ".join(shown))
def flatten(suitable_for_isinstance):
    """
    Flatten an isinstance()-style argument into a flat tuple of types.

    isinstance() accepts a single type, a tuple of types, or arbitrarily
    nested tuples; this walks them all iteratively.
    """
    found = set()
    pending = [suitable_for_isinstance]
    while pending:
        thing = pending.pop()
        if isinstance(thing, tuple):
            pending.extend(thing)
        else:
            found.add(thing)
    return tuple(found)
def ensure_list(thing):
    """
    Wrap *thing* in a list if it's a single str; otherwise return unchanged.
    """
    return [thing] if isinstance(thing, str_types) else thing
def equal(one, two):
    """
    Equality check that keeps booleans distinct from the equal ints 0 and 1.
    """
    return unbool(one) == unbool(two)


def unbool(element, true=object(), false=object()):
    """
    Map True/False to private sentinel objects so that, for ``uniq`` and
    ``equal``, they never compare or hash equal to 1 and 0.
    """
    if element is True:
        return true
    if element is False:
        return false
    return element


def uniq(container):
    """
    Return whether every element of *container* is unique.

    Tries hashing first, falls back to sorting, and finally to a quadratic
    scan for unhashable, unorderable elements.
    """
    try:
        return len(set(unbool(i) for i in container)) == len(container)
    except TypeError:
        try:
            ordered = sorted(unbool(i) for i in container)
            for left, right in zip(ordered, itertools.islice(ordered, 1, None)):
                if left == right:
                    return False
        except (NotImplementedError, TypeError):
            seen = []
            for element in container:
                element = unbool(element)
                if element in seen:
                    return False
                seen.append(element)
    return True
import datetime
import re
import socket
import struct
from jsonschema.compat import str_types
from jsonschema.exceptions import FormatError
class FormatChecker(object):
    """
    A ``format`` property checker.
    JSON Schema does not mandate that the ``format`` property actually do any
    validation. If validation is desired however, instances of this class can
    be hooked into validators to enable format validation.
    `FormatChecker` objects always return ``True`` when asked about
    formats that they do not know how to validate.
    To check a custom format using a function that takes an instance and
    returns a ``bool``, use the `FormatChecker.checks` or
    `FormatChecker.cls_checks` decorators.
    Arguments:
        formats (~collections.Iterable):
            The known formats to validate. This argument can be used to
            limit which formats will be used during validation.
    """
    # Class-wide registry used by cls_checks(); each instance copies it in
    # __init__ so per-instance registrations stay local.
    checkers = {}
    def __init__(self, formats=None):
        if formats is None:
            self.checkers = self.checkers.copy()
        else:
            # Restrict to an explicit subset; unknown names raise KeyError.
            self.checkers = dict((k, self.checkers[k]) for k in formats)
    def __repr__(self):
        return "<FormatChecker checkers={}>".format(sorted(self.checkers))
    def checks(self, format, raises=()):
        """
        Register a decorated function as validating a new format.
        Arguments:
            format (str):
                The format that the decorated function will check.
            raises (Exception):
                The exception(s) raised by the decorated function when an
                invalid instance is found.
                The exception object will be accessible as the
                `jsonschema.exceptions.ValidationError.cause` attribute of the
                resulting validation error.
        """
        def _checks(func):
            self.checkers[format] = (func, raises)
            return func
        return _checks
    # Same decorator, but bound to the class so it writes the shared registry.
    cls_checks = classmethod(checks)
    def check(self, instance, format):
        """
        Check whether the instance conforms to the given format.
        Arguments:
            instance (*any primitive type*, i.e. str, number, bool):
                The instance to check
            format (str):
                The format that instance should conform to
        Raises:
            FormatError: if the instance does not conform to ``format``
        """
        if format not in self.checkers:
            # Unknown formats are deliberately accepted (no-op).
            return
        func, raises = self.checkers[format]
        result, cause = None, None
        try:
            result = func(instance)
        except raises as e:
            # Declared exceptions count as "did not conform"; keep the cause.
            cause = e
        if not result:
            raise FormatError(
                "%r is not a %r" % (instance, format), cause=cause,
            )
    def conforms(self, instance, format):
        """
        Check whether the instance conforms to the given format.
        Arguments:
            instance (*any primitive type*, i.e. str, number, bool):
                The instance to check
            format (str):
                The format that instance should conform to
        Returns:
            bool: whether it conformed
        """
        try:
            self.check(instance, format)
        except FormatError:
            return False
        else:
            return True
# One format-checker registry per supported draft; populated below through
# the _checks_drafts decorator as checker functions are defined.
draft3_format_checker = FormatChecker()
draft4_format_checker = FormatChecker()
draft6_format_checker = FormatChecker()
draft7_format_checker = FormatChecker()
_draft_checkers = dict(
    draft3=draft3_format_checker,
    draft4=draft4_format_checker,
    draft6=draft6_format_checker,
    draft7=draft7_format_checker,
)
def _checks_drafts(
    name=None,
    draft3=None,
    draft4=None,
    draft6=None,
    draft7=None,
    raises=(),
):
    """
    Decorator factory that registers a format checker with each draft's
    registry.  ``name`` is the default format name; the per-draft keyword
    arguments override it (a draft is skipped when its name stays falsy).
    """
    per_draft = {
        "draft3": draft3 or name,
        "draft4": draft4 or name,
        "draft6": draft6 or name,
        "draft7": draft7 or name,
    }

    def wrap(func):
        for draft, format_name in per_draft.items():
            if format_name:
                func = _draft_checkers[draft].checks(format_name, raises)(func)
        # Oy. This is bad global state, but relied upon for now, until
        # deprecation. See https://github.com/Julian/jsonschema/issues/519
        # and test_format_checkers_come_with_defaults
        fallback = (
            per_draft["draft7"]
            or per_draft["draft6"]
            or per_draft["draft4"]
            or per_draft["draft3"]
        )
        FormatChecker.cls_checks(fallback, raises)(func)
        return func

    return wrap
@_checks_drafts(name="idn-email")
@_checks_drafts(name="email")
def is_email(instance):
    """Loosely validate an email: any string containing "@" passes."""
    if isinstance(instance, str_types):
        return "@" in instance
    return True
_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")


@_checks_drafts(
    draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4",
)
def is_ipv4(instance):
    """Validate dotted-quad IPv4: four 1-3 digit octets, each 0..255."""
    if not isinstance(instance, str_types):
        return True
    if _ipv4_re.match(instance) is None:
        return False
    return all(0 <= int(octet) <= 255 for octet in instance.split("."))
if hasattr(socket, "inet_pton"):
    # FIXME: Really this only should raise struct.error, but see the sadness
    # that is https://twistedmatrix.com/trac/ticket/9409
    @_checks_drafts(
        name="ipv6", raises=(socket.error, struct.error, ValueError),
    )
    def is_ipv6(instance):
        """Validate IPv6 via inet_pton; non-strings pass trivially."""
        if isinstance(instance, str_types):
            return socket.inet_pton(socket.AF_INET6, instance)
        return True
_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")


@_checks_drafts(
    draft3="host-name",
    draft4="hostname",
    draft6="hostname",
    draft7="hostname",
)
def is_host_name(instance):
    """Validate a hostname: overall pattern plus a 63-char cap per label."""
    if not isinstance(instance, str_types):
        return True
    if not _host_name_re.match(instance):
        return False
    return all(len(label) <= 63 for label in instance.split("."))
try:
    # The built-in `idna` codec only implements RFC 3890, so we go elsewhere.
    import idna
except ImportError:
    # Without the idna package the "idn-hostname" format goes unchecked.
    pass
else:
    @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError)
    def is_idn_host_name(instance):
        # Valid iff idna.encode accepts it; IDNAError maps to a format error.
        if not isinstance(instance, str_types):
            return True
        idna.encode(instance)
        return True
# URI/IRI formats: prefer rfc3987 (full IRI support); fall back to
# rfc3986_validator (URI-only); register nothing if neither is installed.
try:
    import rfc3987
except ImportError:
    try:
        from rfc3986_validator import validate_rfc3986
    except ImportError:
        pass
    else:
        @_checks_drafts(name="uri")
        def is_uri(instance):
            if not isinstance(instance, str_types):
                return True
            return validate_rfc3986(instance, rule="URI")
        @_checks_drafts(
            draft6="uri-reference",
            draft7="uri-reference",
            raises=ValueError,
        )
        def is_uri_reference(instance):
            if not isinstance(instance, str_types):
                return True
            return validate_rfc3986(instance, rule="URI_reference")
else:
    # rfc3987 available: it additionally provides the IRI rules.
    @_checks_drafts(draft7="iri", raises=ValueError)
    def is_iri(instance):
        if not isinstance(instance, str_types):
            return True
        return rfc3987.parse(instance, rule="IRI")
    @_checks_drafts(draft7="iri-reference", raises=ValueError)
    def is_iri_reference(instance):
        if not isinstance(instance, str_types):
            return True
        return rfc3987.parse(instance, rule="IRI_reference")
    @_checks_drafts(name="uri", raises=ValueError)
    def is_uri(instance):
        if not isinstance(instance, str_types):
            return True
        return rfc3987.parse(instance, rule="URI")
    @_checks_drafts(
        draft6="uri-reference",
        draft7="uri-reference",
        raises=ValueError,
    )
    def is_uri_reference(instance):
        if not isinstance(instance, str_types):
            return True
        return rfc3987.parse(instance, rule="URI_reference")
# RFC 3339 "date-time"/"time" support: try strict_rfc3339 first, then
# rfc3339_validator; skip registration entirely if neither is installed.
try:
    from strict_rfc3339 import validate_rfc3339
except ImportError:
    try:
        from rfc3339_validator import validate_rfc3339
    except ImportError:
        validate_rfc3339 = None
if validate_rfc3339:
    @_checks_drafts(name="date-time")
    def is_datetime(instance):
        if not isinstance(instance, str_types):
            return True
        return validate_rfc3339(instance)
    @_checks_drafts(draft7="time")
    def is_time(instance):
        if not isinstance(instance, str_types):
            return True
        # A bare time is valid iff it forms a valid date-time with any date.
        return is_datetime("1970-01-01T" + instance)
@_checks_drafts(name="regex", raises=re.error)
def is_regex(instance):
    """Validate by attempting to compile; ``re.error`` maps to a format error."""
    if isinstance(instance, str_types):
        return re.compile(instance)
    return True
@_checks_drafts(draft3="date", draft7="date", raises=ValueError)
def is_date(instance):
    """Validate a YYYY-MM-DD date by strict ``strptime`` parsing."""
    if isinstance(instance, str_types):
        return datetime.datetime.strptime(instance, "%Y-%m-%d")
    return True
@_checks_drafts(draft3="time", raises=ValueError)
def is_draft3_time(instance):
    """Validate an HH:MM:SS time by strict ``strptime`` parsing."""
    if isinstance(instance, str_types):
        return datetime.datetime.strptime(instance, "%H:%M:%S")
    return True
try:
    import webcolors
except ImportError:
    # Without webcolors the draft-3 "color" format goes unchecked.
    pass
else:
    def is_css_color_code(instance):
        # Raises ValueError/TypeError for anything but a valid hex code.
        return webcolors.normalize_hex(instance)
    @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
    def is_css21_color(instance):
        # Accept a CSS 2.1 color name or a hex code; non-strings pass.
        if (
            not isinstance(instance, str_types) or
            instance.lower() in webcolors.css21_names_to_hex
        ):
            return True
        return is_css_color_code(instance)
    def is_css3_color(instance):
        # NOTE(review): not registered with any draft here — appears unused.
        if instance.lower() in webcolors.css3_names_to_hex:
            return True
        return is_css_color_code(instance)
try:
    import jsonpointer
except ImportError:
    # Without jsonpointer the "json-pointer" formats go unchecked.
    pass
else:
    @_checks_drafts(
        draft6="json-pointer",
        draft7="json-pointer",
        raises=jsonpointer.JsonPointerException,
    )
    def is_json_pointer(instance):
        if not isinstance(instance, str_types):
            return True
        return jsonpointer.JsonPointer(instance)
    # TODO: I don't want to maintain this, so it
    # needs to go either into jsonpointer (pending
    # https://github.com/stefankoegl/python-json-pointer/issues/34) or
    # into a new external library.
    @_checks_drafts(
        draft7="relative-json-pointer",
        raises=jsonpointer.JsonPointerException,
    )
    def is_relative_json_pointer(instance):
        # Definition taken from:
        # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
        if not isinstance(instance, str_types):
            return True
        # Split a leading non-negative integer prefix from the remainder.
        non_negative_integer, rest = [], ""
        for i, character in enumerate(instance):
            if character.isdigit():
                non_negative_integer.append(character)
                continue
            if not non_negative_integer:
                # The very first character must be a digit.
                return False
            rest = instance[i:]
            break
        # The remainder is either "#" or must itself be a valid JSON pointer.
        return (rest == "#") or jsonpointer.JsonPointer(rest)
try:
    import uritemplate.exceptions
except ImportError:
    # Without uritemplate the "uri-template" format goes unchecked.
    pass
else:
    @_checks_drafts(
        draft6="uri-template",
        draft7="uri-template",
        raises=uritemplate.exceptions.InvalidTemplate,
    )
    def is_uri_template(
        instance,
        # Shared validator instance, created once at definition time.
        template_validator=uritemplate.Validator().force_balanced_braces(),
    ):
        template = uritemplate.URITemplate(instance)
        return template_validator.validate(template)
import urllib.parse as urlparse
from poetry.core.semver.exceptions import ParseConstraintError
from poetry.core.semver.helpers import parse_constraint
from .grammars import GRAMMAR_PEP_508_CONSTRAINTS
from .markers import _compact_markers
from .parser import Parser
class InvalidRequirement(ValueError):
    """
    Raised when a badly-formed requirement string is parsed; users should
    refer to PEP 508 for the expected grammar.
    """
# Parser: PEP 508 Constraints
# Shared module-level LALR parser; built once because construction is costly.
_parser = Parser(GRAMMAR_PEP_508_CONSTRAINTS, "lalr")
class Requirement(object):
    """
    Parse a requirement.
    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """
    def __init__(self, requirement_string: str) -> None:
        # Imported lazily so lark is only required when parsing happens.
        from lark import UnexpectedCharacters
        from lark import UnexpectedToken
        try:
            parsed = _parser.parse(requirement_string)
        except (UnexpectedCharacters, UnexpectedToken) as e:
            raise InvalidRequirement(
                "The requirement is invalid: Unexpected character at column {}\n\n{}".format(
                    e.column, e.get_context(requirement_string)
                )
            )
        # The grammar guarantees exactly one NAME token.
        self.name = next(parsed.scan_values(lambda t: t.type == "NAME")).value
        url = next(parsed.scan_values(lambda t: t.type == "URI"), None)
        if url:
            url = url.value
            parsed_url = urlparse.urlparse(url)
            if parsed_url.scheme == "file":
                # file: URLs must round-trip unchanged through urlunparse.
                if urlparse.urlunparse(parsed_url) != url:
                    raise InvalidRequirement(
                        'The requirement is invalid: invalid URL "{0}"'.format(url)
                    )
            elif (
                # NOTE(review): the second clause is implied by the first and
                # appears redundant — kept as-is, confirm against upstream.
                not (parsed_url.scheme and parsed_url.netloc)
                or (not parsed_url.scheme and not parsed_url.netloc)
            ) and not parsed_url.path:
                raise InvalidRequirement(
                    'The requirement is invalid: invalid URL "{0}"'.format(url)
                )
            self.url = url
        else:
            self.url = None
        self.extras = [e.value for e in parsed.scan_values(lambda t: t.type == "EXTRA")]
        constraint = next(parsed.find_data("version_specification"), None)
        if not constraint:
            constraint = "*"
        else:
            constraint = ",".join(constraint.children)
        try:
            self.constraint = parse_constraint(constraint)
        except ParseConstraintError:
            raise InvalidRequirement(
                'The requirement is invalid: invalid version constraint "{}"'.format(
                    constraint
                )
            )
        # Original textual form of the constraint, used by __str__.
        self.pretty_constraint = constraint
        marker = next(parsed.find_data("marker_spec"), None)
        if marker:
            marker = _compact_markers(
                marker.children[0].children, tree_prefix="markers__"
            )
        # None when the requirement carries no environment marker.
        self.marker = marker
    def __str__(self) -> str:
        parts = [self.name]
        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))
        if self.pretty_constraint:
            parts.append(self.pretty_constraint)
        if self.url:
            parts.append("@ {0}".format(self.url))
        if self.marker:
            parts.append("; {0}".format(self.marker))
        return "".join(parts)
    def __repr__(self) -> str:
        return "<Requirement({0!r})>".format(str(self))
from contextlib import contextmanager
from io import UnsupportedOperation
from pathlib import Path
from typing import Optional, Dict, Any, Union, List, Sequence, Mapping, ContextManager
from poetry.core.pyproject.profiles import ProfilesActivationRequest, apply_profiles
from poetry.core.pyproject.properties import substitute_toml
from poetry.core.pyproject.tables import BuildSystem, PROPERTIES_TABLE, DEPENDENCIES_TABLE, SUBPROJECTS_TABLE, \
POETRY_TABLE
from poetry.core.utils.props_ext import cached_property
import poetry.core.utils.toml as toml
# Cache of Project instances keyed by "<path>/<profiles>" (see Project.read).
_PY_PROJECT_CACHE = {}
# Directory (relative to a project root) holding rp management files.
_PROJECT_MANAGEMENT_FILES_SUBDIR = "etc/rp"
# Key paths into the pyproject data, pre-split for _get().
_PARENT_KEY = "tool.relaxed-poetry.parent-project".split(".")
_RELATIVE_PROFILES_DIR = f"{_PROJECT_MANAGEMENT_FILES_SUBDIR}/profiles"
_NAME_KEY = "tool.poetry.name".split(".")
_VERSION_KEY = "tool.poetry.version".split(".")
class Project:
    """An in-memory view of a ``pyproject.toml`` file.

    Supports relaxed-poetry extensions: parent/sub-project hierarchies,
    property substitution and profile activation.  Instances are cached per
    (path, profiles) key by :meth:`read`; purely in-memory projects are
    created via :meth:`new_in_mem`.
    """

    def __init__(
            self, data: Dict[str, Any], file: Optional[Path],
            parent: Optional["Project"] = None,
            profiles: Optional[ProfilesActivationRequest] = None):
        self._data = data
        self._file = file  # None for purely in-memory projects
        self.parent = parent
        self._is_parent = None  # computed lazily by is_parent()
        self._build_system: Optional["BuildSystem"] = None
        self._profiles = profiles

    def __getitem__(self, item: Union[str, List[str]]) -> Any:
        """
        :param item: table key like "tool.relaxed-poetry.properties" or ["tool", "relaxed-poetry", "properties"]
        :return: the value if it exists otherwise None
        """
        return _get(self._data, item)

    def is_stored(self):
        """Whether this project is backed by a file on disk."""
        return self.path is not None

    @property
    def path(self) -> Optional[Path]:
        return self._file

    @property
    def data(self) -> Dict[str, Any]:
        return self._data

    @property
    def name(self) -> str:
        return self[_NAME_KEY]

    @property
    def version(self) -> str:
        return self[_VERSION_KEY]

    @property
    def properties(self) -> Dict[str, Any]:
        return self[PROPERTIES_TABLE]

    # TODO: probably belongs to managed project
    @cached_property
    def project_management_files(self) -> Optional[Path]:
        """Path to the project's rp management dir, or None if not stored."""
        if not self.is_stored():
            return None
        return self.path.parent / _PROJECT_MANAGEMENT_FILES_SUBDIR

    @cached_property
    def dependencies(self):
        return self[DEPENDENCIES_TABLE]

    def reload(self) -> None:
        """Re-read this project's file from disk, bypassing the read cache."""
        # noinspection PyProtectedMember
        self._data = Project.read(self.path, self._profiles, invalidate_cache=True)._data

    @contextmanager
    def edit(self) -> ContextManager[Dict[str, Any]]:
        """Yield the raw toml data for mutation.

        File-backed projects are persisted and reloaded on exit; in-memory
        projects simply expose their data dict.
        """
        if self._file:
            data, dumps = toml.load(self._file)
            yield data
            self._file.write_text(dumps(data))
            self.reload()
        else:
            yield self._data

    @cached_property
    def requires_python(self):
        # NOTE(review): returns a bool ("is python constrained?"), not the
        # constraint itself — confirm the name matches callers' expectation.
        deps = self[DEPENDENCIES_TABLE] or {}
        return 'python' in deps

    @cached_property
    def sub_projects(self) -> Optional[Dict[str, "Project"]]:
        """Mapping of sub-project name to its (lazily read) Project."""
        sub_project_defs: Dict[str, str] = self[SUBPROJECTS_TABLE]
        if not sub_project_defs:
            return {}
        return {name: Project.read(_relativize(self.path.parent, path) / "pyproject.toml", None) for name, path in
                sub_project_defs.items()}

    @property
    def build_system(self) -> "BuildSystem":
        from poetry.core.pyproject.tables import BuildSystem
        if self._build_system is None:
            build_backend = None
            requires = None
            container = self.data.get("build-system", {})
            self._build_system = BuildSystem(
                build_backend=container.get("build-backend", build_backend),
                requires=container.get("requires", requires),
            )
        return self._build_system

    @property
    def poetry_config(self) -> Optional[Dict[str, Any]]:
        return self[POETRY_TABLE]

    def is_parent(self):
        """Whether this project declares sub-projects."""
        if self._is_parent is None:
            self._is_parent = self[SUBPROJECTS_TABLE] is not None
        return self._is_parent

    def lookup_sibling(self, name: str) -> Optional["Project"]:
        """Find a sub-project called *name* in this project or any ancestor."""
        p = self
        while p:
            sibling = p.sub_projects.get(name)
            if sibling:
                return sibling
            p = p.parent
        return None

    def is_poetry_project(self) -> bool:
        return self[POETRY_TABLE] is not None

    @staticmethod
    def _lookup_parent(path: Path) -> Optional[Path]:
        """Walk up from *path* looking for a pyproject that lists *path*
        among its sub-projects; return that pyproject file or None."""
        path = path.absolute().resolve()
        p = path.parent
        while p:
            parent_project_file = p / "pyproject.toml"
            if parent_project_file.exists():
                parent_data, _ = toml.load(parent_project_file)
                sub_projects = _get(parent_data, SUBPROJECTS_TABLE)
                if sub_projects:
                    for sub_project_path in sub_projects.values():
                        sub_project_path = _relativize(p, sub_project_path)
                        if sub_project_path == path:
                            return parent_project_file
            # Stop at the filesystem root (where parent == self).
            p = p.parent if p.parent != p else None
        return None

    @staticmethod
    def has_poetry_section(path: Path) -> bool:
        """Whether the file at *path* exists and has a [tool.poetry] table."""
        if not path.exists():
            return False
        data, _ = toml.load(path)
        return _get(data, POETRY_TABLE) is not None

    @staticmethod
    def create(path: Union[Path, str], exists_ok: bool = False):
        """Create an empty pyproject file at *path* and return its Project.

        Accepts a str path as the annotation promises (the previous
        implementation crashed with AttributeError on str input).
        """
        path = Path(path) if not isinstance(path, Path) else path
        if path.exists() and not exists_ok:
            raise FileExistsError()
        path.write_text('')
        return Project({}, path)

    @staticmethod
    def read(path: Union[Path, str], profiles: Optional[ProfilesActivationRequest] = None, *,
             invalidate_cache: bool = False) -> "Project":
        """Load (and cache) the project at *path*.

        Resolves the parent project, applies any requested profiles and
        substitutes property references before constructing the Project.
        """
        path = Path(path) if not isinstance(path, Path) else path
        cache_key = f"{path}/{profiles}"
        if invalidate_cache or cache_key not in _PY_PROJECT_CACHE:
            if path.exists():
                toml_data, _ = toml.load(path)
            else:
                toml_data = {}
            # first find parent if such exists..
            parent_path = _relativize(path, _get(toml_data, _PARENT_KEY))
            if not parent_path:
                parent_path = Project._lookup_parent(path.parent)
            parent = None
            if parent_path:
                parent = Project.read(parent_path, None)
            # Child properties override inherited parent properties.
            parent_props = (parent.properties if parent is not None else None) or {}
            my_props = {**parent_props, **(_get(toml_data, PROPERTIES_TABLE) or {})}
            # apply profiles if requested (searching this project's profile
            # dir first, then each ancestor's)
            new_deps = {}
            if profiles:
                profiles_dirs = [path.parent / _RELATIVE_PROFILES_DIR]
                p = parent
                while p:
                    profiles_dirs.append(p.path.parent / _RELATIVE_PROFILES_DIR)
                    p = p.parent
                apply_profiles(my_props, new_deps, profiles_dirs, profiles)
            # add new dependencies; profiles may not override explicit ones
            deps = _put_if_absent(toml_data, DEPENDENCIES_TABLE, {})
            for name, spec in new_deps.items():
                if name not in deps:
                    deps[name] = spec
                else:
                    raise UnsupportedOperation(
                        f"profile attempted to overwrite dependency that was specified in pyproject: {name}")
            # substitute properties
            substitute_toml(toml_data, my_props)
            _PY_PROJECT_CACHE[cache_key] = Project(toml_data, path, parent, profiles)
        return _PY_PROJECT_CACHE[cache_key]

    @classmethod
    def new_in_mem(
            cls, name: str,
            version: str = "0.0.1", authors: List[str] = None):
        """Create a minimal, file-less Project (used for ad-hoc builds)."""
        data = {
            "tool": {
                "poetry": {
                    "name": name,
                    "version": version,
                    "authors": authors or []
                }}}
        return Project(data, None, None, None)
def _relativize(path: Path, relative: Optional[str]):
if not relative:
return None
p = Path(relative)
if p.is_absolute():
return p.resolve()
return (path / p).resolve()
def _put_if_absent(d: Dict[str, Any], path: Sequence[str], value: Any) -> Any:
r = d
for p in path[:-1]:
try:
r = r[p]
except KeyError:
r[p] = {}
r = r[p]
lp = path[-1]
if lp not in r:
r[lp] = value
return r[lp]
def _get(data: Dict[str, Any], item: Union[str, List[str]]):
if isinstance(item, str):
item = list(toml.key2path(item))
r = data
for p in item:
if not r:
return None
if not isinstance(r, Mapping):
raise ValueError(f"in path: {item}, {p} does not lead to dict")
r = r.get(p)
return r | /relaxed_poetry_core-0.4.1-py3-none-any.whl/poetry/core/pyproject/project.py | 0.778102 | 0.17252 | project.py | pypi |
import json
import os
from typing import Dict, Any
def substitute_toml(doc: Dict[str, Any], props: Dict[str, Any]):
# first override table keys with environment variables
# this means that environment variables overrides profiles changes
properties = {pkey: _merge_env(pkey, pval) for pkey, pval in props.items()}
# next try to perform substitution within the properties themselves
properties = _substitute_properties(properties)
# now that we resolved all the property values we can find the document itself
_substitute(properties, doc)
def _merge_env(property: str, default_value: Any) -> Any:
env_value = os.environ.get(property.replace('-', '_'))
if env_value is None:
return default_value
return json.loads(env_value)
def _substitute_properties(properties: Dict[str, Any]) -> Dict[str, Any]:
sub_properties = {}
cur_round_unsub_keys = {p for p in properties.keys()}
cur_round_changes = 0
next_round_unsub_keys = set()
while True:
for pkey in cur_round_unsub_keys:
pval = properties[pkey]
try:
sub_properties[pkey] = _substitute(sub_properties, pval)
cur_round_changes += 1
except KeyError:
next_round_unsub_keys.add(pkey)
if len(next_round_unsub_keys) == 0:
break
if cur_round_changes == 0:
raise ValueError(f"Circular property references detected: {next_round_unsub_keys}")
cur_round_changes = 0
cur_round_unsub_keys = next_round_unsub_keys
next_round_unsub_keys = set()
return sub_properties
def _substitute(props: Dict[str, Any], o: Any) -> Any:
if isinstance(o, list):
for i, item in enumerate(list(o)):
s = _substitute(props, item)
if s is not item:
o[i] = s
elif isinstance(o, dict):
for k, v in list(o.items()):
s = _substitute(props, v)
if s is not v:
o[k] = s
elif isinstance(o, str):
s = o.strip()
if len(s) > 1 and s[0] == '$':
return props.get(s[1:], s)
return o | /relaxed_poetry_core-0.4.1-py3-none-any.whl/poetry/core/pyproject/properties.py | 0.529993 | 0.219097 | properties.py | pypi |
import importlib.util
from functools import reduce
from io import UnsupportedOperation
from pathlib import Path
from typing import List, Dict, Any
from dataclasses import dataclass
from poetry.core.pyproject.tables import PROPERTIES_TABLE, DEPENDENCIES_TABLE
from poetry.core.utils import toml
PROFILE_TABLE = "tool.relaxed-poetry.profile".split(".")


@dataclass
class ProfilesActivationRequest:
    """A request to (de)activate named profiles for a given command."""

    # profile name -> True to activate, False to explicitly deactivate
    requested_profiles: Dict[str, bool]
    command_name: str

    @classmethod
    def from_commandline(cls, command: str, profiles: List[str]) -> "ProfilesActivationRequest":
        """Parse CLI profile tokens; a leading "!" marks deactivation."""
        parsed = {}
        for token in profiles:
            if token.startswith("!"):
                parsed[token[1:]] = False
            else:
                parsed[token] = True
        return cls(parsed, command)
class _Properties:
    """Mutable view over the project's property map, exposed to profile scripts as `props`."""

    def __init__(self, props: Dict[str, Any]):
        # shared by reference: mutations are visible to the caller's dict
        self._props = props

    def __getitem__(self, item: str):
        """Return the value of property *item* (raises KeyError when undefined)."""
        return self._props[item]

    def __setitem__(self, key: str, value):
        """Define or override a single property."""
        self._props[key] = value

    def update(self, props: Dict[str, Any]):
        """Merge *props* into the property map, overriding existing entries."""
        self._props.update(props)
class _Dependencies:
    """Collects dependencies added by profiles, exposed to profile scripts as `deps`."""

    def __init__(self, new_deps: Dict[str, Any]):
        # shared by reference: additions are visible to the caller's dict
        self.new_deps: Dict[str, Any] = new_deps

    def add(self, name: str, specification: Any):
        """Register (or override) a dependency by *name* with its version/spec."""
        self.new_deps[name] = specification
class _Execution:
    """Read-only view of the current command execution, exposed to profiles as `execution`."""

    def __init__(self, activation: ProfilesActivationRequest):
        self._activation = activation

    @property
    def command_name(self):
        """Name of the poetry command currently being executed."""
        return self._activation.command_name
def _should_activate(activate_spec: Any, exec: _Execution):
if isinstance(activate_spec, bool):
return activate_spec
elif isinstance(activate_spec, dict):
if 'commands' in activate_spec:
commands: List[str] = activate_spec['commands']
if isinstance(commands, str):
commands = [commands]
default_accept = '*' in commands
for command in commands:
accept = default_accept
if command == '*':
continue
if command.startswith('!'):
accept = False
command = command[1:]
if exec.command_name == command:
return accept
return False
raise UnsupportedOperation(f"unsupported profile activation spec: {activate_spec}")
def _activate_static_profile(profile_path: Path, props: _Properties, deps: _Dependencies, exec: _Execution):
    """Apply a TOML ("static") profile: merge property overrides and add dependencies.

    The profile is skipped unless its `activate` spec (default: False, i.e.
    never auto-activate) matches the current execution.
    """
    profile_data,_ = toml.load(profile_path)
    profile_def = _lookup(profile_data, PROFILE_TABLE) or {}
    # check activation:
    activate_spec = profile_def.get('activate', False)
    if not _should_activate(activate_spec, exec):
        return
    print(f"Activating Static Profile: {profile_path.stem}")
    # property overrides replace existing values wholesale
    props_overrides = _lookup(profile_data, PROPERTIES_TABLE) or {}
    props.update(props_overrides)
    # dependencies declared by the profile are appended to the project's
    new_deps = _lookup(profile_data, DEPENDENCIES_TABLE) or {}
    for name, spec in new_deps.items():
        deps.add(name, spec)
def _activate_dynamic_profile(profile_path: Path, props: _Properties, dependencies: _Dependencies, exec: _Execution):
    """Execute a Python ("dynamic") profile script with the activation API injected.

    The script sees `props`, `execution` and `deps` as module globals and
    decides for itself what to change; any failure is wrapped in RuntimeError.

    NOTE(review): this executes arbitrary code from the profile file —
    profiles are presumably trusted, project-local files; confirm.
    """
    print(f"Activating Dynamic Profile: {profile_path.stem}")
    try:
        spec = importlib.util.spec_from_file_location("__PROFILE__", profile_path)
        module = importlib.util.module_from_spec(spec)
        # expose the activation API as module globals before running the script
        module.props = props
        module.execution = exec
        module.deps = dependencies
        spec.loader.exec_module(module)
    except Exception as e:
        raise RuntimeError(f"Error while evaluating profile: {profile_path.stem}") from e
def apply_profiles(
        properties: Dict[str, Any],
        new_deps: Dict[str, Any],
        profiles_dirs: List[Path],
        activation_data: ProfilesActivationRequest
):
    """Scan *profiles_dirs* and apply every matching profile.

    Both *properties* and *new_deps* are mutated in place.  Profiles the user
    explicitly deactivated ("!name") are skipped; `.py` profiles run as
    scripts (they gate themselves via `execution`), `.toml` profiles are
    gated by their `activate` spec.
    """
    dependencies = _Dependencies(new_deps)
    properties = _Properties(properties)
    execution = _Execution(activation_data)
    # activate automatic profiles
    for profiles_dir in profiles_dirs:
        if profiles_dir.exists():
            for profile in profiles_dir.iterdir():
                # default True: profiles not mentioned on the CLI may auto-activate
                if activation_data.requested_profiles.get(profile.stem, True):
                    if profile.name.endswith(".py"):
                        _activate_dynamic_profile(profile, properties, dependencies, execution)
                    elif profile.name.endswith(".toml"):
                        _activate_static_profile(profile, properties, dependencies, execution)
def _lookup(d: Dict, path: List[str]) -> Any:
try:
return reduce(lambda m, k: m[k], path, d)
except KeyError:
return None | /relaxed_poetry_core-0.4.1-py3-none-any.whl/poetry/core/pyproject/profiles.py | 0.734024 | 0.168275 | profiles.py | pypi |
import os
import re
import shutil
import stat
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any
from typing import Iterator
from typing import List
from typing import Union
from poetry.core.version.pep440 import PEP440Version
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
# Collapses any run of '-' / '_' into a single character for canonicalization.
_canonicalize_regex = re.compile(r"[-_]+")


def canonicalize_name(name: str) -> str:
    """Return the canonical distribution name: lowercase, runs of -/_ folded to '-'."""
    return _canonicalize_regex.sub("-", name).lower()


def module_name(name: str) -> str:
    """Return the importable module name derived from a distribution name."""
    return canonicalize_name(name).replace(".", "_").replace("-", "_")
def normalize_version(version: str) -> str:
    """Return the PEP 440 normalized (short) string form of *version*."""
    return PEP440Version.parse(version).to_string(short=True)
@contextmanager
def temporary_directory(*args: Any, **kwargs: Any) -> Iterator[str]:
    """Create a temporary directory, yield its path, and always remove it.

    Arguments are forwarded to tempfile.mkdtemp.

    Bug fix: the original skipped cleanup when the with-body raised; the
    try/finally now guarantees removal on both success and error paths.
    """
    name = tempfile.mkdtemp(*args, **kwargs)
    try:
        yield name
    finally:
        safe_rmtree(name)
def parse_requires(requires: str) -> List[str]:
    """Convert an egg-info ``requires.txt`` body into PEP 508 requirement strings.

    Section headers like ``[extra]`` or ``[extra:marker]`` are folded into
    every following requirement as an ``extra == "..."`` / environment marker.
    """
    lines = requires.split("\n")
    requires_dist = []
    in_section = False
    current_marker = None
    for line in lines:
        line = line.strip()
        if not line:
            if in_section:
                in_section = False
            continue
        if line.startswith("["):
            # extras or conditional dependencies
            marker = line.lstrip("[").rstrip("]")
            if ":" not in marker:
                extra, marker = marker, None
            else:
                # NOTE(review): a marker containing a second ':' would make this
                # unpack raise ValueError — assumed not to occur in practice.
                extra, marker = marker.split(":")
            if extra:
                if marker:
                    marker = '{} and extra == "{}"'.format(marker, extra)
                else:
                    marker = 'extra == "{}"'.format(extra)
            if marker:
                current_marker = marker
            continue
        if current_marker:
            line = "{} ; {}".format(line, current_marker)
        requires_dist.append(line)
    return requires_dist
def _on_rm_error(func: Any, path: Union[str, Path], exc_info: Any) -> None:
if not os.path.exists(path):
return
os.chmod(path, stat.S_IWRITE)
func(path)
def safe_rmtree(path: Union[str, Path]) -> None:
    """Remove a directory tree; a symlink is unlinked instead of followed."""
    target = Path(path)
    if target.is_symlink():
        return os.unlink(str(path))
    # read-only entries are handled by the _on_rm_error retry hook
    shutil.rmtree(path, onerror=_on_rm_error)
def merge_dicts(d1: dict, d2: dict) -> None:
    """Recursively merge *d2* into *d1* in place.

    Nested dicts are merged key by key; any other value in *d2* overwrites
    the corresponding entry in *d1*.
    """
    for key, value in d2.items():
        if key in d1 and isinstance(d1[key], dict) and isinstance(value, Mapping):
            merge_dicts(d1[key], value)
        else:
            d1[key] = value
import re
from copy import copy
from numbers import Number
from pathlib import Path
from typing import Optional, Dict, Any, Mapping, List, Tuple, Union, NoReturn, MutableMapping, Callable
from dataclasses import dataclass
# Bare TOML key characters: ASCII letters, digits, dashes and underscores.
_KEY_RX = re.compile(r'[a-zA-Z0-9\-_]+')
# A parsed key path, e.g. ("tool", "poetry"); list positions appear as ints.
KEY_T = Tuple
# Signature of the layout-preserving "dumps" callback returned by loads()/load().
DUMPS_T = Callable[[Dict[str, Any]], str]
class TomlTimeLiteral(str):
    """A TOML date/time value kept as its literal source text (not parsed)."""
@dataclass
class _ValueStyle:
    """Layout details captured for a single TOML value (for round-tripping)."""
    prolog: str = ''  # whitespace/comments emitted before the value
    epilog: str = ''  # whitespace/comments emitted after the value
    content_trailing: str = ''  # text kept inside an inline list, before the closing ']'
    value_str: Optional[str] = None  # exact source text of the value, when known
    value_sig: int = 0  # type or id() signature used to detect value replacement
    display: str = 'auto'  # 'auto' | 'inline' | 'regular' | 'hidden' | 'dotted' | 'table-list'
@dataclass
class _KeyStyle:
    """Layout details captured for a single TOML key (for round-tripping)."""
    prolog: str = ''  # whitespace/comments emitted before the key
    epilog: str = ''  # whitespace/comments emitted after the key
    key_str: Optional[str] = None  # exact source text of the key, when known
    display: str = 'auto'  # 'auto' | 'assignment' | 'table' | 'table-list-item'
# Shared fallback styles; consumers copy them before mutating (their 'auto'
# display always takes the copying branch in key_style()/value_style()).
_DEFAULT_VALUE_STYLE = _ValueStyle()
_DEFAULT_KEY_STYLE = _KeyStyle()
class _StyleSheet:
    """Records how each key/value of a TOML document was (or should be) rendered.

    Styles are captured by _Reader during parsing so that _Writer can
    reproduce the original layout (comments, whitespace, inline vs. table
    display) when the document is dumped again.
    """

    def __init__(self):
        # style per key-path tuple (see KEY_T)
        self.keys: Dict[KEY_T, _KeyStyle] = {}
        self.values: Dict[KEY_T, _ValueStyle] = {}

    def lastify(self, key: KEY_T) -> KEY_T:
        """Append a running occurrence index to *key* (distinguishes repeated table headers)."""
        lk = len(key) + 1
        index = sum(len(k) == lk and k[:-1] == key for k in self.keys.keys())
        return (*key, index)

    def set_key_style(self, key: KEY_T, style: _KeyStyle):
        self.keys[key] = style

    def set_value_style(self, key: KEY_T, value: Any, style: _ValueStyle):
        """Store *style* for *key*, tagging it with the value's type/identity signature."""
        if isinstance(value, (Mapping, List)):
            # containers are matched by type (contents may change between dumps)
            style.value_sig = type(value)
        else:
            # scalars are matched by identity: a replaced value loses its style
            style.value_sig = id(value)
        self.values[key] = style

    def update_value_auto_display(self, key: KEY_T, value: Any, display: str):
        """Set *display* for *key* unless an explicit display was already recorded."""
        if key not in self.values:
            self.set_value_style(key, value, _ValueStyle(display=display))
        else:
            vstyle = self.values[key]
            if vstyle.display == 'auto':
                vstyle.display = display

    def key_style(self, key: KEY_T, display: str = 'auto') -> _KeyStyle:
        """Return the key style for *key*, coerced to *display* when requested."""
        result = self.keys.get(key) or _DEFAULT_KEY_STYLE
        if display != 'auto' and result.display != display:
            # copy before mutating: the stored/default style is shared
            result = copy(result)
            result.display = display
            if display == 'assignment':
                result.epilog = ' '
        return result

    def value_style(self, key: KEY_T, value: Any, display: str = 'auto') -> _ValueStyle:
        """Return the value style for *key*/*value*, deriving a display mode when 'auto'."""
        result = self.values.get(key)
        # discard a stored style whose signature no longer matches the value
        if not result or result.value_sig is not type(value) and result.value_sig != id(value):
            result = _DEFAULT_VALUE_STYLE
        if result.display != 'auto' and (display == 'auto' or display == result.display):
            return result
        vstyle = copy(result)
        if display != 'auto':
            vstyle.display = display
        else:
            # heuristics choosing a readable default rendering
            vstyle.display = 'inline'
            if isinstance(value, Mapping):
                is_root = not key
                is_first_level = len(key) == 1
                # a table holding only sub-tables/table-lists needs no header of its own
                has_no_assignments = all(
                    isinstance(it, Mapping) or
                    (isinstance(it, List) and self.value_style((*key, k), it).display == 'table-list')
                    for k, it in value.items())
                contains_large_data = \
                    len(value) > 5 or \
                    any(isinstance(it, (Mapping, List)) or (isinstance(it, str) and len(it) > 40)
                        for it in value.values())
                if is_root:
                    vstyle.display = 'regular'
                elif has_no_assignments:
                    vstyle.display = 'hidden'
                elif contains_large_data or is_first_level:
                    vstyle.display = 'regular'
            elif isinstance(value, List):
                has_items = len(value) > 0
                has_no_assignments = all(isinstance(it, Mapping) for it in value)
                if has_items and has_no_assignments:
                    # a non-empty list of tables renders as [[...]] sections
                    vstyle.display = 'table-list'
        # configure auto display
        if vstyle.display == 'inline':
            vstyle.prolog = ' '
        return vstyle
class _Writer:
    """Serializes a dict tree back into TOML text, honoring a _StyleSheet.

    When constructed with the style sheet captured by _Reader, the output
    reproduces the original document's layout; with a fresh sheet, default
    display heuristics apply.
    """

    def __init__(self, style: Optional[_StyleSheet] = None):
        self.style = style or _StyleSheet()

    def write(self, data: Dict[str, Any]) -> str:
        """Render *data* as a complete TOML document string."""
        key = ()
        return self._write_value(data, key, self.style.value_style(key, data)).rstrip()

    def _write_value(self, data: Any, key: KEY_T, vstyle: _ValueStyle) -> str:
        """Dispatch on the Python type of *data* to the matching renderer."""
        # bool must be tested before Number (bool is a Number subclass)
        if isinstance(data, (bool)):
            return self._write_bool(data, vstyle)
        if isinstance(data, (Number, TomlTimeLiteral)):
            return self._write_nummeric_like(data, vstyle)
        if isinstance(data, str):
            return self._write_str(data, vstyle)
        if isinstance(data, Mapping):
            return self._write_table(data, key, vstyle)
        if isinstance(data, List):
            return self._write_list(data, key, vstyle)
        raise ValueError(f"Unknown value type: {type(data)} for key {key}")

    def _write_nummeric_like(self, data: Any, style: _ValueStyle) -> str:
        """Render a number or date literal, preferring the recorded source text."""
        v = style.value_str
        if v is None:
            v = str(data)
        return f"{style.prolog}{v}{style.epilog}"

    def _write_bool(self, data: Any, style: _ValueStyle) -> str:
        """Render a boolean as TOML 'true'/'false' unless a source text is recorded."""
        v = style.value_str
        if v is None:
            v = 'true' if data else 'false'
        return f"{style.prolog}{v}{style.epilog}"

    def _write_list(self, data: List[Any], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render a list either inline ([..]) or as a table list ([[..]])."""
        if vstyle.display == 'inline':
            return self._write_inline_list(data, key, vstyle)
        return self._write_table_list(data, key, vstyle)

    def _write_inline_list(self, data: List[Any], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render *data* as an inline TOML array."""
        result = f"{vstyle.prolog}["

        # local helper shadows the method name on purpose; it renders one item
        def _write_value(v: Any, i: int) -> str:
            k = (*key, i)
            s = self.style.value_style(k, v, 'inline')
            return self._write_value(v, k, s)

        result += ', '.join(_write_value(v, i) for i, v in enumerate(data))
        result += f'{vstyle.content_trailing}]{vstyle.epilog}'
        return result

    def _write_table(self, data: Mapping[str, Any], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render a mapping according to its display mode."""
        if vstyle.display == 'regular':
            return self._write_regular_table(data, key, vstyle)
        if vstyle.display == 'hidden':
            return self._write_hidden_table(data, key, vstyle)
        if vstyle.display == 'inline':
            return self._write_inline_table(data, key, vstyle)
        if vstyle.display == 'dotted':
            return self._write_dotted_table(data, key, vstyle)
        raise ValueError(f"unknown table display style: {vstyle.display}")

    def _write_str(self, data: str, vstyle: _ValueStyle) -> str:
        """Render a string, escaping control characters, quotes and backslashes."""
        value_str = vstyle.value_str
        if value_str is None:
            escaped = data.translate({
                ord('\b'): '\\b',
                ord('\t'): '\\t',
                ord('\n'): '\\n',
                ord('\f'): '\\f',
                ord('\r'): '\\r',
                ord('"'): '\\"',
                ord('\\'): '\\\\',
            })
            value_str = f'"{escaped}"'
        return f'{vstyle.prolog}{value_str}{vstyle.epilog}'

    def _write_key(self, key: KEY_T, offset: int, display: str) -> str:
        """Render the key path *key* starting after *offset* parts.

        Non-string parts (lastify/list indices) are skipped; table and
        table-list keys get their [..] / [[..]] brackets and a newline.
        """
        if not key:
            return ''
        key_style = self.style.key_style(key, display)
        result = key_style.prolog
        key_str = key_style.key_str
        if not key_str:
            key_str = ''
            if display == 'table':
                key_str += '['
            elif display == 'table-list-item':
                key_str += '[['
            key_str += '.'.join(
                self._write_key_part(key[0:i])
                for i in range(offset + 1, len(key) + 1)
                if isinstance(key[i - 1], str))
            if display == 'table':
                key_str += ']'
            elif display == 'table-list-item':
                key_str += ']]'
        result += key_str
        result += key_style.epilog
        if display != 'assignment':
            result += '\n'
        return result

    def _write_key_part(self, part: KEY_T) -> str:
        """Render one key component, quoting it when it is not a bare key."""
        if _KEY_RX.fullmatch(part[-1]):
            return part[-1]
        else:
            return self._write_str(part[-1], _DEFAULT_VALUE_STYLE)

    def _write_regular_table(
            self, data: Mapping[str, Any],
            key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render a [table] header followed by its assignments and sub-tables."""
        result = vstyle.prolog
        key_display = vstyle.display if vstyle.display == 'table-list-item' else 'table'
        result += self._write_key(key, 0, key_display)
        # sub-tables are rendered after all of this table's own assignments
        delayed_items: Dict[KEY_T, Any] = {}
        has_assignments = False
        for partial_key, item in data.items():
            item_key = (*key, partial_key)
            item_style = self.style.value_style(item_key, item)
            if item_style.display == 'inline':
                result += f"{self._write_key(item_key, len(key), 'assignment')}" \
                          f"={self._write_value(item, item_key, item_style)}\n"
                has_assignments = True
            else:
                delayed_items[item_key] = item
        if has_assignments:
            # result += f'({key}\n)'
            result += '\n'
        for item_key, item in delayed_items.items():
            result += self._write_value(item, item_key, self.style.value_style(item_key, item))
        result += vstyle.epilog
        return result

    def _write_hidden_table(self, data: Mapping[str, Any], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render a table with no header of its own — only its sub-tables/table-lists.

        Falls back to a regular table as soon as any child is a plain value.
        """
        result = ''
        for partial_key, item in data.items():
            item_key = (*key, partial_key)
            is_list = isinstance(item, List)
            is_table = isinstance(item, Mapping)
            inappropriate_item = not is_list and not is_table
            inappropriate_item = inappropriate_item or is_list and any(not isinstance(it, Mapping) for it in item)
            if inappropriate_item:
                return self._write_regular_table(data, key, self.style.value_style(key, data, 'regular'))
            item_style = self.style.value_style(item_key, item)
            if item_style.display == 'inline':
                item_style = self.style.value_style(item_key, item, 'regular')
            if is_list:
                result += self._write_table_list(item, item_key, item_style)
            elif item_style.display == 'hidden':
                result += self._write_hidden_table(item, item_key, item_style)
            else:
                result += self._write_regular_table(item, item_key, item_style)
        return result

    def _write_inline_table(self, data: Mapping[str, Any], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render *data* as an inline table: { k = v, ... }."""
        result = f"{vstyle.prolog}{{"

        def _write_assignment(partial_key: str, item: Any) -> str:
            item_key = (*key, partial_key)
            item_style = self.style.value_style(item_key, item, 'inline')
            return f"{self._write_key(item_key, len(key), 'assignment')}" \
                   f"={self._write_value(item, item_key, item_style)}"

        result += ', '.join(_write_assignment(k, v) for k, v in data.items())
        result += f"}}{vstyle.epilog}"
        return result

    def _write_dotted_table(self, data: Mapping[str, Any], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render *data* as dotted assignments: parent.child = value."""
        subkey = self._write_key_part(key[-1])
        result = ""

        def _write_assignment(partial_key: str, item: Any) -> str:
            item_key = (*key, partial_key)
            key_style = self.style.key_style(item_key)
            key_str = self._write_key(item_key, len(key), 'assignment')
            if not key_style.key_str:
                # no recorded source text: synthesize the "parent." prefix
                key_str = f"{subkey}.{key_str}"
            item_style = self.style.value_style(item_key, item, 'inline')
            value_str = self._write_value(item, item_key, item_style)
            if isinstance(item, Mapping) and item_style.display == 'dotted':
                # nested dotted table already carries its own full keys
                return value_str
            return f"{key_str}={value_str}"

        result += '\n'.join(_write_assignment(k, v) for k, v in data.items())
        result += vstyle.epilog
        return result

    def _write_table_list(self, data: List[Mapping[str, Any]], key: KEY_T, vstyle: _ValueStyle) -> str:
        """Render a list of tables as consecutive [[key]] sections."""

        def _write_table_item(table: Mapping[str, Any], index: int):
            table_key = (*key, index)
            table_style = self.style.value_style(table_key, table, 'table-list-item')
            return self._write_regular_table(table, table_key, table_style)

        items = ''.join(_write_table_item(it, i) for i, it in enumerate(data))
        return f"{vstyle.prolog}{items}{vstyle.epilog}"
class _Reader:
    """A hand-written TOML parser that records layout styles for round-tripping.

    Besides producing the plain data dict, every key and value's surrounding
    whitespace/comments and display mode are captured in ``self.style`` so a
    _Writer built from it can reproduce the original document.

    Bug fixes in this revision (behavior otherwise unchanged):
      * read_date: the lambda's parameters were swapped relative to until()'s
        ``predicate(i, text)`` calling convention (raising TypeError on any
        date/time value) and the condition lacked a negation, which would
        have made until() stop at the very first character.
      * read_non_data: a trailing comment without a final newline made
        until_match('\\n') return '' without advancing, hanging the loop;
        comments are now consumed up to newline OR end of input.
      * read_number_or_date: ``self.peek() in '-:'`` was also True at end of
        input (``'' in '-:'`` is True), misclassifying a trailing number as a
        date; the peek result is now checked for emptiness first.
    """

    def __init__(self, text: str, file_name: Optional[str]):
        self.text = text
        self.file_name = file_name  # used only for error messages
        self.position = 0
        self.style = _StyleSheet()

    def peek(self, amount: int = 1) -> str:
        """Return up to *amount* upcoming characters without consuming them."""
        p = self.position
        return self.text[p:p + amount]

    def is_not_empty(self) -> bool:
        return self.position < len(self.text)

    def match(self, substr: str) -> bool:
        """Consume *substr* if it is next in the input; return whether it matched."""
        if self.peek(len(substr)) == substr:
            self.next(len(substr))
            return True
        return False

    def raise_err(self, msg: str, lines_to_show: int = 4) -> NoReturn:
        """Raise ValueError with *msg* plus a few context lines and a caret marker."""
        lines = self.text.splitlines(keepends=True)
        current_line = self.text.count('\n', 0, self.position)
        start_line = max(0, current_line - lines_to_show)
        sub_line_len = self.position - sum(len(it) for it in lines[:current_line])
        largest_line_len = max(len(lines[i]) if i < len(lines) else 0 for i in range(start_line, current_line + 1))
        pref = f'... after {start_line} lines ...\n\n' if start_line > 0 else ''
        largest_line_len = max(largest_line_len, len(pref))
        position_indicator = f"AT LINE {current_line}"
        if self.file_name:
            position_indicator = f"{self.file_name}:{current_line}"
        out = f"Toml Parsing Failed: {msg} ({position_indicator})\n" \
              f"{'-' * largest_line_len}\n" \
              f"{pref}" \
              f"{''.join(lines[start_line:current_line + 1])}" \
              f"{'~' * sub_line_len}^"
        raise ValueError(out)

    def until_match(self, substr: str) -> str:
        """Consume and return text up to (excluding) *substr*; '' when not found."""
        pos = self.position
        try:
            self.position = self.text.index(substr, pos)
            return self.text[pos: self.position]
        except ValueError:
            # not found: position is left unchanged
            return ''

    def until(self, predicate: Callable[[int, str], bool]) -> str:
        """Consume and return text up to the first index where predicate(i, text) is True.

        Consumes the remainder of the input when the predicate never fires.
        """
        p = self.position
        for i in range(self.position, len(self.text)):
            if predicate(i, self.text):
                self.position = i
                return self.text[p:i]
        self.position = len(self.text)
        return self.text[p:self.position]

    def next(self, amount: int = 1) -> str:
        """Consume and return up to *amount* characters."""
        peek = self.peek(amount)
        self.position += len(peek)
        return peek

    def match_or_err(self, substr: str, err: str) -> None:
        if not self.match(substr):
            self.raise_err(err)

    def __str__(self):
        return f"Buffer(pos={self.position}, '{self.text[self.position: self.position + 25]}...')"

    def read_ws(self, allow_new_lines: bool = True) -> str:
        """Consume and return whitespace (optionally stopping at newlines)."""
        p = self.position
        while self.is_not_empty():
            n = self.peek()
            if n == '\n' and not allow_new_lines:
                break
            elif not n.isspace():
                break
            self.next()
        return self.text[p:self.position]

    def read_non_data(self, allow_new_lines: bool = True) -> str:
        """Consume and return whitespace plus any '#' comments."""
        result = ''
        while self.is_not_empty():
            result += self.read_ws(allow_new_lines)
            n = self.peek()
            if n == '#':  # comment
                # FIX: consume to newline or EOF (until_match('\n') returned ''
                # without advancing on a trailing comment, hanging this loop)
                result += self.until(lambda i, s: s[i] == '\n')
            else:
                break
        return result

    def read_multiline_basic_str(self) -> str:
        """Read a \"\"\"...\"\"\" string, honoring escape sequences.

        NOTE(review): the collected literal is passed to eval() to apply the
        escapes — crafted document content may be able to break out of the
        quoting; confirm inputs are trusted.
        """
        self.match_or_err('"""', 'multiline basic string expected')
        s = '"""' + self.until_match('"""')
        while s[-1] == '\\':
            # the closing quotes were escaped: keep scanning
            s += self.next(3) + self.until_match('"""')
        s += '"""'
        self.match_or_err('"""', 'multiline basic string ending expected')
        return eval(s)

    def read_multiline_literal_str(self) -> str:
        """Read a '''...''' string verbatim (no escape processing)."""
        self.match_or_err("'''", 'multiline literal string expected')
        string = self.until_match("'''")
        self.match_or_err("'''", "missing multiline literal string closing")
        return string

    def read_basic_str(self) -> str:
        """Read a "..." string, honoring escape sequences (see eval note above)."""
        self.match_or_err('"', 'basic string expected')
        s = '"' + self.until_match('"')
        while s[-1] == '\\':
            s += self.next() + self.until_match('"')
        s += '"'
        self.match_or_err('"', 'basic string ending expected')
        return eval(s)

    def read_literal_str(self) -> str:
        """Read a '...' string verbatim (no escape processing)."""
        self.match_or_err("'", 'literal string expected')
        string = self.until_match("'")
        self.match_or_err("'", "missing literal string closing")
        return string

    def read_str(self) -> str:
        """Read any of the four TOML string forms based on the opening quotes."""
        n = self.peek(3)
        value = ''
        if n == '"""':
            value = self.read_multiline_basic_str()
        elif n == "'''":
            value = self.read_multiline_literal_str()
        elif n[0] == '"':
            value = self.read_basic_str()
        elif n[0] == "'":
            value = self.read_literal_str()
        else:
            self.raise_err("string expected type")
        return value

    def read_bare_key(self) -> str:
        """Read a run of bare-key characters (letters, digits, '-', '_')."""
        result = ''
        while self.is_not_empty():
            n = self.peek()
            if _KEY_RX.match(n):
                result += self.next()
            else:
                break
        return result

    def read_key(self, base_key: KEY_T = ()) -> KEY_T:
        """Read a (possibly dotted) key; handles assignment, [table] and [[table-list]] forms.

        The key's source text and surrounding layout are recorded in the style
        sheet under base_key + key.
        """
        prolog = self.read_non_data()
        p = self.position
        table_key = self.match('[')
        table_list_key = False
        if table_key:
            self.read_ws(False)
            table_list_key = self.match('[')
        parts = []
        while self.is_not_empty():
            self.read_ws(False)
            n = self.peek()
            if n in '"\'':
                parts.append(self.read_str())
            elif _KEY_RX.fullmatch(n):
                parts.append(self.read_bare_key())
            else:
                self.raise_err('non-key character')
            pp = self.position
            self.read_ws(False)
            if not self.match('.'):
                # no dot: the key ends here; rewind trailing whitespace
                self.position = pp
                break
        key = tuple(parts)
        if table_key:
            self.match_or_err(']', 'expecting table key termination')
            if table_list_key:
                self.read_ws()
                self.match_or_err(']', 'expecting table list key termination')
            # distinguish repeated headers for the same table path
            key = self.style.lastify(key)
        key_str = self.text[p:self.position]
        epilog = self.read_non_data(False)
        display = 'assignment'
        if table_key:
            display = 'table'
            if table_list_key:
                display = 'table-list-item'
        self.style.set_key_style((*base_key, *key),
                                 _KeyStyle(prolog=prolog, epilog=epilog, key_str=key_str, display=display))
        return key

    def _mark_internal_tables(self, table_key: KEY_T, key: KEY_T, display: str):
        """Record *display* for every intermediate table created by a dotted key."""
        for i in range(1, len(key) + 1):
            dtable_key = (*table_key, *key[0:i])
            self.style.update_value_auto_display(dtable_key, {}, display)

    def read_date(self) -> TomlTimeLiteral:
        """Read a date/time literal as raw text (digits plus '-', '.', ':', ' ', 'T', 'Z').

        FIX: the original lambda had its (index, text) parameters swapped and
        was not negated, so this method raised TypeError whenever invoked.
        """
        s = self.until(lambda i, s: not (s[i].isnumeric() or s[i] in '-.: TZ'))
        return TomlTimeLiteral(s)

    def read_number_or_date(self) -> Union[Number, TomlTimeLiteral]:
        """Read a numeric value, re-dispatching to read_date() on '-'/':' separators."""
        pos = self.position
        sign = (self.match('-') and '-') or (self.match('+') and '+') or ''
        s = sign + self.until(lambda i, s: not s[i].isnumeric() and s[i] not in '.e')
        n = self.peek()
        # FIX: guard against EOF — '' in '-:' evaluates True
        if n and n in '-:':
            self.position = pos
            return self.read_date()
        if not s:
            self.raise_err("number expected")
        return eval(s)

    def read_bool(self) -> bool:
        if self.match('true'):
            return True
        if self.match('false'):
            return False
        self.raise_err("boolean expected")

    def read_inline_list(self, key: KEY_T) -> List[Any]:
        """Read an inline array [...], recording each item's layout."""
        prolog = self.read_non_data(False)
        self.match_or_err('[', 'expecting list start')
        result = []
        p = self.position
        content_trailing = None
        while self.is_not_empty():
            item_prolog = self.read_non_data()
            if self.match(']'):
                # un-consume the ']' so the final match_or_err below sees it
                self.position -= 1
                content_trailing = self.text[p:self.position]
                break
            item_key = (*key, len(result))
            item = self.read_inline_value(item_key)
            item_style = self.style.value_style(item_key, item)
            item_style.prolog = item_prolog
            item_style.epilog = self.read_non_data()
            result.append(item)
            p = self.position
            if not self.match(', ') and not self.match(','):
                break
        content_trailing = content_trailing or self.read_non_data()
        self.match_or_err(']', 'expecting list end')
        epilog = self.read_non_data(False)
        self.style.set_value_style(key, result, _ValueStyle(prolog=prolog, epilog=epilog, display='inline',
                                                            content_trailing=content_trailing))
        return result

    def read_inline_table(self, key: KEY_T) -> Dict[str, Any]:
        """Read an inline table {...}."""
        prolog = self.read_non_data(False)
        self.match_or_err('{', 'expecting inline-table start')
        result = self.read_table_assignments(key, inline=True)
        self.match_or_err('}', 'expecting inline-table end')
        epilog = self.read_non_data(False)
        self.style.set_value_style(key, result, _ValueStyle(prolog=prolog, epilog=epilog, display='inline'))
        return result

    def read_inline_value(self, key: KEY_T) -> Any:
        """Read any value in an assignment position, dispatching on its first character."""
        prolog = self.read_non_data()
        value_str = None
        p = self.position
        n = self.peek()
        result = None
        if n in "+-" or n.isnumeric():
            result = self.read_number_or_date()
            value_str = self.text[p:self.position]
        elif n in '\'"':
            result = self.read_str()
            value_str = self.text[p:self.position]
        elif n in 'tf':
            result = self.read_bool()
            value_str = self.text[p:self.position]
        elif n == '[':
            result = self.read_inline_list(key)
        elif n == '{':
            result = self.read_inline_table(key)
        if result is None:
            self.raise_err("value expected")
        vstyle = self.style.value_style(key, result)
        vstyle.prolog = prolog
        vstyle.epilog = self.read_non_data(False)
        vstyle.value_str = value_str
        vstyle.display = 'inline'
        self.style.set_value_style(key, result, vstyle)
        return result

    def _enter(self, data: Any, key: KEY_T, value: Any):
        """Store *value* at *key* inside *data*, creating intermediate dicts/lists.

        A numeric key component denotes a list position (table lists); a new
        value is appended and deeper keys descend into the last list item.
        """
        if not key:
            return data
        if isinstance(data, List):
            if len(key) == 1 and isinstance(key[0], Number):
                data.append(value)
                return
            return self._enter(data[-1], key, value)
        if isinstance(data, MutableMapping):
            if len(key) == 1:
                data[key[0]] = value
                return
            if key[0] not in data:
                if isinstance(key[1], Number):
                    data[key[0]] = []
                else:
                    data[key[0]] = {}
            return self._enter(data[key[0]], key[1:], value)
        raise ValueError(f"cannot enter {data}")

    def read_table_assignments(self, table_key: KEY_T, inline: bool = False) -> Dict[str, Any]:
        """Read consecutive `key = value` pairs until a table header, '}' or EOF."""
        data = {}
        while self.is_not_empty():
            p = self.position
            self.read_non_data()
            key_start = self.peek()
            self.position = p
            if not key_start or key_start == '[':
                break
            k = self.read_key(base_key=table_key)
            self.match_or_err('=', 'expecting assignment')
            v = self.read_inline_value((*table_key, *k))
            self._enter(data, k, v)
            if len(k) > 1:
                # dotted key: remember to render intermediates as dotted tables
                self._mark_internal_tables(table_key, k[:-1], 'dotted')
            if inline and not self.match(', ') and not self.match(','):
                break
            if not inline and not self.match('\n'):
                break
        self.match('\n')  # drop last \n separating assignments and next table - it is added implicitly
        return data

    def read_regular_table(self) -> Tuple[KEY_T, Dict[str, Any]]:
        """Read a [table] or [[table-list]] header plus its assignments."""
        prolog = self.read_non_data()
        key = self.read_key()
        self._mark_internal_tables((), key[:-1], 'hidden')
        self.match_or_err('\n', 'expecting newline after table key')
        data = self.read_table_assignments(key)
        # NOTE(review): lastify() always yields a non-negative index, so this
        # branch looks unreachable — kept as-is.
        if key[-1] == -1:
            key = (*key[:-1], id(data))
        self.style.set_value_style(key, data, _ValueStyle(prolog=prolog, display='regular'))
        return key, data

    def read(self) -> Dict[str, Any]:
        """Parse the whole document: root assignments followed by tables."""
        prolog = self.read_non_data()
        epilog = ''
        data = self.read_table_assignments(())
        while self.is_not_empty():
            p = self.position
            epilog = self.read_non_data()
            if self.is_not_empty():
                # more content follows: the consumed text was a table prolog
                self.position = p
                epilog = ''
                key, value = self.read_regular_table()
                self._enter(data, key, value)
            else:
                break
        self.style.set_value_style((), data, _ValueStyle(prolog=prolog, epilog=epilog, display='regular'))
        return data
def key2path(key: str) -> KEY_T:
    """Parse a dotted key string (e.g. 'tool.poetry.name') into a key tuple."""
    return _Reader(key, None).read_key(())
def loads(data: str, file_name: Optional[str] = None) -> Tuple[Dict[str, Any], DUMPS_T]:
    """Parse TOML text.

    Returns (data, dumps) where `dumps` re-serializes a dict using the layout
    captured while parsing; *file_name* is only used in error messages.
    """
    reader = _Reader(data, file_name)
    data = reader.read()
    return data, lambda x: _Writer(reader.style).write(x)
def load(file: Union[Path, str]) -> Tuple[Dict[str, Any], DUMPS_T]:
    """Read and parse a TOML file; see loads() for the return value."""
    file = Path(file)
    return loads(file.read_text(), str(file.absolute()))
def dumps(data: Dict[str, Any]) -> str:
    """Serialize *data* to TOML text using a fresh default style sheet."""
    writer = _Writer()
    return writer.write(data)
from typing import TypeVar, Iterable, Callable, Union, Sequence, List, Dict, Any, MutableMapping
T = TypeVar("T")


def _first_last_pred_default(item) -> bool:
    """Default predicate for first()/last(): identity-match on True."""
    return item is True


def last(
        iterable: Iterable[T],
        pred: Callable[[T], bool] = _first_last_pred_default,
        index: bool = False) -> Union[int, T, None]:
    """Return the last item of *iterable* matching *pred* (or its position).

    With index=True the position is returned (-1 when nothing matches);
    otherwise the matching item itself, or None when nothing matches.
    """
    match = None
    imatch = -1
    for i, it in enumerate(iterable):
        if pred(it):
            match, imatch = it, i
    return imatch if index else match


def first(
        iterable: Iterable[T],
        pred: Callable[[T], bool] = _first_last_pred_default,
        index: bool = False) -> Union[int, T, None]:
    """Return the first item of *iterable* matching *pred* (or its position).

    With index=True the position is returned (-1 when nothing matches);
    otherwise the matching item itself, or None when nothing matches.

    Bug fix: the original returned -1 even when index=False, contradicting
    both its own type hint and last()'s behavior; the no-match value is now
    None in that case (and still -1 for index=True).
    """
    for i, it in enumerate(iterable):
        if pred(it):
            return i if index else it
    return -1 if index else None
def startswith(seq: Sequence[T], prefix: Sequence[T]) -> bool:
    """Return True when *seq* begins with the elements of *prefix*."""
    if len(prefix) > len(seq):
        return False
    return all(a == b for a, b in zip(prefix, seq))
def insert_or_append(l: List[T], index: int, item: T) -> None:
    """Insert *item* at *index*, appending when *index* is past the end."""
    if index < len(l):
        l.insert(index, item)
    else:
        l.append(item)
def nested_dict_del(nd: Dict[str, Any], path: Sequence[str]):
    """Delete the value at *path* inside nested dict *nd*, if present.

    Raises ValueError when an intermediate path element holds a non-mapping.

    Bug fix: the original created empty intermediate dicts while walking a
    missing path (mutating *nd* during a delete!) and raised KeyError when
    only the final key was absent; both cases are now silent no-ops.

    NOTE(review): like before, an intermediate key stored as None is treated
    as missing.
    """
    r = nd
    for p in path[:-1]:
        rp = r.get(p)
        if rp is None:
            return  # nothing to delete along a missing path
        if not isinstance(rp, MutableMapping):
            raise ValueError(f"path {path} pass through a terminal {p}")
        r = rp
    r.pop(path[-1], None)
def nested_dict_set(nd: Dict[str, Any], path: Sequence[str], value: Any):
    """Set *value* at *path* inside *nd*, creating intermediate dicts as needed.

    Raises ValueError when an intermediate path element holds a non-mapping.
    """
    node = nd
    for p in path[:-1]:
        child = node.get(p)
        if child is None:
            child = node[p] = {}
        elif not isinstance(child, MutableMapping):
            raise ValueError(f"path {path} pass through a terminal {p}")
        node = child
    node[path[-1]] = value
def nested_dict_get(nd: Dict[str, Any], path: Sequence[str]) -> Any:
    """Return the value at *path* inside *nd*, or None when it is missing.

    Raises ValueError when an intermediate path element holds a non-mapping.

    Bug fix: the original returned None for a missing intermediate key but
    raised KeyError when only the final key was absent; both cases now
    consistently return None.
    """
    r = nd
    for p in path[:-1]:
        rp = r.get(p)
        if rp is None:
            return None
        if not isinstance(rp, MutableMapping):
            raise ValueError(f"path {path} pass through a terminal {p}")
        r = rp
    return r.get(path[-1])
import logging
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from poetry.core.factory import Factory
from .builders.sdist import SdistBuilder
from .builders.wheel import WheelBuilder
log = logging.getLogger(__name__)
def get_requires_for_build_wheel(
    config_settings: Optional[Dict[str, Any]] = None,
) -> List[str]:
    """PEP 517 hook: extra build requirements (as PEP 508 strings) beyond pyproject.toml.

    Always returns an empty list — equivalent to not defining the hook at
    all; it is implemented only for interface completeness and future use.
    """
    return []
# For now, we require all dependencies to build either a wheel or an sdist,
# so PEP 517's get_requires_for_build_sdist shares the wheel implementation.
get_requires_for_build_sdist = get_requires_for_build_wheel
def prepare_metadata_for_build_wheel(
    metadata_directory: str, config_settings: Optional[Dict[str, Any]] = None
) -> str:
    """PEP 517 hook: write this project's .dist-info into *metadata_directory*.

    Returns the basename of the created .dist-info directory.
    """
    poetry = Factory().create_poetry(Path(".").resolve())
    builder = WheelBuilder(poetry)
    dist_info = Path(metadata_directory, builder.dist_info)
    dist_info.mkdir(parents=True, exist_ok=True)
    # entry_points.txt is only emitted when scripts/plugins are declared
    if "scripts" in poetry.local_config or "plugins" in poetry.local_config:
        with (dist_info / "entry_points.txt").open("w", encoding="utf-8") as f:
            builder._write_entry_points(f)
    with (dist_info / "WHEEL").open("w", encoding="utf-8") as f:
        builder._write_wheel_file(f)
    with (dist_info / "METADATA").open("w", encoding="utf-8") as f:
        builder._write_metadata_file(f)
    return dist_info.name
def build_wheel(
    wheel_directory: str,
    config_settings: Optional[Dict[str, Any]] = None,
    metadata_directory: Optional[str] = None,
) -> str:
    """Builds a wheel, places it in wheel_directory"""
    # PEP 517 hook; returns the basename of the wheel that was created.
    poetry = Factory().create_poetry(Path(".").resolve())
    return WheelBuilder.make_in(poetry, Path(wheel_directory))
def build_sdist(
    sdist_directory: str, config_settings: Optional[Dict[str, Any]] = None
) -> str:
    """Builds an sdist, places it in sdist_directory"""
    # PEP 517 hook; returns the basename of the created archive.
    poetry = Factory().create_poetry(Path(".").resolve())
    path = SdistBuilder(poetry).build(Path(sdist_directory))
    return path.name
def build_editable(
    wheel_directory: str,
    config_settings: Optional[Dict[str, Any]] = None,
    metadata_directory: Optional[str] = None,
) -> str:
    """Build an editable (PEP 660) wheel and place it in *wheel_directory*.

    Returns the basename of the wheel that was created.
    """
    poetry = Factory().create_poetry(Path(".").resolve())
    return WheelBuilder.make_in(poetry, Path(wheel_directory), editable=True)
# PEP 660 editable-install hooks share the corresponding wheel implementations.
get_requires_for_build_editable = get_requires_for_build_wheel
prepare_metadata_for_build_editable = prepare_metadata_for_build_wheel
from pathlib import Path
from typing import List
from typing import Optional
from .include import Include
class PackageInclude(Include):
    """An Include representing a Python package or single-file module.

    Resolves whether the matched files form a package (directory), a module
    (single .py file) or a PEP 561 stub-only package, optionally rooted under
    a *source* directory (e.g. "src").
    """

    def __init__(
        self,
        base: Path,
        include: str,
        formats: Optional[List[str]] = None,
        source: Optional[str] = None,
    ) -> None:
        self._package = None       # package name, filled in by check_elements()
        self._is_package = False   # True when the include resolves to a package
        self._is_module = False    # True when the include resolves to a .py module
        self._source = source      # optional source root relative to *base*
        if source is not None:
            base = base / source
        super(PackageInclude, self).__init__(base, include, formats=formats)
        self.check_elements()

    @property
    def package(self) -> str:
        """Name of the resolved package (or module stem)."""
        return self._package

    @property
    def source(self) -> Optional[str]:
        """The source root this include was anchored under, if any."""
        return self._source

    def is_package(self) -> bool:
        return self._is_package

    def is_module(self) -> bool:
        return self._is_module

    def refresh(self) -> "PackageInclude":
        """Re-scan the filesystem and re-classify the include."""
        super(PackageInclude, self).refresh()
        return self.check_elements()

    def is_stub_only(self) -> bool:
        # returns `True` if this a PEP 561 stub-only package,
        # see [PEP 561](https://www.python.org/dev/peps/pep-0561/#stub-only-packages)
        return self.package.endswith("-stubs") and all(
            el.suffix == ".pyi"
            or (el.parent.name == self.package and el.name == "py.typed")
            for el in self.elements
            if el.is_file()
        )

    def has_modules(self) -> bool:
        # Packages no longer need an __init__.py in python3, but there must
        # at least be one .py file for it to be considered a package
        return any(element.suffix == ".py" for element in self.elements)

    def check_elements(self) -> "PackageInclude":
        """Classify the matched elements as package/module and validate them.

        Raises ValueError when nothing matched or when a directory match
        contains no Python modules (and is not a stub-only package).
        """
        if not self._elements:
            raise ValueError(
                "{} does not contain any element".format(self._base / self._include)
            )
        root = self._elements[0]
        if len(self._elements) > 1:
            # Probably glob
            self._is_package = True
            self._package = root.parent.name
            if not self.is_stub_only() and not self.has_modules():
                raise ValueError("{} is not a package.".format(root.name))
        else:
            if root.is_dir():
                # If it's a directory, we include everything inside it
                self._package = root.name
                self._elements: List[Path] = sorted(list(root.glob("**/*")))
                if not self.is_stub_only() and not self.has_modules():
                    raise ValueError("{} is not a package.".format(root.name))
                self._is_package = True
            else:
                self._package = root.stem
                self._is_module = True
        return self
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
class ModuleOrPackageNotFound(ValueError):
    """Raised when no file or directory matching a package name can be found."""

    pass
class Module:
    """Resolves the source layout of a project's code.

    Given a project *name* and root *directory*, locates the package
    directory or single-module file providing it (checking both flat and
    ``src/`` layouts when no explicit ``packages`` are given) and builds
    the list of include objects used by the builders.

    Raises:
        ModuleOrPackageNotFound: if no matching file or folder exists.
        ValueError: if both a package directory and a same-named ``.py``
            module exist.
    """

    def __init__(
        self,
        name: str,
        directory: str = ".",
        packages: Optional[List[Dict[str, Any]]] = None,
        includes: Optional[List[Dict[str, Any]]] = None,
    ) -> None:
        from poetry.core.utils.helpers import module_name

        from .include import Include
        from .package_include import PackageInclude

        self._name = module_name(name)
        self._in_src = False
        self._is_package = False
        self._path = Path(directory)
        self._includes = []
        packages = packages or []
        includes = includes or []

        if not packages:
            # It must exist either as a .py file or a directory, but not both
            pkg_dir = Path(directory, self._name)
            py_file = Path(directory, self._name + ".py")
            if pkg_dir.is_dir() and py_file.is_file():
                raise ValueError("Both {} and {} exist".format(pkg_dir, py_file))
            elif pkg_dir.is_dir():
                packages = [{"include": str(pkg_dir.relative_to(self._path))}]
            elif py_file.is_file():
                packages = [{"include": str(py_file.relative_to(self._path))}]
            else:
                # Searching for a src module
                src = Path(directory, "src")
                src_pkg_dir = src / self._name
                src_py_file = src / (self._name + ".py")

                if src_pkg_dir.is_dir() and src_py_file.is_file():
                    # Bug fix: the message previously referenced pkg_dir and
                    # py_file (the flat-layout paths) instead of the src paths.
                    raise ValueError(
                        "Both {} and {} exist".format(src_pkg_dir, src_py_file)
                    )
                elif src_pkg_dir.is_dir():
                    packages = [
                        {
                            "include": str(src_pkg_dir.relative_to(src)),
                            "from": str(src.relative_to(self._path)),
                        }
                    ]
                elif src_py_file.is_file():
                    packages = [
                        {
                            "include": str(src_py_file.relative_to(src)),
                            "from": str(src.relative_to(self._path)),
                        }
                    ]
                else:
                    raise ModuleOrPackageNotFound(
                        "No file/folder found for package {}".format(name)
                    )

        for package in packages:
            # "format" may be a single string or a list; normalize to a list.
            formats = package.get("format")
            if formats and not isinstance(formats, list):
                formats = [formats]

            self._includes.append(
                PackageInclude(
                    self._path,
                    package["include"],
                    formats=formats,
                    source=package.get("from"),
                )
            )

        for include in includes:
            self._includes.append(
                Include(self._path, include["path"], formats=include["format"])
            )

    @property
    def name(self) -> str:
        return self._name

    @property
    def path(self) -> Path:
        return self._path

    @property
    def file(self) -> Path:
        # NOTE(review): _is_package is never set to True anywhere in this
        # class, so the first branch looks unreachable from here — confirm
        # whether subclasses/callers mutate it before relying on this.
        if self._is_package:
            return self._path / "__init__.py"
        else:
            return self._path

    @property
    def includes(self) -> List:
        return self._includes

    def is_package(self) -> bool:
        return self._is_package

    def is_in_src(self) -> bool:
        return self._in_src
from collections import namedtuple
from typing import Optional
class License(namedtuple("License", "id name is_osi_approved is_deprecated")):
    """An SPDX license record that knows its PyPI trove classifier.

    Fields: ``id`` (SPDX identifier), ``name`` (full license name),
    ``is_osi_approved`` and ``is_deprecated`` flags.
    """

    # SPDX ids that have a dedicated "License ::" trove classifier on PyPI.
    # (Set literal: member order is irrelevant; groups are informational.)
    CLASSIFIER_SUPPORTED = {
        # Not OSI Approved
        "AFPL",
        "Aladdin",
        "CC0-1.0",
        "CECILL-B",
        "CECILL-C",
        "NPL-1.0",
        "NPL-1.1",
        # OSI Approved
        "AFL-1.1",
        "AFL-1.2",
        "AFL-2.0",
        "AFL-2.1",
        "AFL-3.0",
        "Apache-1.1",
        "Apache-2.0",
        "APSL-1.1",
        "APSL-1.2",
        "APSL-2.0",
        "Artistic-1.0",
        "Artistic-2.0",
        "AAL",
        "AGPL-3.0",
        "AGPL-3.0-only",
        "AGPL-3.0-or-later",
        "BSL-1.0",
        "BSD-2-Clause",
        "BSD-3-Clause",
        "CDDL-1.0",
        "CECILL-2.1",
        "CPL-1.0",
        "EFL-1.0",
        "EFL-2.0",
        "EPL-1.0",
        "EPL-2.0",
        "EUPL-1.1",
        "EUPL-1.2",
        "GPL-2.0",
        "GPL-2.0+",
        "GPL-2.0-only",
        "GPL-2.0-or-later",
        "GPL-3.0",
        "GPL-3.0+",
        "GPL-3.0-only",
        "GPL-3.0-or-later",
        "LGPL-2.0",
        "LGPL-2.0+",
        "LGPL-2.0-only",
        "LGPL-2.0-or-later",
        "LGPL-3.0",
        "LGPL-3.0+",
        "LGPL-3.0-only",
        "LGPL-3.0-or-later",
        "MIT",
        "MPL-1.0",
        "MPL-1.1",
        "MPL-1.2",
        "Nokia",
        "W3C",
        "ZPL-1.0",
        "ZPL-2.0",
        "ZPL-2.1",
    }

    # SPDX id -> trove classifier name, where the classifier name differs
    # from the SPDX full name.  Ids supported above but absent here fall
    # back to ``self.name`` in classifier_name().
    CLASSIFIER_NAMES = {
        # Not OSI Approved
        "AFPL": "Aladdin Free Public License (AFPL)",
        "CC0-1.0": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
        "CECILL-B": "CeCILL-B Free Software License Agreement (CECILL-B)",
        "CECILL-C": "CeCILL-C Free Software License Agreement (CECILL-C)",
        "NPL-1.0": "Netscape Public License (NPL)",
        "NPL-1.1": "Netscape Public License (NPL)",
        # OSI Approved
        "AFL-1.1": "Academic Free License (AFL)",
        "AFL-1.2": "Academic Free License (AFL)",
        "AFL-2.0": "Academic Free License (AFL)",
        "AFL-2.1": "Academic Free License (AFL)",
        "AFL-3.0": "Academic Free License (AFL)",
        "Apache-1.1": "Apache Software License",
        "Apache-2.0": "Apache Software License",
        "APSL-1.1": "Apple Public Source License",
        "APSL-1.2": "Apple Public Source License",
        "APSL-2.0": "Apple Public Source License",
        "Artistic-1.0": "Artistic License",
        "Artistic-2.0": "Artistic License",
        "AAL": "Attribution Assurance License",
        "AGPL-3.0": "GNU Affero General Public License v3",
        "AGPL-3.0-only": "GNU Affero General Public License v3",
        "AGPL-3.0-or-later": "GNU Affero General Public License v3 or later (AGPLv3+)",
        "BSL-1.0": "Boost Software License 1.0 (BSL-1.0)",
        "BSD-2-Clause": "BSD License",
        "BSD-3-Clause": "BSD License",
        "CDDL-1.0": "Common Development and Distribution License 1.0 (CDDL-1.0)",
        "CECILL-2.1": "CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)",
        "CPL-1.0": "Common Public License",
        "EPL-1.0": "Eclipse Public License 1.0 (EPL-1.0)",
        "EFL-1.0": "Eiffel Forum License",
        "EFL-2.0": "Eiffel Forum License",
        "EUPL-1.1": "European Union Public Licence 1.1 (EUPL 1.1)",
        "EUPL-1.2": "European Union Public Licence 1.2 (EUPL 1.2)",
        "GPL-2.0": "GNU General Public License v2 (GPLv2)",
        "GPL-2.0-only": "GNU General Public License v2 (GPLv2)",
        "GPL-2.0+": "GNU General Public License v2 or later (GPLv2+)",
        "GPL-2.0-or-later": "GNU General Public License v2 or later (GPLv2+)",
        "GPL-3.0": "GNU General Public License v3 (GPLv3)",
        "GPL-3.0-only": "GNU General Public License v3 (GPLv3)",
        "GPL-3.0+": "GNU General Public License v3 or later (GPLv3+)",
        "GPL-3.0-or-later": "GNU General Public License v3 or later (GPLv3+)",
        "LGPL-2.0": "GNU Lesser General Public License v2 (LGPLv2)",
        "LGPL-2.0-only": "GNU Lesser General Public License v2 (LGPLv2)",
        "LGPL-2.0+": "GNU Lesser General Public License v2 or later (LGPLv2+)",
        "LGPL-2.0-or-later": "GNU Lesser General Public License v2 or later (LGPLv2+)",
        "LGPL-3.0": "GNU Lesser General Public License v3 (LGPLv3)",
        "LGPL-3.0-only": "GNU Lesser General Public License v3 (LGPLv3)",
        "LGPL-3.0+": "GNU Lesser General Public License v3 or later (LGPLv3+)",
        "LGPL-3.0-or-later": "GNU Lesser General Public License v3 or later (LGPLv3+)",
        "MPL-1.0": "Mozilla Public License 1.0 (MPL)",
        "MPL-1.1": "Mozilla Public License 1.1 (MPL 1.1)",
        "MPL-2.0": "Mozilla Public License 2.0 (MPL 2.0)",
        "W3C": "W3C License",
        "ZPL-1.1": "Zope Public License",
        "ZPL-2.0": "Zope Public License",
        "ZPL-2.1": "Zope Public License",
    }

    @property
    def classifier(self) -> str:
        """Full trove classifier string, e.g. ``License :: OSI Approved :: MIT``."""
        parts = ["License"]

        if self.is_osi_approved:
            parts.append("OSI Approved")

        name = self.classifier_name
        if name is not None:
            parts.append(name)

        return " :: ".join(parts)

    @property
    def classifier_name(self) -> Optional[str]:
        """Trove classifier name segment, or ``None`` for bare "OSI Approved"."""
        if self.id not in self.CLASSIFIER_SUPPORTED:
            if self.is_osi_approved:
                return None

            return "Other/Proprietary License"

        if self.id in self.CLASSIFIER_NAMES:
            return self.CLASSIFIER_NAMES[self.id]

        return self.name
from typing import TYPE_CHECKING
from typing import Any
from typing import List
from .empty_constraint import EmptyConstraint
from .version_constraint import VersionConstraint
from .version_range_constraint import VersionRangeConstraint
if TYPE_CHECKING:
from poetry.core.semver.helpers import VersionTypes
from poetry.core.semver.version import Version
from poetry.core.semver.version_range import VersionRange
class VersionUnion(VersionConstraint):
    """
    A version constraint representing a union of multiple disjoint version
    ranges.

    An instance of this will only be created if the version can't be represented
    as a non-compound value.

    The member ranges are kept sorted and disjoint (see :meth:`of`), which
    the set-operation methods below rely on.
    """

    def __init__(self, *ranges: "VersionRange") -> None:
        self._ranges = list(ranges)

    @property
    def ranges(self) -> List["VersionRange"]:
        return self._ranges

    @classmethod
    def of(cls, *ranges: "VersionTypes") -> "VersionTypes":
        """Build the simplest constraint equivalent to the union of *ranges*.

        Empty members are dropped, nested unions are flattened, touching
        ranges are merged, and the result collapses to a single constraint
        when possible.
        """
        from .version_range import VersionRange

        flattened = []
        for constraint in ranges:
            if constraint.is_empty():
                continue

            if isinstance(constraint, VersionUnion):
                flattened += constraint.ranges
                continue

            flattened.append(constraint)

        if not flattened:
            return EmptyConstraint()

        if any(constraint.is_any() for constraint in flattened):
            return VersionRange()

        # Only allow Versions and VersionRanges here so we can more easily reason
        # about everything in flattened. _EmptyVersions and VersionUnions are
        # filtered out above.
        for constraint in flattened:
            if isinstance(constraint, VersionRangeConstraint):
                continue

            raise ValueError("Unknown VersionConstraint type {}.".format(constraint))

        flattened.sort()

        merged = []
        for constraint in flattened:
            # Merge this constraint with the previous one, but only if they touch.
            if not merged or (
                not merged[-1].allows_any(constraint)
                and not merged[-1].is_adjacent_to(constraint)
            ):
                merged.append(constraint)
            else:
                merged[-1] = merged[-1].union(constraint)

        if len(merged) == 1:
            return merged[0]

        return VersionUnion(*merged)

    def is_empty(self) -> bool:
        return False

    def is_any(self) -> bool:
        return False

    def allows(self, version: "Version") -> bool:
        return any(constraint.allows(version) for constraint in self._ranges)

    def allows_all(self, other: "VersionTypes") -> bool:
        """Return True if every version allowed by *other* is allowed here.

        Walks both sorted range lists in lockstep (merge-style sweep).
        """
        our_ranges = iter(self._ranges)
        their_ranges = iter(self._ranges_for(other))

        our_current_range = next(our_ranges, None)
        their_current_range = next(their_ranges, None)

        while our_current_range and their_current_range:
            if our_current_range.allows_all(their_current_range):
                their_current_range = next(their_ranges, None)
            else:
                our_current_range = next(our_ranges, None)

        # All of their ranges were consumed iff each was covered by ours.
        return their_current_range is None

    def allows_any(self, other: "VersionTypes") -> bool:
        """Return True if *other* and this union share at least one version."""
        our_ranges = iter(self._ranges)
        their_ranges = iter(self._ranges_for(other))

        our_current_range = next(our_ranges, None)
        their_current_range = next(their_ranges, None)

        while our_current_range and their_current_range:
            if our_current_range.allows_any(their_current_range):
                return True

            if their_current_range.allows_higher(our_current_range):
                our_current_range = next(our_ranges, None)
            else:
                their_current_range = next(their_ranges, None)

        return False

    def intersect(self, other: "VersionTypes") -> "VersionTypes":
        """Return the constraint of versions allowed by both operands."""
        our_ranges = iter(self._ranges)
        their_ranges = iter(self._ranges_for(other))
        new_ranges = []

        our_current_range = next(our_ranges, None)
        their_current_range = next(their_ranges, None)

        while our_current_range and their_current_range:
            intersection = our_current_range.intersect(their_current_range)

            if not intersection.is_empty():
                new_ranges.append(intersection)

            if their_current_range.allows_higher(our_current_range):
                our_current_range = next(our_ranges, None)
            else:
                their_current_range = next(their_ranges, None)

        return VersionUnion.of(*new_ranges)

    def union(self, other: "VersionTypes") -> "VersionTypes":
        return VersionUnion.of(self, other)

    def difference(self, other: "VersionTypes") -> "VersionTypes":
        """Return the constraint of versions allowed here but not by *other*.

        Sweeps both sorted range lists; `state` is a dict so the nested
        helpers can rebind the cursors (pre-`nonlocal` style).
        """
        our_ranges = iter(self._ranges)
        their_ranges = iter(self._ranges_for(other))
        new_ranges = []

        state = {
            "current": next(our_ranges, None),
            "their_range": next(their_ranges, None),
        }

        def their_next_range() -> bool:
            # Advance their cursor; on exhaustion flush all remaining ranges
            # of ours (nothing more can be subtracted) and report "done".
            state["their_range"] = next(their_ranges, None)
            if state["their_range"]:
                return True

            new_ranges.append(state["current"])
            our_current = next(our_ranges, None)
            while our_current:
                new_ranges.append(our_current)
                our_current = next(our_ranges, None)

            return False

        def our_next_range(include_current: bool = True) -> bool:
            # Advance our cursor, optionally keeping the current (finished)
            # range in the result.
            if include_current:
                new_ranges.append(state["current"])

            our_current = next(our_ranges, None)
            if not our_current:
                return False

            state["current"] = our_current

            return True

        while True:
            if state["their_range"] is None:
                break

            if state["their_range"].is_strictly_lower(state["current"]):
                if not their_next_range():
                    break

                continue

            if state["their_range"].is_strictly_higher(state["current"]):
                if not our_next_range():
                    break

                continue

            difference = state["current"].difference(state["their_range"])
            if isinstance(difference, VersionUnion):
                # Their range split ours in two; the lower half is final,
                # the upper half may still be trimmed by later ranges.
                assert len(difference.ranges) == 2
                new_ranges.append(difference.ranges[0])
                state["current"] = difference.ranges[-1]

                if not their_next_range():
                    break
            elif difference.is_empty():
                if not our_next_range(False):
                    break
            else:
                state["current"] = difference

                if state["current"].allows_higher(state["their_range"]):
                    if not their_next_range():
                        break
                else:
                    if not our_next_range():
                        break

        if not new_ranges:
            return EmptyConstraint()

        if len(new_ranges) == 1:
            return new_ranges[0]

        return VersionUnion.of(*new_ranges)

    def _ranges_for(self, constraint: "VersionTypes") -> List["VersionRangeConstraint"]:
        """Normalize any constraint into a list of range constraints."""
        if constraint.is_empty():
            return []

        if isinstance(constraint, VersionUnion):
            return constraint.ranges

        if isinstance(constraint, VersionRangeConstraint):
            return [constraint]

        raise ValueError("Unknown VersionConstraint type {}".format(constraint))

    def excludes_single_version(self) -> bool:
        """True if this union is everything except exactly one version (``!=``)."""
        from .version import Version
        from .version_range import VersionRange

        return isinstance(VersionRange().difference(self), Version)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, VersionUnion):
            return False

        return self._ranges == other.ranges

    def __hash__(self) -> int:
        h = hash(self._ranges[0])

        for range in self._ranges[1:]:
            h ^= hash(range)

        return h

    def __str__(self) -> str:
        from .version_range import VersionRange

        if self.excludes_single_version():
            return "!={}".format(VersionRange().difference(self))

        return " || ".join(str(r) for r in self._ranges)

    def __repr__(self) -> str:
        return "<VersionUnion {}>".format(str(self))
from typing import TYPE_CHECKING
from typing import Any
from typing import List
from typing import Optional
from poetry.core.semver.empty_constraint import EmptyConstraint
from poetry.core.semver.version_range_constraint import VersionRangeConstraint
from poetry.core.semver.version_union import VersionUnion
if TYPE_CHECKING:
from poetry.core.semver.helpers import VersionTypes
from poetry.core.semver.version import Version
class VersionRange(VersionRangeConstraint):
    """A contiguous range of versions, bounded by optional min/max endpoints.

    ``None`` for an endpoint means unbounded on that side; both ``None``
    means "any version".  ``full_max`` is the effective upper bound used by
    :meth:`allows`: for an exclusive stable ``max`` it is widened to that
    release's first pre-release so that e.g. pre-releases of the excluded
    version are rejected too (unless ``min`` shares the same release).
    """

    def __init__(
        self,
        min: Optional["Version"] = None,
        max: Optional["Version"] = None,
        include_min: bool = False,
        include_max: bool = False,
        always_include_max_prerelease: bool = False,
    ):
        full_max = max
        if (
            not always_include_max_prerelease
            and not include_max
            and full_max is not None
            and full_max.is_stable()
            and not full_max.is_postrelease()
            and (min is None or min.is_stable() or min.release != full_max.release)
        ):
            full_max = full_max.first_pre_release()

        self._min = min
        self._max = max
        self._full_max = full_max
        self._include_min = include_min
        self._include_max = include_max

    @property
    def min(self) -> "Version":
        return self._min

    @property
    def max(self) -> "Version":
        return self._max

    @property
    def full_max(self) -> "Version":
        return self._full_max

    @property
    def include_min(self) -> bool:
        return self._include_min

    @property
    def include_max(self) -> bool:
        return self._include_max

    def is_empty(self) -> bool:
        return False

    def is_any(self) -> bool:
        return self._min is None and self._max is None

    def allows(self, other: "Version") -> bool:
        """Return True if *other* falls inside this range."""
        if self._min is not None:
            if other < self._min:
                return False

            if not self._include_min and other == self._min:
                return False

        if self.full_max is not None:
            _this, _other = self.full_max, other

            if not _this.is_local() and _other.is_local():
                # allow weak equality to allow `3.0.0+local.1` for `<=3.0.0`
                _other = _other.without_local()

            if not _this.is_postrelease() and _other.is_postrelease():
                # allow weak equality to allow `3.0.0-1` for `<=3.0.0`
                _other = _other.without_postrelease()

            if _other > _this:
                return False

            if not self._include_max and _other == _this:
                return False

        return True

    def allows_all(self, other: "VersionTypes") -> bool:
        """Return True if every version allowed by *other* is in this range."""
        from .version import Version

        if other.is_empty():
            return True

        if isinstance(other, Version):
            return self.allows(other)

        if isinstance(other, VersionUnion):
            return all(self.allows_all(constraint) for constraint in other.ranges)

        if isinstance(other, VersionRangeConstraint):
            return not other.allows_lower(self) and not other.allows_higher(self)

        raise ValueError("Unknown VersionConstraint type {}.".format(other))

    def allows_any(self, other: "VersionTypes") -> bool:
        """Return True if *other* and this range share at least one version."""
        from .version import Version

        if other.is_empty():
            return False

        if isinstance(other, Version):
            return self.allows(other)

        if isinstance(other, VersionUnion):
            return any(self.allows_any(constraint) for constraint in other.ranges)

        if isinstance(other, VersionRangeConstraint):
            return not other.is_strictly_lower(self) and not other.is_strictly_higher(
                self
            )

        raise ValueError("Unknown VersionConstraint type {}.".format(other))

    def intersect(self, other: "VersionTypes") -> "VersionTypes":
        """Return the constraint of versions in both this range and *other*."""
        from .version import Version

        if other.is_empty():
            return other

        if isinstance(other, VersionUnion):
            return other.intersect(self)

        # A range and a Version just yields the version if it's in the range.
        if isinstance(other, Version):
            if self.allows(other):
                return other

            return EmptyConstraint()

        if not isinstance(other, VersionRangeConstraint):
            raise ValueError("Unknown VersionConstraint type {}.".format(other))

        # Take the higher of the two minimums ...
        if self.allows_lower(other):
            if self.is_strictly_lower(other):
                return EmptyConstraint()

            intersect_min = other.min
            intersect_include_min = other.include_min
        else:
            if other.is_strictly_lower(self):
                return EmptyConstraint()

            intersect_min = self._min
            intersect_include_min = self._include_min

        # ... and the lower of the two maximums.
        if self.allows_higher(other):
            intersect_max = other.max
            intersect_include_max = other.include_max
        else:
            intersect_max = self._max
            intersect_include_max = self._include_max

        if intersect_min is None and intersect_max is None:
            return VersionRange()

        # If the range is just a single version.
        if intersect_min == intersect_max:
            # Because we already verified that the lower range isn't strictly
            # lower, there must be some overlap.
            assert intersect_include_min and intersect_include_max

            return intersect_min

        # If we got here, there is an actual range.
        return VersionRange(
            intersect_min, intersect_max, intersect_include_min, intersect_include_max
        )

    def union(self, other: "VersionTypes") -> "VersionTypes":
        """Return the constraint of versions in this range or in *other*."""
        from .version import Version

        if isinstance(other, Version):
            if self.allows(other):
                return self

            # A version touching an exclusive endpoint just closes that end.
            if other == self.min:
                return VersionRange(
                    self.min, self.max, include_min=True, include_max=self.include_max
                )

            if other == self.max:
                return VersionRange(
                    self.min, self.max, include_min=self.include_min, include_max=True
                )

            return VersionUnion.of(self, other)

        if isinstance(other, VersionRangeConstraint):
            # If the two ranges don't overlap, we won't be able to create a single
            # VersionRange for both of them.
            edges_touch = (
                self.max == other.min and (self.include_max or other.include_min)
            ) or (self.min == other.max and (self.include_min or other.include_max))

            if not edges_touch and not self.allows_any(other):
                return VersionUnion.of(self, other)

            if self.allows_lower(other):
                union_min = self.min
                union_include_min = self.include_min
            else:
                union_min = other.min
                union_include_min = other.include_min

            if self.allows_higher(other):
                union_max = self.max
                union_include_max = self.include_max
            else:
                union_max = other.max
                union_include_max = other.include_max

            return VersionRange(
                union_min,
                union_max,
                include_min=union_include_min,
                include_max=union_include_max,
            )

        return VersionUnion.of(self, other)

    def difference(self, other: "VersionTypes") -> "VersionTypes":
        """Return the constraint of versions in this range but not in *other*."""
        from .version import Version

        if other.is_empty():
            return self

        if isinstance(other, Version):
            if not self.allows(other):
                return self

            # Removing an endpoint just opens that end; removing an interior
            # version splits the range in two.
            if other == self.min:
                if not self.include_min:
                    return self

                return VersionRange(self.min, self.max, False, self.include_max)

            if other == self.max:
                if not self.include_max:
                    return self

                return VersionRange(self.min, self.max, self.include_min, False)

            return VersionUnion.of(
                VersionRange(self.min, other, self.include_min, False),
                VersionRange(other, self.max, False, self.include_max),
            )
        elif isinstance(other, VersionRangeConstraint):
            if not self.allows_any(other):
                return self

            # Portion of this range below *other* (if any) ...
            if not self.allows_lower(other):
                before = None
            elif self.min == other.min:
                before = self.min
            else:
                before = VersionRange(
                    self.min, other.min, self.include_min, not other.include_min
                )

            # ... and the portion above it (if any).
            if not self.allows_higher(other):
                after = None
            elif self.max == other.max:
                after = self.max
            else:
                after = VersionRange(
                    other.max, self.max, not other.include_max, self.include_max
                )

            if before is None and after is None:
                return EmptyConstraint()

            if before is None:
                return after

            if after is None:
                return before

            return VersionUnion.of(before, after)
        elif isinstance(other, VersionUnion):
            ranges: List[VersionRange] = []
            current = self

            for range in other.ranges:
                # Skip any ranges that are strictly lower than [current].
                if range.is_strictly_lower(current):
                    continue

                # If we reach a range strictly higher than [current], no more ranges
                # will be relevant so we can bail early.
                if range.is_strictly_higher(current):
                    break

                difference = current.difference(range)
                if difference.is_empty():
                    return EmptyConstraint()
                elif isinstance(difference, VersionUnion):
                    # If [range] split [current] in half, we only need to continue
                    # checking future ranges against the latter half.
                    ranges.append(difference.ranges[0])
                    current = difference.ranges[-1]
                else:
                    current = difference

            if not ranges:
                return current

            return VersionUnion.of(*(ranges + [current]))

        raise ValueError("Unknown VersionConstraint type {}.".format(other))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, VersionRangeConstraint):
            return False

        return (
            self._min == other.min
            and self._max == other.max
            and self._include_min == other.include_min
            and self._include_max == other.include_max
        )

    def __lt__(self, other: "VersionRangeConstraint") -> bool:
        return self._cmp(other) < 0

    def __le__(self, other: "VersionRangeConstraint") -> bool:
        return self._cmp(other) <= 0

    def __gt__(self, other: "VersionRangeConstraint") -> bool:
        return self._cmp(other) > 0

    def __ge__(self, other: "VersionRangeConstraint") -> bool:
        return self._cmp(other) >= 0

    def _cmp(self, other: "VersionRangeConstraint") -> int:
        """Three-way compare by lower bound first, then by upper bound.

        An unbounded (None) minimum sorts lowest; inclusive bounds sort
        before exclusive ones at the same endpoint.
        """
        if self.min is None:
            if other.min is None:
                return self._compare_max(other)

            return -1
        elif other.min is None:
            return 1

        if self.min > other.min:
            return 1
        elif self.min < other.min:
            return -1

        if self.include_min != other.include_min:
            return -1 if self.include_min else 1

        return self._compare_max(other)

    def _compare_max(self, other: "VersionRangeConstraint") -> int:
        """Three-way compare of upper bounds; None (unbounded) sorts highest."""
        if self.max is None:
            if other.max is None:
                return 0

            return 1
        elif other.max is None:
            return -1

        if self.max > other.max:
            return 1
        elif self.max < other.max:
            return -1

        if self.include_max != other.include_max:
            return 1 if self.include_max else -1

        return 0

    def __str__(self) -> str:
        text = ""

        if self.min is not None:
            text += ">=" if self.include_min else ">"
            text += self.min.text

        if self.max is not None:
            if self.min is not None:
                text += ","

            text += "{}{}".format("<=" if self.include_max else "<", self.max.text)

        if self.min is None and self.max is None:
            return "*"

        return text

    def __repr__(self) -> str:
        return "<VersionRange ({})>".format(str(self))

    def __hash__(self) -> int:
        return (
            hash(self.min)
            ^ hash(self.max)
            ^ hash(self.include_min)
            ^ hash(self.include_max)
        )
import re
from typing import TYPE_CHECKING
from typing import Union
if TYPE_CHECKING:
from .empty_constraint import EmptyConstraint # noqa
from .version import Version # noqa
from .version_range import VersionRange # noqa
from .version_union import VersionUnion # noqa
VersionTypes = Union["Version", "VersionRange", "VersionUnion", "EmptyConstraint"]
def parse_constraint(constraints: str) -> VersionTypes:
    """Parse a composite constraint string into a constraint object.

    The string may contain "||"-separated alternatives (union), each of
    which may contain comma/space-separated requirements (intersection),
    e.g. ``">=1.0,<2.0 || >=3.0"``.  ``"*"`` means any version.
    """
    if constraints == "*":
        from .version_range import VersionRange

        return VersionRange()

    or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip())
    or_groups = []
    # NOTE: the loop variable previously shadowed the `constraints` parameter.
    for or_constraint in or_constraints:
        # Split on "," or " " while avoiding operator characters and
        # pre-release dashes on either side of the separator.
        and_constraints = re.split(
            "(?<!^)(?<![~=>< ,]) *(?<!-)[, ](?!-) *(?!,|$)", or_constraint
        )
        constraint_objects = [
            parse_single_constraint(and_constraint)
            for and_constraint in and_constraints
        ]

        if len(constraint_objects) == 1:
            constraint = constraint_objects[0]
        else:
            # Fold the "and" group into a single intersection.
            constraint = constraint_objects[0]
            for next_constraint in constraint_objects[1:]:
                constraint = constraint.intersect(next_constraint)

        or_groups.append(constraint)

    if len(or_groups) == 1:
        return or_groups[0]
    else:
        from .version_union import VersionUnion

        return VersionUnion.of(*or_groups)
def parse_single_constraint(constraint: str) -> VersionTypes:
    """Parse one constraint term (no "||" or ",") into a constraint object.

    Supports wildcards (``*``, ``1.x``), tilde (``~1.2``), PEP 440 tilde
    (``~=1.2``), caret (``^1.2``) and basic comparators (``>=``, ``!=``,
    bare version, ...).

    Raises:
        ValueError: if a comparator's version cannot be parsed.
        ParseConstraintError: if the term matches no known syntax.
    """
    from .patterns import BASIC_CONSTRAINT
    from .patterns import CARET_CONSTRAINT
    from .patterns import TILDE_CONSTRAINT
    from .patterns import TILDE_PEP440_CONSTRAINT
    from .patterns import X_CONSTRAINT
    from .version import Version
    from .version_range import VersionRange
    from .version_union import VersionUnion

    # Bare wildcard ("*", "x", "X", optionally dotted) means any version.
    m = re.match(r"(?i)^v?[xX*](\.[xX*])*$", constraint)
    if m:
        return VersionRange()

    # Tilde range
    m = TILDE_CONSTRAINT.match(constraint)
    if m:
        version = Version.parse(m.group(1))
        high = version.stable.next_minor()
        if len(m.group(1).split(".")) == 1:
            # "~1" allows any 1.x, not just 1.0.x.
            high = version.stable.next_major()

        return VersionRange(version, high, include_min=True)

    # PEP 440 Tilde range (~=)
    m = TILDE_PEP440_CONSTRAINT.match(constraint)
    if m:
        precision = 1
        if m.group(3):
            precision += 1

        if m.group(4):
            precision += 1

        version = Version.parse(m.group(1))

        if precision == 2:
            high = version.stable.next_major()
        else:
            high = version.stable.next_minor()

        return VersionRange(version, high, include_min=True)

    # Caret range
    m = CARET_CONSTRAINT.match(constraint)
    if m:
        version = Version.parse(m.group(1))

        return VersionRange(version, version.next_breaking(), include_min=True)

    # X Range
    m = X_CONSTRAINT.match(constraint)
    if m:
        op = m.group(1)
        major = int(m.group(2))
        minor = m.group(3)

        if minor is not None:
            version = Version.from_parts(major, int(minor), 0)
            result = VersionRange(version, version.next_minor(), include_min=True)
        else:
            if major == 0:
                result = VersionRange(max=Version.from_parts(1, 0, 0))
            else:
                version = Version.from_parts(major, 0, 0)

                result = VersionRange(version, version.next_major(), include_min=True)

        if op == "!=":
            result = VersionRange().difference(result)

        return result

    # Basic comparator
    m = BASIC_CONSTRAINT.match(constraint)
    if m:
        op = m.group(1)
        version = m.group(2)

        # Technically invalid constraints like `>= 3.*` will appear
        # here as `3.`.
        # Pip currently supports these and to avoid breaking existing
        # users workflows we need to support them as well. To do so,
        # we just remove the inconsequential part.
        version = version.rstrip(".")

        if version == "dev":
            version = "0.0-dev"

        try:
            version = Version.parse(version)
        except ValueError:
            raise ValueError(
                "Could not parse version constraint: {}".format(constraint)
            )

        if op == "<":
            return VersionRange(max=version)
        elif op == "<=":
            return VersionRange(max=version, include_max=True)
        elif op == ">":
            return VersionRange(min=version)
        elif op == ">=":
            return VersionRange(min=version, include_min=True)
        elif op == "!=":
            return VersionUnion(VersionRange(max=version), VersionRange(min=version))
        else:
            # No (or "==") operator: exact version.
            return version

    from .exceptions import ParseConstraintError

    raise ParseConstraintError(
        "Could not parse version constraint: {}".format(constraint)
    )
import dataclasses
from typing import TYPE_CHECKING
from typing import Optional
from typing import Tuple
from typing import Union
from poetry.core.semver.empty_constraint import EmptyConstraint
from poetry.core.semver.version_range_constraint import VersionRangeConstraint
from poetry.core.semver.version_union import VersionUnion
from poetry.core.version.pep440 import Release
from poetry.core.version.pep440 import ReleaseTag
from poetry.core.version.pep440.version import PEP440Version
if TYPE_CHECKING:
from poetry.core.semver.helpers import VersionTypes
from poetry.core.version.pep440 import LocalSegmentType
@dataclasses.dataclass(frozen=True)
class Version(PEP440Version, VersionRangeConstraint):
"""
A parsed semantic version number.
"""
@property
def precision(self) -> int:
return self.release.precision
@property
def stable(self) -> "Version":
if self.is_stable():
return self
return self.next_patch()
def next_breaking(self) -> "Version":
if self.major == 0:
if self.minor is not None and self.minor != 0:
return self.next_minor()
if self.precision == 1:
return self.next_major()
elif self.precision == 2:
return self.next_minor()
return self.next_patch()
return self.stable.next_major()
def first_pre_release(self) -> "Version":
# noinspection PyArgumentList
return self.__class__(release=self.release, pre=ReleaseTag("alpha"))
@property
def min(self) -> "Version":
return self
@property
def max(self) -> "Version":
return self
@property
def full_max(self) -> "Version":
return self
@property
def include_min(self) -> bool:
return True
@property
def include_max(self) -> bool:
return True
def is_any(self) -> bool:
return False
def is_empty(self) -> bool:
return False
def allows(self, version: "Version") -> bool:
if version is None:
return False
_this, _other = self, version
# allow weak equality to allow `3.0.0+local.1` for `3.0.0`
if not _this.is_local() and _other.is_local():
_other = _other.without_local()
elif _this.is_local() and not _other.is_local():
_this = _this.without_local()
# allow weak equality to allow `3.0.0-1` for `3.0.0`
if not _this.is_postrelease() and _other.is_postrelease():
_other = _other.without_postrelease()
elif _this.without_postrelease() and not _other.without_postrelease():
_this = _this.without_postrelease()
return _this == _other
def allows_all(self, other: "VersionTypes") -> bool:
return other.is_empty() or (
self.allows(other) if isinstance(other, self.__class__) else other == self
)
def allows_any(self, other: "VersionTypes") -> bool:
return other.allows(self)
def intersect(self, other: "VersionTypes") -> Union["Version", EmptyConstraint]:
if other.allows(self):
return self
return EmptyConstraint()
def union(self, other: "VersionTypes") -> "VersionTypes":
from poetry.core.semver.version_range import VersionRange
if other.allows(self):
return other
if isinstance(other, VersionRangeConstraint):
if self.allows(other.min):
return VersionRange(
other.min,
other.max,
include_min=True,
include_max=other.include_max,
)
if self.allows(other.max):
return VersionRange(
other.min,
other.max,
include_min=other.include_min,
include_max=True,
)
return VersionUnion.of(self, other)
def difference(self, other: "VersionTypes") -> Union["Version", EmptyConstraint]:
if other.allows(self):
return EmptyConstraint()
return self
def __str__(self) -> str:
return self.text
def __repr__(self) -> str:
return "<Version {}>".format(str(self))
    def __eq__(self, other: Union["Version", "VersionRangeConstraint"]) -> bool:
        """Equality; a VersionRange that collapses to a single version
        compares equal to that version."""
        from poetry.core.semver.version_range import VersionRange
        if isinstance(other, VersionRange):
            # A range equals this version only when both bounds are this
            # version and at least one bound is inclusive.
            return (
                self == other.min
                and self == other.max
                and (other.include_min or other.include_max)
            )
        return super().__eq__(other)
    @classmethod
    def from_parts(
        cls,
        major: int,
        minor: Optional[int] = None,
        patch: Optional[int] = None,
        extra: Optional[Union[int, Tuple[int, ...]]] = None,
        pre: Optional[ReleaseTag] = None,
        post: Optional[ReleaseTag] = None,
        dev: Optional[ReleaseTag] = None,
        local: "LocalSegmentType" = None,
    ) -> "Version":
        """Alternate constructor assembling a Version from individual
        release numbers and optional pre/post/dev/local tags."""
        return cls(
            release=Release(major=major, minor=minor, patch=patch, extra=extra),
            pre=pre,
            post=post,
            dev=dev,
            local=local,
        )
from abc import abstractmethod
from typing import TYPE_CHECKING
from poetry.core.semver.version_constraint import VersionConstraint
if TYPE_CHECKING:
from poetry.core.semver.version import Version
class VersionRangeConstraint(VersionConstraint):
    """A version constraint bounded below by ``min`` and above by ``max``.

    Concrete subclasses supply the bounds; this base class implements the
    ordering predicates used for range arithmetic. A ``None`` bound means
    the range is unbounded on that side.
    """

    @property
    @abstractmethod
    def min(self) -> "Version":
        """Lower bound of the range, or ``None`` if unbounded below."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def max(self) -> "Version":
        """Upper bound of the range, or ``None`` if unbounded above."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def full_max(self) -> "Version":
        """Upper bound used for ordering comparisons."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def include_min(self) -> bool:
        """Whether the lower bound itself is allowed."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def include_max(self) -> bool:
        """Whether the upper bound itself is allowed."""
        raise NotImplementedError()

    def allows_lower(self, other: "VersionRangeConstraint") -> bool:
        """Return True if this range allows versions below *other*'s lower bound."""
        if self.min is None:
            # Unbounded below: strictly lower unless *other* is unbounded too.
            return other.min is not None
        if other.min is None:
            return False
        if self.min != other.min:
            return self.min < other.min
        # Equal bounds: only an inclusive edge reaches lower than an exclusive one.
        return self.include_min and not other.include_min

    def allows_higher(self, other: "VersionRangeConstraint") -> bool:
        """Return True if this range allows versions above *other*'s upper bound."""
        if self.full_max is None:
            return other.max is not None
        if other.full_max is None:
            return False
        if self.full_max != other.full_max:
            return self.full_max > other.full_max
        # Equal bounds: only an inclusive edge reaches higher than an exclusive one.
        return self.include_max and not other.include_max

    def is_strictly_lower(self, other: "VersionRangeConstraint") -> bool:
        """Return True if every version in this range is below *other*."""
        if self.full_max is None or other.min is None:
            return False
        if self.full_max != other.min:
            return self.full_max < other.min
        # Touching edges overlap only when both sides include the edge.
        return not (self.include_max and other.include_min)

    def is_strictly_higher(self, other: "VersionRangeConstraint") -> bool:
        """Return True if every version in this range is above *other*."""
        return other.is_strictly_lower(self)

    def is_adjacent_to(self, other: "VersionRangeConstraint") -> bool:
        """Return True if this range ends exactly where *other* begins,
        with the shared edge belonging to exactly one of the two."""
        if self.max != other.min:
            return False
        # XOR: exactly one side must include the shared edge.
        return self.include_max != other.include_min
from typing import TYPE_CHECKING
from typing import Optional
from typing import Union
from poetry.core.packages.package import Package
from poetry.core.semver.version import Version
if TYPE_CHECKING:
from poetry.repositories import Pool
class VersionSelector:
    """Selects the best matching package version from a repository pool."""

    def __init__(self, pool: "Pool") -> None:
        self._pool = pool

    def find_best_candidate(
        self,
        package_name: str,
        target_package_version: Optional[str] = None,
        allow_prereleases: bool = False,
        source: Optional[str] = None,
    ) -> Union[Package, bool]:
        """
        Given a package name and optional version,
        returns the latest Package that matches.

        Returns ``False`` when nothing matches (kept for backward
        compatibility with existing callers).
        """
        from poetry.factory import Factory

        dependency = Factory.create_dependency(
            package_name,
            {
                "version": target_package_version or "*",
                "allow_prereleases": allow_prereleases,
                "source": source,
            },
        )
        candidates = self._pool.find_packages(dependency)
        # Bail out early; previously `only_prereleases` was computed (with a
        # needless intermediate list) before this check.
        if not candidates:
            return False

        # If *every* candidate is a pre-release, accept pre-releases even
        # when they were not explicitly requested.
        only_prereleases = all(c.version.is_unstable() for c in candidates)

        package = None
        for candidate in candidates:
            if (
                candidate.is_prerelease()
                and not dependency.allows_prereleases()
                and not only_prereleases
            ):
                continue

            # Select highest version of the two
            if package is None or package.version < candidate.version:
                package = candidate

        if package is None:
            return False

        return package

    def find_recommended_require_version(self, package: Package) -> str:
        """Return the version constraint to recommend for *package*."""
        version = package.version

        return self._transform_version(version.text, package.pretty_version)

    def _transform_version(self, version: str, pretty_version: str) -> str:
        """Return ``^<version>``; fall back to the pretty version when
        *version* cannot be parsed."""
        try:
            return f"^{Version.parse(version).to_string()}"
        except ValueError:
            return pretty_version
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from poetry.core.semver.helpers import parse_constraint
from .incompatibility import Incompatibility
from .incompatibility_cause import ConflictCause
from .incompatibility_cause import PythonCause
class SolveFailure(Exception):
    """Raised when dependency resolution ends in an unresolvable conflict."""

    def __init__(self, incompatibility: Incompatibility) -> None:
        self._incompatibility = incompatibility

    @property
    def message(self) -> str:
        """Human-readable explanation of the failure."""
        return str(self)

    def __str__(self) -> str:
        # Rendering of the derivation graph is delegated to _Writer.
        writer = _Writer(self._incompatibility)
        return writer.write()
class _Writer:
    """Renders an Incompatibility's derivation graph as a human-readable
    explanation of why version solving failed.

    Follows the PubGrub error-reporting strategy: incompatibilities that are
    referenced more than once get numbered output lines so later steps can
    cite them instead of repeating the whole derivation.
    """

    def __init__(self, root: Incompatibility) -> None:
        self._root = root
        # How many times each incompatibility occurs in the derivation graph;
        # entries used more than once are written with a line number.
        self._derivations: Dict[Incompatibility, int] = {}
        # Output as (message, optional line number) pairs, in order.
        self._lines: List[Tuple[str, Optional[int]]] = []
        # Line numbers assigned to incompatibilities referenced later on.
        self._line_numbers: Dict[Incompatibility, int] = {}
        self._count_derivations(self._root)

    def write(self) -> str:
        """Build and return the complete failure report."""
        buffer = []
        # Lead with any Python-version mismatches, which often explain the
        # failure on their own.
        required_python_version_notification = False
        for incompatibility in self._root.external_incompatibilities:
            if isinstance(incompatibility.cause, PythonCause):
                if not required_python_version_notification:
                    buffer.append(
                        "The installed project's Python version ({}) "
                        "is not compatible with some of the required "
                        "packages Python requirement:".format(
                            incompatibility.cause.root_python_version
                        )
                    )
                    required_python_version_notification = True
                root_constraint = parse_constraint(
                    incompatibility.cause.root_python_version
                )
                constraint = parse_constraint(incompatibility.cause.python_version)
                buffer.append(
                    " - {} requires Python {}, so it will not be satisfied for Python {}".format(
                        incompatibility.terms[0].dependency.name,
                        incompatibility.cause.python_version,
                        root_constraint.difference(constraint),
                    )
                )
        if required_python_version_notification:
            buffer.append("")
        # A derived root needs the full derivation walk; a simple root is a
        # one-line message.
        if isinstance(self._root.cause, ConflictCause):
            self._visit(self._root, {})
        else:
            self._write(self._root, f"Because {self._root}, version solving failed.")
        # Left-pad unnumbered messages so they align with "(N) " prefixes.
        padding = (
            0
            if not self._line_numbers
            else len("({}) ".format(list(self._line_numbers.values())[-1]))
        )
        last_was_empty = False
        for line in self._lines:
            message = line[0]
            if not message:
                # Collapse consecutive blank lines.
                if not last_was_empty:
                    buffer.append("")
                last_was_empty = True
                continue
            last_was_empty = False
            number = line[-1]
            if number is not None:
                message = f"({number})".ljust(padding) + message
            else:
                message = " " * padding + message
            buffer.append(message)
        return "\n".join(buffer)

    def _write(
        self, incompatibility: Incompatibility, message: str, numbered: bool = False
    ) -> None:
        """Append *message*; when *numbered*, assign the incompatibility the
        next line number so later lines can reference it."""
        if numbered:
            number = len(self._line_numbers) + 1
            self._line_numbers[incompatibility] = number
            self._lines.append((message, number))
        else:
            self._lines.append((message, None))

    def _visit(
        self,
        incompatibility: Incompatibility,
        details_for_incompatibility: Dict,
        conclusion: bool = False,
    ) -> None:
        """Recursively write the derivation of a ConflictCause-derived
        *incompatibility*.

        The shape of the output depends on how many of the two parent
        incompatibilities are themselves derived, and on whether they already
        have line numbers to cite.
        """
        # Number this line if it concludes the report or is referenced more
        # than once in the graph.
        numbered = conclusion or self._derivations[incompatibility] > 1
        conjunction = "So," if conclusion or incompatibility == self._root else "And"
        incompatibility_string = str(incompatibility)
        # NOTE(review): assumes cause is a ConflictCause (callers only pass
        # derived incompatibilities).
        cause = incompatibility.cause
        details_for_cause = {}
        if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
            cause.other.cause, ConflictCause
        ):
            # Both parents are themselves derived.
            conflict_line = self._line_numbers.get(cause.conflict)
            other_line = self._line_numbers.get(cause.other)
            if conflict_line is not None and other_line is not None:
                # Both already written: just cite their numbers.
                self._write(
                    incompatibility,
                    "Because {}, {}.".format(
                        cause.conflict.and_to_string(
                            cause.other, details_for_cause, conflict_line, other_line
                        ),
                        incompatibility_string,
                    ),
                    numbered=numbered,
                )
            elif conflict_line is not None or other_line is not None:
                # Exactly one parent has a line number: recurse into the
                # other, then cite the numbered one.
                if conflict_line is not None:
                    with_line = cause.conflict
                    without_line = cause.other
                    line = conflict_line
                else:
                    with_line = cause.other
                    without_line = cause.conflict
                    line = other_line
                self._visit(without_line, details_for_cause)
                self._write(
                    incompatibility,
                    "{} because {} ({}), {}.".format(
                        conjunction, str(with_line), line, incompatibility_string
                    ),
                    numbered=numbered,
                )
            else:
                # Neither parent is written yet.
                single_line_conflict = self._is_single_line(cause.conflict.cause)
                single_line_other = self._is_single_line(cause.other.cause)
                if single_line_other or single_line_conflict:
                    # One parent's derivation fits on a single line: write the
                    # short one first, then the long one, then conclude.
                    first = cause.conflict if single_line_other else cause.other
                    second = cause.other if single_line_other else cause.conflict
                    self._visit(first, details_for_cause)
                    self._visit(second, details_for_cause)
                    self._write(
                        incompatibility,
                        f"Thus, {incompatibility_string}.",
                        numbered=numbered,
                    )
                else:
                    # Both derivations are long: write one as a numbered
                    # sub-proof separated by a blank line.
                    self._visit(cause.conflict, {}, conclusion=True)
                    self._lines.append(("", None))
                    self._visit(cause.other, details_for_cause)
                    self._write(
                        incompatibility,
                        "{} because {} ({}), {}".format(
                            conjunction,
                            str(cause.conflict),
                            self._line_numbers[cause.conflict],
                            incompatibility_string,
                        ),
                        numbered=numbered,
                    )
        elif isinstance(cause.conflict.cause, ConflictCause) or isinstance(
            cause.other.cause, ConflictCause
        ):
            # Exactly one parent is derived ("derived"), the other external ("ext").
            derived = (
                cause.conflict
                if isinstance(cause.conflict.cause, ConflictCause)
                else cause.other
            )
            ext = (
                cause.other
                if isinstance(cause.conflict.cause, ConflictCause)
                else cause.conflict
            )
            derived_line = self._line_numbers.get(derived)
            if derived_line is not None:
                # The derived parent is already written: cite it.
                self._write(
                    incompatibility,
                    "Because {}, {}.".format(
                        ext.and_to_string(
                            derived, details_for_cause, None, derived_line
                        ),
                        incompatibility_string,
                    ),
                    numbered=numbered,
                )
            elif self._is_collapsible(derived):
                # The derived parent's own derivation can be folded into this
                # line: recurse into its derived half and merge the externals.
                derived_cause: ConflictCause = derived.cause
                if isinstance(derived_cause.conflict.cause, ConflictCause):
                    collapsed_derived = derived_cause.conflict
                else:
                    collapsed_derived = derived_cause.other
                if isinstance(derived_cause.conflict.cause, ConflictCause):
                    collapsed_ext = derived_cause.other
                else:
                    collapsed_ext = derived_cause.conflict
                details_for_cause = {}
                self._visit(collapsed_derived, details_for_cause)
                self._write(
                    incompatibility,
                    "{} because {}, {}.".format(
                        conjunction,
                        collapsed_ext.and_to_string(ext, details_for_cause, None, None),
                        incompatibility_string,
                    ),
                    numbered=numbered,
                )
            else:
                self._visit(derived, details_for_cause)
                self._write(
                    incompatibility,
                    "{} because {}, {}.".format(
                        conjunction, str(ext), incompatibility_string
                    ),
                    numbered=numbered,
                )
        else:
            # Both parents are external facts: a single "Because X and Y" line.
            self._write(
                incompatibility,
                "Because {}, {}.".format(
                    cause.conflict.and_to_string(
                        cause.other, details_for_cause, None, None
                    ),
                    incompatibility_string,
                ),
                numbered=numbered,
            )

    def _is_collapsible(self, incompatibility: Incompatibility) -> bool:
        """Return True if *incompatibility*'s derivation can be merged into
        its parent's output line (used once, exactly one derived parent, and
        that parent not already numbered)."""
        if self._derivations[incompatibility] > 1:
            return False
        cause: ConflictCause = incompatibility.cause
        if isinstance(cause.conflict.cause, ConflictCause) and isinstance(
            cause.other.cause, ConflictCause
        ):
            return False
        if not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
            cause.other.cause, ConflictCause
        ):
            return False
        # NOTE(review): shadows the builtin ``complex``; harmless here.
        complex = (
            cause.conflict
            if isinstance(cause.conflict.cause, ConflictCause)
            else cause.other
        )
        return complex not in self._line_numbers

    def _is_single_line(self, cause: ConflictCause) -> bool:
        """Return True if neither parent of *cause* is itself derived, so the
        derivation can be reported on a single line."""
        return not isinstance(cause.conflict.cause, ConflictCause) and not isinstance(
            cause.other.cause, ConflictCause
        )

    def _count_derivations(self, incompatibility: Incompatibility) -> None:
        """Count how often each incompatibility occurs in the derivation
        graph (drives the numbering decision in _visit)."""
        if incompatibility in self._derivations:
            self._derivations[incompatibility] += 1
        else:
            self._derivations[incompatibility] = 1
            cause = incompatibility.cause
            if isinstance(cause, ConflictCause):
                self._count_derivations(cause.conflict)
                self._count_derivations(cause.other)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from poetry.mixology.incompatibility import Incompatibility
class IncompatibilityCause(Exception):
    """
    The reason an Incompatibility's terms are incompatible.
    """
class RootCause(IncompatibilityCause):
    """The incompatibility stems from the root package's own requirements."""
class NoVersionsCause(IncompatibilityCause):
    """No versions of the package match the required constraint."""
class DependencyCause(IncompatibilityCause):
    """The incompatibility represents one package's dependency on another."""
class ConflictCause(IncompatibilityCause):
    """
    The incompatibility was derived from two existing incompatibilities
    during conflict resolution.
    """

    def __init__(self, conflict: "Incompatibility", other: "Incompatibility") -> None:
        # The two parent incompatibilities whose combination produced this one.
        self._conflict = conflict
        self._other = other

    @property
    def conflict(self) -> "Incompatibility":
        """First parent incompatibility of the derivation."""
        return self._conflict

    @property
    def other(self) -> "Incompatibility":
        """Second parent incompatibility of the derivation."""
        return self._other

    def __str__(self) -> str:
        return str(self._conflict)
class PythonCause(IncompatibilityCause):
    """
    The incompatibility represents a package's python constraint
    (Python versions) being incompatible
    with the current python version.
    """

    def __init__(self, python_version: str, root_python_version: str) -> None:
        # Requirement of the offending package vs. the project's own range.
        self._package_python = python_version
        self._root_python = root_python_version

    @property
    def python_version(self) -> str:
        return self._package_python

    @property
    def root_python_version(self) -> str:
        return self._root_python
class PlatformCause(IncompatibilityCause):
    """
    The incompatibility represents a package's platform constraint
    (OS most likely) being incompatible with the current platform.
    """

    def __init__(self, platform: str) -> None:
        self._required_platform = platform

    @property
    def platform(self) -> str:
        return self._required_platform
class PackageNotFoundCause(IncompatibilityCause):
    """
    The incompatibility represents a package that couldn't be found by its
    source.
    """

    def __init__(self, error: Exception) -> None:
        self._original_error = error

    @property
    def error(self) -> Exception:
        return self._original_error
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Union
from .incompatibility_cause import ConflictCause
from .incompatibility_cause import DependencyCause
from .incompatibility_cause import IncompatibilityCause
from .incompatibility_cause import NoVersionsCause
from .incompatibility_cause import PackageNotFoundCause
from .incompatibility_cause import PlatformCause
from .incompatibility_cause import PythonCause
from .incompatibility_cause import RootCause
from .term import Term
class Incompatibility:
    """A set of terms that cannot all be true at once, plus the cause of
    the conflict.

    This is the core statement type of the PubGrub version-solving
    algorithm; the `_try_requires_*` helpers format pairs of
    incompatibilities into readable error sentences.
    """

    def __init__(self, terms: List[Term], cause: IncompatibilityCause) -> None:
        # Remove the root package from generated incompatibilities, since it will
        # always be satisfied. This makes error reporting clearer, and may also
        # make solving more efficient.
        if (
            len(terms) != 1
            and isinstance(cause, ConflictCause)
            and any(term.is_positive() and term.dependency.is_root for term in terms)
        ):
            terms = [
                term
                for term in terms
                if not term.is_positive() or not term.dependency.is_root
            ]
        if (
            len(terms) == 1
            # Short-circuit in the common case of a two-term incompatibility with
            # two different packages (for example, a dependency).
            or len(terms) == 2
            and terms[0].dependency.complete_name != terms[-1].dependency.complete_name
        ):
            pass
        else:
            # Coalesce multiple terms about the same package if possible.
            by_name: Dict[str, Dict[str, Term]] = {}
            for term in terms:
                if term.dependency.complete_name not in by_name:
                    by_name[term.dependency.complete_name] = {}
                by_ref = by_name[term.dependency.complete_name]
                ref = term.dependency.complete_name
                if ref in by_ref:
                    by_ref[ref] = by_ref[ref].intersect(term)
                    # If we have two terms that refer to the same package but have a null
                    # intersection, they're mutually exclusive, making this incompatibility
                    # irrelevant, since we already know that mutually exclusive version
                    # ranges are incompatible. We should never derive an irrelevant
                    # incompatibility.
                    assert by_ref[ref] is not None
                else:
                    by_ref[ref] = term
            new_terms = []
            for by_ref in by_name.values():
                positive_terms = [
                    term for term in by_ref.values() if term.is_positive()
                ]
                if positive_terms:
                    new_terms += positive_terms
                    continue
                new_terms += list(by_ref.values())
            terms = new_terms
        self._terms = terms
        self._cause = cause

    @property
    def terms(self) -> List[Term]:
        return self._terms

    @property
    def cause(
        self,
    ) -> Union[
        RootCause,
        NoVersionsCause,
        DependencyCause,
        ConflictCause,
        PythonCause,
        PlatformCause,
        PackageNotFoundCause,
    ]:
        return self._cause

    @property
    def external_incompatibilities(
        self,
    ) -> Iterator[Union[ConflictCause, "Incompatibility"]]:
        """
        Returns all external incompatibilities in this incompatibility's
        derivation graph.
        """
        if isinstance(self._cause, ConflictCause):
            cause: ConflictCause = self._cause
            yield from cause.conflict.external_incompatibilities
            yield from cause.other.external_incompatibilities
        else:
            yield self

    def is_failure(self) -> bool:
        """A failure is the empty incompatibility, or one that only involves
        the root package (which is always selected)."""
        return len(self._terms) == 0 or (
            len(self._terms) == 1 and self._terms[0].dependency.is_root
        )

    def __str__(self) -> str:
        """Render this incompatibility as an English sentence fragment."""
        if isinstance(self._cause, DependencyCause):
            assert len(self._terms) == 2
            depender = self._terms[0]
            dependee = self._terms[1]
            assert depender.is_positive()
            assert not dependee.is_positive()
            return "{} depends on {}".format(
                self._terse(depender, allow_every=True), self._terse(dependee)
            )
        elif isinstance(self._cause, PythonCause):
            assert len(self._terms) == 1
            assert self._terms[0].is_positive()
            cause: PythonCause = self._cause
            text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
            text += f"Python {cause.python_version}"
            return text
        elif isinstance(self._cause, PlatformCause):
            assert len(self._terms) == 1
            assert self._terms[0].is_positive()
            cause: PlatformCause = self._cause
            text = "{} requires ".format(self._terse(self._terms[0], allow_every=True))
            text += f"platform {cause.platform}"
            return text
        elif isinstance(self._cause, NoVersionsCause):
            assert len(self._terms) == 1
            assert self._terms[0].is_positive()
            return "no versions of {} match {}".format(
                self._terms[0].dependency.name, self._terms[0].constraint
            )
        elif isinstance(self._cause, PackageNotFoundCause):
            assert len(self._terms) == 1
            assert self._terms[0].is_positive()
            return "{} doesn't exist".format(self._terms[0].dependency.name)
        elif isinstance(self._cause, RootCause):
            assert len(self._terms) == 1
            assert not self._terms[0].is_positive()
            assert self._terms[0].dependency.is_root
            return "{} is not {}".format(
                self._terms[0].dependency.name, self._terms[0].dependency.constraint
            )
        elif self.is_failure():
            return "version solving failed"
        if len(self._terms) == 1:
            term = self._terms[0]
            if term.constraint.is_any():
                return "{} is {}".format(
                    term.dependency.name,
                    "forbidden" if term.is_positive() else "required",
                )
            else:
                # BUG FIX: both branches previously emitted the bare package
                # name; when the constraint is not "any" it must be included.
                return "{} is {}".format(
                    self._terse(term),
                    "forbidden" if term.is_positive() else "required",
                )
        if len(self._terms) == 2:
            term1 = self._terms[0]
            term2 = self._terms[1]
            if term1.is_positive() == term2.is_positive():
                if term1.is_positive():
                    package1 = (
                        term1.dependency.name
                        if term1.constraint.is_any()
                        else self._terse(term1)
                    )
                    package2 = (
                        term2.dependency.name
                        if term2.constraint.is_any()
                        else self._terse(term2)
                    )
                    return f"{package1} is incompatible with {package2}"
                else:
                    return "either {} or {}".format(
                        self._terse(term1), self._terse(term2)
                    )
        positive = []
        negative = []
        for term in self._terms:
            if term.is_positive():
                positive.append(self._terse(term))
            else:
                negative.append(self._terse(term))
        if positive and negative:
            if len(positive) == 1:
                positive_term = [term for term in self._terms if term.is_positive()][0]
                return "{} requires {}".format(
                    self._terse(positive_term, allow_every=True), " or ".join(negative)
                )
            else:
                return "if {} then {}".format(
                    " and ".join(positive), " or ".join(negative)
                )
        elif positive:
            return "one of {} must be false".format(" or ".join(positive))
        else:
            return "one of {} must be true".format(" or ".join(negative))

    def and_to_string(
        self,
        other: "Incompatibility",
        details: dict,
        this_line: Optional[int],
        other_line: Optional[int],
    ) -> str:
        """Format this incompatibility joined with *other*, preferring the
        compact `requires both` / `requires through` / `requires forbidden`
        sentence shapes when one applies."""
        requires_both = self._try_requires_both(other, details, this_line, other_line)
        if requires_both is not None:
            return requires_both
        requires_through = self._try_requires_through(
            other, details, this_line, other_line
        )
        if requires_through is not None:
            return requires_through
        requires_forbidden = self._try_requires_forbidden(
            other, details, this_line, other_line
        )
        if requires_forbidden is not None:
            return requires_forbidden
        # Fallback: join the two statements with "and".
        buffer = [str(self)]
        if this_line is not None:
            buffer.append(" " + str(this_line))
        buffer.append(" and {}".format(str(other)))
        if other_line is not None:
            buffer.append(" " + str(other_line))
        return "\n".join(buffer)

    def _try_requires_both(
        self,
        other: "Incompatibility",
        details: dict,
        this_line: Optional[int],
        other_line: Optional[int],
    ) -> Optional[str]:
        """Format "P requires both X and Y" when self and other share the
        same single positive term; None when the shape doesn't apply."""
        if len(self._terms) == 1 or len(other.terms) == 1:
            return
        this_positive = self._single_term_where(lambda term: term.is_positive())
        if this_positive is None:
            return
        other_positive = other._single_term_where(lambda term: term.is_positive())
        if other_positive is None:
            return
        if this_positive.dependency != other_positive.dependency:
            return
        this_negatives = " or ".join(
            [self._terse(term) for term in self._terms if not term.is_positive()]
        )
        other_negatives = " or ".join(
            [self._terse(term) for term in other.terms if not term.is_positive()]
        )
        buffer = [self._terse(this_positive, allow_every=True) + " "]
        is_dependency = isinstance(self.cause, DependencyCause) and isinstance(
            other.cause, DependencyCause
        )
        if is_dependency:
            buffer.append("depends on")
        else:
            buffer.append("requires")
        buffer.append(f" both {this_negatives}")
        if this_line is not None:
            buffer.append(f" ({this_line})")
        buffer.append(f" and {other_negatives}")
        if other_line is not None:
            buffer.append(f" ({other_line})")
        return "".join(buffer)

    def _try_requires_through(
        self, other: "Incompatibility", details: dict, this_line: int, other_line: int
    ) -> Optional[str]:
        """Format "A requires B which requires C" when one incompatibility's
        negative term satisfies the other's positive term; None otherwise."""
        if len(self._terms) == 1 or len(other.terms) == 1:
            return
        this_negative = self._single_term_where(lambda term: not term.is_positive())
        other_negative = other._single_term_where(lambda term: not term.is_positive())
        if this_negative is None and other_negative is None:
            return
        this_positive = self._single_term_where(lambda term: term.is_positive())
        # BUG FIX: the positive term must come from *other*; this previously
        # called self._single_term_where, so the cross-checks below compared
        # self against itself.
        other_positive = other._single_term_where(lambda term: term.is_positive())
        if (
            this_negative is not None
            and other_positive is not None
            and this_negative.dependency.name == other_positive.dependency.name
            and this_negative.inverse.satisfies(other_positive)
        ):
            prior = self
            prior_negative = this_negative
            prior_line = this_line
            latter = other
            latter_line = other_line
        elif (
            other_negative is not None
            and this_positive is not None
            and other_negative.dependency.name == this_positive.dependency.name
            and other_negative.inverse.satisfies(this_positive)
        ):
            prior = other
            prior_negative = other_negative
            prior_line = other_line
            latter = self
            latter_line = this_line
        else:
            return
        prior_positives = [term for term in prior.terms if term.is_positive()]
        buffer = []
        if len(prior_positives) > 1:
            prior_string = " or ".join([self._terse(term) for term in prior_positives])
            buffer.append(f"if {prior_string} then ")
        else:
            if isinstance(prior.cause, DependencyCause):
                verb = "depends on"
            else:
                verb = "requires"
            buffer.append(
                "{} {} ".format(self._terse(prior_positives[0], allow_every=True), verb)
            )
        buffer.append(self._terse(prior_negative))
        if prior_line is not None:
            buffer.append(f" ({prior_line})")
        buffer.append(" which ")
        if isinstance(latter.cause, DependencyCause):
            buffer.append("depends on ")
        else:
            buffer.append("requires ")
        buffer.append(
            " or ".join(
                [self._terse(term) for term in latter.terms if not term.is_positive()]
            )
        )
        if latter_line is not None:
            buffer.append(f" ({latter_line})")
        return "".join(buffer)

    def _try_requires_forbidden(
        self, other: "Incompatibility", details: dict, this_line: int, other_line: int
    ) -> Optional[str]:
        """Format "A requires B which is forbidden" when one side is a
        single-term incompatibility; None when the shape doesn't apply."""
        if len(self._terms) != 1 and len(other.terms) != 1:
            return None
        # The single-term incompatibility is the "latter" (forbidden) side.
        if len(self.terms) == 1:
            prior = other
            latter = self
            prior_line = other_line
            latter_line = this_line
        else:
            prior = self
            latter = other
            prior_line = this_line
            latter_line = other_line
        negative = prior._single_term_where(lambda term: not term.is_positive())
        if negative is None:
            return
        if not negative.inverse.satisfies(latter.terms[0]):
            return
        positives = [t for t in prior.terms if t.is_positive()]
        buffer = []
        if len(positives) > 1:
            prior_string = " or ".join([self._terse(term) for term in positives])
            buffer.append(f"if {prior_string} then ")
        else:
            buffer.append(self._terse(positives[0], allow_every=True))
            if isinstance(prior.cause, DependencyCause):
                buffer.append(" depends on ")
            else:
                buffer.append(" requires ")
        buffer.append(self._terse(latter.terms[0]) + " ")
        if prior_line is not None:
            buffer.append(f"({prior_line}) ")
        # Describe why the latter term is forbidden, based on its cause.
        if isinstance(latter.cause, PythonCause):
            cause: PythonCause = latter.cause
            buffer.append(f"which requires Python {cause.python_version}")
        elif isinstance(latter.cause, NoVersionsCause):
            buffer.append("which doesn't match any versions")
        elif isinstance(latter.cause, PackageNotFoundCause):
            buffer.append("which doesn't exist")
        else:
            buffer.append("which is forbidden")
        if latter_line is not None:
            buffer.append(f" ({latter_line})")
        return "".join(buffer)

    def _terse(self, term: Term, allow_every: bool = False) -> str:
        """Short rendering of *term*: "every version of P" for an any
        constraint (when allowed), bare name for the root, else "P (c)"."""
        if allow_every and term.constraint.is_any():
            return f"every version of {term.dependency.complete_name}"
        if term.dependency.is_root:
            return term.dependency.pretty_name
        return "{} ({})".format(
            term.dependency.pretty_name, term.dependency.pretty_constraint
        )

    def _single_term_where(self, predicate: Callable[[Term], bool]) -> Optional[Term]:
        """Return the unique term matching *predicate*; None when zero or
        more than one term matches. (Parameter renamed from `callable`,
        which shadowed the builtin.)"""
        found = None
        for term in self._terms:
            if not predicate(term):
                continue
            if found is not None:
                return
            found = term
        return found

    def __repr__(self) -> str:
        return "<Incompatibility {}>".format(str(self))
from typing import TYPE_CHECKING
from typing import List
from crashtest.contracts.solution import Solution
if TYPE_CHECKING:
from poetry.mixology.incompatibility_cause import PackageNotFoundCause
class PythonRequirementSolution(Solution):
    """Suggested remedy when solving fails on Python-version requirements:
    point the user at the `python` / `markers` dependency properties."""

    def __init__(self, exception: "PackageNotFoundCause") -> None:
        from poetry.core.semver.helpers import parse_constraint
        from poetry.mixology.incompatibility_cause import PythonCause

        self._title = "Check your dependencies Python requirement."

        failure = exception.error
        version_solutions = []
        for incompatibility in failure._incompatibility.external_incompatibilities:
            # Only Python-requirement conflicts get a per-package suggestion.
            if not isinstance(incompatibility.cause, PythonCause):
                continue

            root_constraint = parse_constraint(
                incompatibility.cause.root_python_version
            )
            constraint = parse_constraint(incompatibility.cause.python_version)

            version_solutions.append(
                "For <fg=default;options=bold>{}</>, a possible solution would be "
                'to set the `<fg=default;options=bold>python</>` property to <fg=yellow>"{}"</>'.format(
                    incompatibility.terms[0].dependency.name,
                    constraint,
                )
            )

        description = (
            "The Python requirement can be specified via the `<fg=default;options=bold>python</>` "
            "or `<fg=default;options=bold>markers</>` properties"
        )
        if version_solutions:
            description += "\n\n" + "\n".join(version_solutions)
        description += "\n"

        self._description = description

    @property
    def solution_title(self) -> str:
        return self._title

    @property
    def solution_description(self) -> str:
        return self._description

    @property
    def documentation_links(self) -> List[str]:
        return [
            "https://python-poetry.org/docs/dependency-specification/#python-restricted-dependencies",
            "https://python-poetry.org/docs/dependency-specification/#using-environment-markers",
        ]
import urllib.parse
from pathlib import Path
from typing import Optional
from typing import Sequence
from typing import Union
from cleo.io.io import IO
from poetry.core.packages.utils.utils import path_to_url
from poetry.managed_project import ManagedProject
from poetry.utils._compat import decode
class Exporter:
    """
    Exporter class to export a lock file to alternative formats.
    """

    FORMAT_REQUIREMENTS_TXT = "requirements.txt"
    #: The names of the supported export formats.
    ACCEPTED_FORMATS = (FORMAT_REQUIREMENTS_TXT,)
    ALLOWED_HASH_ALGORITHMS = ("sha256", "sha384", "sha512")

    def __init__(self, poetry: ManagedProject) -> None:
        self._poetry = poetry

    def export(
        self,
        fmt: str,
        cwd: Path,
        output: Union[IO, str],
        with_hashes: bool = True,
        dev: bool = False,
        extras: Optional[Union[bool, Sequence[str]]] = None,
        with_credentials: bool = False,
    ) -> None:
        """Export the lock file in format *fmt* into *output*.

        Raises:
            ValueError: if *fmt* is not one of ACCEPTED_FORMATS.
        """
        if fmt not in self.ACCEPTED_FORMATS:
            raise ValueError(f"Invalid export format: {fmt}")
        getattr(self, "_export_{}".format(fmt.replace(".", "_")))(
            cwd,
            output,
            with_hashes=with_hashes,
            dev=dev,
            extras=extras,
            with_credentials=with_credentials,
        )

    def _export_requirements_txt(
        self,
        cwd: Path,
        output: Union[IO, str],
        with_hashes: bool = True,
        dev: bool = False,
        extras: Optional[Union[bool, Sequence[str]]] = None,
        with_credentials: bool = False,
    ) -> None:
        """Write the locked dependency set in pip requirements.txt format."""
        indexes = set()
        content = ""
        dependency_lines = set()
        for dependency_package in self._poetry.locker.get_project_dependency_packages(
            project_requires=self._poetry.package.all_requires, dev=dev, extras=extras
        ):
            line = ""
            dependency = dependency_package.dependency
            package = dependency_package.package
            requirement = dependency.to_pep_508(with_extras=False)
            is_direct_local_reference = (
                dependency.is_file() or dependency.is_directory()
            )
            is_direct_remote_reference = dependency.is_vcs() or dependency.is_url()
            if is_direct_remote_reference:
                line = requirement
            elif is_direct_local_reference:
                dependency_uri = path_to_url(dependency.source_url)
                if package.develop:
                    # BUG FIX: the previous code prefixed `line` with "-e "
                    # and then unconditionally reassigned `line`, so editable
                    # (develop) packages were exported as plain requirements.
                    # pip's editable syntax takes a bare URI, not `name @ uri`.
                    line = f"-e {dependency_uri}"
                else:
                    line = f"{dependency.name} @ {dependency_uri}"
            else:
                line = f"{package.name}=={package.version}"
            if not is_direct_remote_reference:
                # Carry over any environment markers from the PEP 508 string.
                if ";" in requirement:
                    markers = requirement.split(";", 1)[1].strip()
                    if markers:
                        line += f"; {markers}"
            if (
                not is_direct_remote_reference
                and not is_direct_local_reference
                and package.source_url
            ):
                indexes.add(package.source_url)
            if package.files and with_hashes:
                hashes = []
                for f in package.files:
                    h = f["hash"]
                    algorithm = "sha256"
                    if ":" in h:
                        algorithm, h = h.split(":")
                        # Skip hashes pip would reject.
                        if algorithm not in self.ALLOWED_HASH_ALGORITHMS:
                            continue
                    hashes.append(f"{algorithm}:{h}")
                if hashes:
                    line += " \\\n"
                    for i, h in enumerate(hashes):
                        line += "    --hash={}{}".format(
                            h, " \\\n" if i < len(hashes) - 1 else ""
                        )
            dependency_lines.add(line)
        content += "\n".join(sorted(dependency_lines))
        content += "\n"
        if indexes:
            # If we have extra indexes, we add them to the beginning of the output
            indexes_header = ""
            for index in sorted(indexes):
                repositories = [
                    r
                    for r in self._poetry.pool.repositories
                    if r.url == index.rstrip("/")
                ]
                if not repositories:
                    continue
                repository = repositories[0]
                if (
                    self._poetry.pool.has_default()
                    and repository is self._poetry.pool.repositories[0]
                ):
                    # The default repository becomes pip's primary index.
                    url = (
                        repository.authenticated_url
                        if with_credentials
                        else repository.url
                    )
                    indexes_header = f"--index-url {url}\n"
                    continue
                url = (
                    repository.authenticated_url if with_credentials else repository.url
                )
                parsed_url = urllib.parse.urlsplit(url)
                if parsed_url.scheme == "http":
                    # Plain-HTTP indexes need an explicit trust declaration.
                    indexes_header += f"--trusted-host {parsed_url.netloc}\n"
                indexes_header += f"--extra-index-url {url}\n"
            content = indexes_header + "\n" + content
        self._output(content, cwd, output)

    def _output(self, content: str, cwd: Path, output: Union[IO, str]) -> None:
        """Write *content* to an IO object, or to the file named by *output*
        (resolved relative to *cwd*)."""
        decoded = decode(content)
        try:
            output.write(decoded)
        except AttributeError:
            # *output* is a path fragment, not an IO object.
            filepath = cwd / output
            with filepath.open("w", encoding="utf-8") as f:
                f.write(decoded)
import sys
from threading import RLock
from typing import Optional, Union, Iterable
from cleo.formatters.style import Style
from cleo.io.inputs.argv_input import ArgvInput
from cleo.io.io import IO
from cleo.io.null_io import NullIO
from cleo.io.outputs.output import Verbosity, Output
from cleo.io.outputs.stream_output import StreamOutput
from typing_extensions import Protocol
class Printer(Protocol):
    """Structural interface for console output sinks.

    Implemented by ``Console``, ``NullPrinter`` and ``DynamicLinePrinter``
    below; code that only needs to emit text should depend on this protocol
    rather than on a concrete console.
    """

    def println(
        self, message: str,
        verbosity: Verbosity = Verbosity.NORMAL,
    ) -> None:
        """Emit *message* followed by a newline, subject to *verbosity*."""
        ...

    def print(
        self, message: str,
        verbosity: Verbosity = Verbosity.NORMAL, ) -> None:
        """Emit *message* without a trailing newline, subject to *verbosity*."""
        ...

    def is_decorated(self) -> bool:
        """Return True when the sink supports ANSI styling / cursor control."""
        ...

    def as_output(self) -> Output:
        """Expose this printer as a cleo ``Output`` object."""
        ...

    def dynamic_line(self, prefix: str = "") -> "Printer":
        """Return a printer bound to a single, in-place rewritable output line."""
        ...
class NullPrinter(Printer):
    """Printer that silently discards all output (Null Object pattern)."""

    def print(
        self, message: str,
        verbosity: Verbosity = Verbosity.NORMAL, ) -> None:
        # Intentionally a no-op.
        pass

    def println(
        self, message: str,
        verbosity: Verbosity = Verbosity.NORMAL,
    ) -> None:
        # Intentionally a no-op.
        pass

    def is_decorated(self) -> bool:
        # A discarded stream never supports decoration.
        return False

    def as_output(self) -> Output:
        return NullIO().output

    def dynamic_line(self, prefix: str = "") -> "Printer":
        # A dynamic line on a null printer is still a null printer.
        return self

# NOTE: the class name is deliberately rebound to a singleton instance here,
# so importers get a ready-to-use no-op printer. The class itself becomes
# inaccessible after this line — importing modules use `NullPrinter` as a value.
NullPrinter = NullPrinter()
class Console(Printer):
    """Interactive console printer wrapping a cleo ``IO``.

    Tracks the total number of lines written (``_nlines``) so that
    ``DynamicLinePrinter`` instances can compute how far up the cursor must
    move to rewrite their reserved line. All writes are serialized through
    ``out_lock``.
    """

    def __init__(self, io: Optional[IO] = None):
        # Default to a stdin/stdout/stderr-backed IO when none is supplied.
        if io is None:
            inpt = ArgvInput()
            inpt.set_stream(sys.stdin)
            io = IO(inpt, StreamOutput(sys.stdout), StreamOutput(sys.stderr))
        self.io: Optional[IO] = None
        self.set_io(io)
        # NOTE(review): set_io installs a wrapper that increments _nlines, yet
        # _nlines is only initialized afterwards; this works because set_io
        # itself performs no writes — confirm if set_io ever starts writing.
        self._nlines = 0
        self.out_lock = RLock()

    def dynamic_line(self, prefix: str = "") -> Printer:
        """Reserve the next output line for in-place updates.

        Falls back to plain printing (returning self) when the output is not
        decorated, i.e. ANSI cursor control is unavailable.
        """
        if not self.is_decorated():
            self.println(prefix)
            return self
        # Capture the current line index *before* emitting the placeholder line.
        result = DynamicLinePrinter(self._nlines, prefix)
        self.println(prefix)
        return result

    def set_io(self, new_io: IO):
        # Monkey-patch the output's low-level _write so every emitted newline
        # bumps self._nlines; DynamicLinePrinter relies on this count to know
        # the cursor's vertical position.
        write_unwrap = new_io._output._write
        def write_wrap(message: str, new_line: bool = False):
            if new_line:
                self._nlines += 1
            self._nlines += message.count('\n')
            return write_unwrap(message, new_line)
        new_io._output._write = write_wrap
        # Set our own CLI styles
        formatter = new_io.output.formatter
        formatter.set_style("c1", Style("cyan"))
        formatter.set_style("c2", Style("default", options=["bold"]))
        formatter.set_style("info", Style("blue"))
        formatter.set_style("comment", Style("green"))
        formatter.set_style("warning", Style("yellow"))
        formatter.set_style("debug", Style("default", options=["dark"]))
        formatter.set_style("success", Style("green"))
        # Dark variants
        formatter.set_style("c1_dark", Style("cyan", options=["dark"]))
        formatter.set_style("c2_dark", Style("default", options=["bold", "dark"]))
        formatter.set_style("success_dark", Style("green", options=["dark"]))
        new_io.output.set_formatter(formatter)
        new_io.error_output.set_formatter(formatter)
        self.io = new_io

    def print(
        self, messages: Union[str, Iterable[str]],
        verbosity: Verbosity = Verbosity.NORMAL, ) -> None:
        """Write without a trailing newline, serialized via out_lock."""
        with self.out_lock:
            self.io.write(messages, verbosity=verbosity)

    def println(
        self, messages: Union[str, Iterable[str]] = "",
        verbosity: Verbosity = Verbosity.NORMAL,
    ) -> None:
        """Write followed by a newline, serialized via out_lock."""
        with self.out_lock:
            self.io.write_line(messages, verbosity=verbosity)

    def as_output(self) -> Output:
        return self.io.output

    def is_decorated(self) -> bool:
        return self.io.is_decorated()

# Module-level singleton used throughout the package.
console = Console()
class DynamicLinePrinter(Printer):
    """Printer bound to one previously-emitted console line.

    Each print rewrites that line in place using ANSI cursor movement,
    relative to the global ``console`` singleton's line counter.
    """

    def __init__(self, line_num: int, prefix: str):
        # Index (in console's _nlines counting) of the reserved line.
        self._line_num = line_num
        # Text re-emitted at the start of the line on every update.
        self._prefix = prefix

    def println(
        self, message: str,
        verbosity: Verbosity = Verbosity.NORMAL,
    ) -> None:
        # A dynamic line never advances; println is the same as print.
        self.print(message, verbosity)

    def print(
        self, message: str,
        verbosity: Verbosity = Verbosity.NORMAL, ) -> None:
        """Rewrite the reserved line with prefix + *message*."""
        with console.out_lock:
            # How many lines the cursor must travel up to reach our line.
            up = console._nlines - self._line_num
            if up > 0:
                console.print(f"\u001b[{up}A")  # move up (CSI A)
            # CSI 1000D: move to column 0; CSI K: erase to end of line.
            console.print("\u001b[1000D\u001b[K")  # move to start of line and clear it
            if self._prefix:
                console.print(self._prefix, verbosity)
            console.print(message, verbosity)
            if up > 0:
                console.print(f"\u001b[{up}B\u001b[1000D")  # move down and to the start of the line

    def is_decorated(self) -> bool:
        return console.is_decorated()

    def dynamic_line(self, prefix: str = "") -> "Printer":
        # Nested dynamic lines share the same physical line, with prefixes
        # concatenated.
        return DynamicLinePrinter(self._line_num, self._prefix + prefix)

    def as_output(self) -> Output:
        """Adapt this dynamic line to the cleo ``Output`` interface."""
        dout = console.as_output()
        dself = self
        class Out(Output):
            def __init__(self):
                super(Out, self).__init__(dout.verbosity, dout.is_decorated(), dout.formatter)
            def _write(self, message: str, new_line: bool = False) -> None:
                # Every write, newline or not, rewrites the reserved line.
                dself.println(message)
        return Out()
# Manual smoke test: prints a line using the custom c1/c2 styles.
if __name__ == '__main__':
    console.println("<c1>hello</c1> <c2>world</c2>")
from typing import List
from typing import Optional
from typing import Union
from cleo.io.inputs.argv_input import ArgvInput
from cleo.io.inputs.definition import Definition
class RunArgvInput(ArgvInput):
    """Argv input for the ``run`` command.

    Only options explicitly registered via :meth:`add_parameter_option` are
    parsed as options; every other token is forwarded verbatim as an argument
    to the command being run.
    """

    def __init__(
        self, argv: Optional[List[str]] = None, definition: Optional[Definition] = None
    ) -> None:
        super().__init__(argv, definition=definition)
        # Option tokens that belong to the host CLI rather than the child command.
        self._parameter_options = []

    @property
    def first_argument(self) -> Optional[str]:
        # Always report "run" so the application dispatches to the run command,
        # regardless of what the child command line looks like.
        return "run"

    def add_parameter_option(self, name: str) -> None:
        """Register an option token (e.g. ``--verbose``) to be parsed by us."""
        self._parameter_options.append(name)

    def has_parameter_option(
        self, values: Union[str, List[str]], only_params: bool = False
    ) -> bool:
        """Return True if any of *values* appears among the raw tokens.

        Only registered parameter options are considered. When *only_params*
        is True, scanning stops at the first literal ``--`` separator.
        """
        if not isinstance(values, list):
            values = [values]
        for token in self._tokens:
            if only_params and token == "--":
                return False
            for value in values:
                if value not in self._parameter_options:
                    continue
                # Options with values:
                # For long options, test for '--option=' at beginning
                # For short options, test for '-o' at beginning
                if value.find("--") == 0:
                    leading = value + "="
                else:
                    leading = value
                if token == value or leading != "" and token.find(leading) == 0:
                    return True
        return False

    def _parse(self) -> None:
        """Parse tokens, treating unregistered options as plain arguments."""
        parse_options = True
        self._parsed = self._tokens[:]
        try:
            token = self._parsed.pop(0)
        except IndexError:
            token = None
        while token is not None:
            if parse_options and token == "":
                self._parse_argument(token)
            elif parse_options and token == "--":
                # Everything after a bare "--" is an argument, never an option.
                parse_options = False
            elif parse_options and token.find("--") == 0:
                if token in self._parameter_options:
                    self._parse_long_option(token)
                else:
                    self._parse_argument(token)
            elif parse_options and token[0] == "-" and token != "-":
                if token in self._parameter_options:
                    self._parse_short_option(token)
                else:
                    self._parse_argument(token)
            else:
                self._parse_argument(token)
            try:
                token = self._parsed.pop(0)
            except IndexError:
                token = None
import json
import re
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from cleo.helpers import argument
from cleo.helpers import option
from poetry.core.utils import toml
from poetry.config.config import Config
from .command import Command
if TYPE_CHECKING:
from poetry.config.config_source import ConfigSource
class ConfigCommand(Command):
    """CLI command that reads and writes poetry configuration settings,
    repository definitions, credentials and certificates."""

    name = "config"
    description = "Manages configuration settings."
    arguments = [
        argument("key", "Setting key.", optional=True),
        argument("value", "Setting value.", optional=True, multiple=True),
    ]
    options = [
        option("list", None, "List configuration settings."),
        option("unset", None, "Unset configuration setting."),
        option("local", None, "Set/Get from the project's local configuration."),
    ]
    help = """This command allows you to edit the poetry config settings and repositories.
To add a repository:
    <comment>poetry config repositories.foo https://bar.com/simple/</comment>
To remove a repository (repo is a short alias for repositories):
    <comment>poetry config --unset repo.foo</comment>"""
    # Settings never shown by --list (they may contain secrets).
    LIST_PROHIBITED_SETTINGS = {"http-basic", "pypi-token"}

    @property
    def unique_config_values(self) -> Dict[str, Tuple[Any, Any, Any]]:
        """Map of scalar setting keys to (validator, normalizer, default)."""
        from pathlib import Path
        from poetry.config.config import boolean_normalizer
        from poetry.config.config import boolean_validator
        from poetry.locations import CACHE_DIR
        unique_config_values = {
            # NOTE(review): the default for "cache-dir" is CACHE_DIR/"virtualenvs",
            # identical to "virtualenvs.path" below — looks like a copy/paste;
            # confirm the intended default is not plain CACHE_DIR.
            "cache-dir": (
                str,
                lambda val: str(Path(val)),
                str(Path(CACHE_DIR) / "virtualenvs"),
            ),
            "virtualenvs.create": (boolean_validator, boolean_normalizer, True),
            "virtualenvs.in-project": (boolean_validator, boolean_normalizer, True),
            "virtualenvs.options.always-copy": (
                boolean_validator,
                boolean_normalizer,
                False,
            ),
            "virtualenvs.options.system-site-packages": (
                boolean_validator,
                boolean_normalizer,
                False,
            ),
            "virtualenvs.path": (
                str,
                lambda val: str(Path(val)),
                str(Path(CACHE_DIR) / "virtualenvs"),
            ),
            "experimental.new-installer": (
                boolean_validator,
                boolean_normalizer,
                True,
            ),
            "installer.parallel": (
                boolean_validator,
                boolean_normalizer,
                True,
            ),
        }
        return unique_config_values

    def handle(self) -> Optional[int]:
        """Dispatch on key/value/--list/--unset and apply the requested action.

        Branches, in order: --list; bare read of a key; scalar settings;
        ``repositories.*``; ``http-basic.* / pypi-token.*`` credentials;
        ``certificates.*``. Raises ValueError for unknown keys.
        """
        from pathlib import Path
        from poetry.config.file_config_source import FileConfigSource
        from poetry.core.pyproject.exceptions import PyProjectException
        from poetry.locations import CONFIG_DIR
        config = Config.load_global()
        config_file = Path(CONFIG_DIR) / "config.toml"
        try:
            # Merge project-local overrides on top of the global config.
            local_config_path = self.poetry.pyproject.project_management_files / "config.toml"
            if local_config_path.exists():
                local_config, _ = toml.load(self.poetry.pyproject.project_management_files / "config.toml")
                config.merge(local_config)
        except (RuntimeError, PyProjectException):
            # TODO: not sure when this happens - need to check later
            local_config_path = Path.cwd() / "etc/rp/config.toml"
        if self.option("local"):
            # Writes should target the project-local file instead of the global one.
            config.set_config_source(FileConfigSource(local_config_path))
        if not config_file.exists():
            config_file.parent.mkdir(parents=True, exist_ok=True)
            # 0600: the config file may later hold credentials.
            config_file.touch(mode=0o0600)
        if self.option("list"):
            self._list_configuration(config.all(), config.raw())
            return 0
        setting_key = self.argument("key")
        if not setting_key:
            return 0
        if self.argument("value") and self.option("unset"):
            raise RuntimeError("You can not combine a setting value with --unset")
        # show the value if no value is provided
        if not self.argument("value") and not self.option("unset"):
            m = re.match(r"^repos?(?:itories)?(?:\.(.+))?", self.argument("key"))
            if m:
                if not m.group(1):
                    # "repositories" without a name: dump them all.
                    value = {}
                    if config.get("repositories") is not None:
                        value = config.get("repositories")
                else:
                    repo = config.get("repositories.{}".format(m.group(1)))
                    if repo is None:
                        raise ValueError(
                            "There is no {} repository defined".format(m.group(1))
                        )
                    value = repo
                self.line(str(value))
            else:
                values = self.unique_config_values
                if setting_key not in values:
                    raise ValueError("There is no {} setting.".format(setting_key))
                value = config.get(setting_key)
                if not isinstance(value, str):
                    value = json.dumps(value)
                self.line(value)
            return 0
        values = self.argument("value")
        unique_config_values = self.unique_config_values
        if setting_key in unique_config_values:
            if self.option("unset"):
                return config.config_source.remove_property(setting_key)
            return self._handle_single_value(
                config.config_source,
                setting_key,
                unique_config_values[setting_key],
                values,
            )
        # handle repositories
        m = re.match(r"^repos?(?:itories)?(?:\.(.+))?", self.argument("key"))
        if m:
            if not m.group(1):
                raise ValueError("You cannot remove the [repositories] section")
            if self.option("unset"):
                repo = config.get("repositories.{}".format(m.group(1)))
                if repo is None:
                    raise ValueError(
                        "There is no {} repository defined".format(m.group(1))
                    )
                config.config_source.remove_property(
                    "repositories.{}".format(m.group(1))
                )
                return 0
            if len(values) == 1:
                url = values[0]
                config.config_source.add_property(
                    "repositories.{}.url".format(m.group(1)), url
                )
                return 0
            raise ValueError(
                "You must pass the url. "
                "Example: poetry config repositories.foo https://bar.com"
            )
        # handle auth
        m = re.match(r"^(http-basic|pypi-token)\.(.+)", self.argument("key"))
        if m:
            from poetry.utils.password_manager import PasswordManager
            password_manager = PasswordManager(config)
            if self.option("unset"):
                if m.group(1) == "http-basic":
                    password_manager.delete_http_password(m.group(2))
                elif m.group(1) == "pypi-token":
                    password_manager.delete_pypi_token(m.group(2))
                return 0
            if m.group(1) == "http-basic":
                if len(values) == 1:
                    username = values[0]
                    # Only username, so we prompt for password
                    password = self.secret("Password:")
                elif len(values) != 2:
                    raise ValueError(
                        "Expected one or two arguments "
                        "(username, password), got {}".format(len(values))
                    )
                else:
                    username = values[0]
                    password = values[1]
                password_manager.set_http_password(m.group(2), username, password)
            elif m.group(1) == "pypi-token":
                if len(values) != 1:
                    raise ValueError(
                        "Expected only one argument (token), got {}".format(len(values))
                    )
                token = values[0]
                password_manager.set_pypi_token(m.group(2), token)
            return 0
        # handle certs
        m = re.match(
            r"(?:certificates)\.([^.]+)\.(cert|client-cert)", self.argument("key")
        )
        if m:
            if self.option("unset"):
                config.auth_config_source.remove_property(
                    "certificates.{}.{}".format(m.group(1), m.group(2))
                )
                return 0
            if len(values) == 1:
                config.auth_config_source.add_property(
                    "certificates.{}.{}".format(m.group(1), m.group(2)), values[0]
                )
            else:
                raise ValueError("You must pass exactly 1 value")
            return 0
        raise ValueError("Setting {} does not exist".format(self.argument("key")))

    def _handle_single_value(
        self,
        source: "ConfigSource",
        key: str,
        callbacks: Tuple[Any, Any, Any],
        values: List[Any],
    ) -> int:
        """Validate, normalize and store a single scalar setting value."""
        validator, normalizer, _ = callbacks
        if len(values) > 1:
            raise RuntimeError("You can only pass one value.")
        value = values[0]
        if not validator(value):
            raise RuntimeError('"{}" is an invalid value for {}'.format(value, key))
        source.add_property(key, normalizer(value))
        return 0

    def _list_configuration(self, config: Dict, raw: Dict, k: str = "") -> None:
        """Recursively print settings as '<key> = <value>' lines.

        *k* accumulates the dotted prefix while descending into nested dicts;
        raw (un-normalized) values are shown alongside resolved ones when they
        differ.
        """
        orig_k = k
        for key, value in sorted(config.items()):
            if k + key in self.LIST_PROHIBITED_SETTINGS:
                continue
            raw_val = raw.get(key)
            if isinstance(value, dict):
                k += "{}.".format(key)
                self._list_configuration(value, raw_val, k=k)
                # Restore the prefix after recursing into the sub-table.
                k = orig_k
                continue
            elif isinstance(value, list):
                value = [
                    json.dumps(val) if isinstance(val, list) else val for val in value
                ]
                value = "[{}]".format(", ".join(value))
            if k.startswith("repositories."):
                message = "<c1>{}</c1> = <c2>{}</c2>".format(
                    k + key, json.dumps(raw_val)
                )
            elif isinstance(raw_val, str) and raw_val != value:
                # Show the raw value plus the resolved value as a comment.
                message = "<c1>{}</c1> = <c2>{}</c2> # {}".format(
                    k + key, json.dumps(raw_val), value
                )
            else:
                message = "<c1>{}</c1> = <c2>{}</c2>".format(k + key, json.dumps(value))
            self.line(message)

    def _get_setting(
        self,
        contents: Dict,
        setting: Optional[str] = None,
        k: Optional[str] = None,
        default: Optional[Any] = None,
    ) -> List[Tuple[str, str]]:
        """Collect (dotted-key, json-value) pairs for *setting* from *contents*.

        Falls back to *default* when the setting's top-level segment is absent.
        """
        orig_k = k
        if setting and setting.split(".")[0] not in contents:
            value = json.dumps(default)
            return [((k or "") + setting, value)]
        else:
            values = []
            for key, value in contents.items():
                if setting and key != setting.split(".")[0]:
                    continue
                if isinstance(value, dict) or key == "repositories" and k is None:
                    if k is None:
                        k = ""
                    k += re.sub(r"^config\.", "", key + ".")
                    if setting and len(setting) > 1:
                        # Descend one segment into the dotted setting path.
                        setting = ".".join(setting.split(".")[1:])
                    values += self._get_setting(
                        value, k=k, setting=setting, default=default
                    )
                    k = orig_k
                    continue
                if isinstance(value, list):
                    value = [
                        json.dumps(val) if isinstance(val, list) else val
                        for val in value
                    ]
                    value = "[{}]".format(", ".join(value))
                value = json.dumps(value)
                values.append(((k or "") + key, value))
            return values
from poetry.console import console
from poetry.console.commands.command import Command
class InstallCommand(Command):
    # The class docstring below is not mere documentation: it appears to be the
    # command's CLI signature spec (name, options, arguments) consumed by the
    # command framework — do not reformat it. (TODO confirm against Command base.)
    """
    install project dependencies
    install
    {--sync : Synchronize the environment with the locked packages and the specified groups.}
    {--dry-run : Output the operations but do not execute anything (implicitly enables --verbose).}
    {--extras=* : Extra sets of dependencies to install.}
    {--update : update the given packages to the last compatible version }
    {--lock-only : Do not perform operations (only update the lockfile). }
    {--editable : Add vcs/path dependencies as editable.}
    {--optional : add packages as an optional dependencies. }
    {--python= : Python version for which the dependency must be installed. }
    {--platform= : Platforms for which the dependency must be installed. }
    {--source= : Name of the source to use to install the package.}
    {--allow-prereleases : Accept prereleases.}
    {packages?* : The packages to add. }
    """
    help = """
    install project dependencies
    If you do not specify a version constraint, rp will choose a suitable one based on the available package versions.
    You can specify a package in the following forms:
    - A single name (<b>requests</b>)
    - A name and a constraint (<b>requests@^2.23.0</b>)
    - A git url (<b>git+https://github.com/python-poetry/poetry.git</b>)
    - A git url with a revision (<b>git+https://github.com/python-poetry/poetry.git#develop</b>)
    - A git SSH url (<b>git+ssh://github.com/python-poetry/poetry.git</b>)
    - A git SSH url with a revision (<b>git+ssh://github.com/python-poetry/poetry.git#develop</b>)
    - A file path (<b>../my-package/my-package.whl</b>)
    - A directory (<b>../my-package/</b>)
    - A url (<b>https://example.com/packages/my-package-0.1.0.tar.gz</b>)
    """

    def handle(self) -> int:
        """Install dependencies for the active project and all sub-projects.

        Sub-projects without a Python environment are skipped with a notice;
        all CLI options are forwarded verbatim to each project's install().
        """
        from poetry.app.relaxed_poetry import rp
        project = rp.active_project
        for subp in project.projects_graph():
            if subp.env:
                subp.install(
                    self.argument("packages"),
                    synchronize=self.option("sync"),
                    dry_run=self.option("dry-run"),
                    extras_strings=self.option("extras"),
                    update=self.option("update"),
                    lock_only=self.option("lock-only"),
                    editable=self.option("editable"),
                    optional=self.option("optional"),
                    python=self.option("python"),
                    platform=self.option("platform"),
                    source=self.option("source"),
                    allow_prereleases=self.option("allow-prereleases")
                )
            else:
                console.println(
                    f"<info>Skipping {subp.pyproject.name}, it does not requires python interpreter and therefore cannot have dependencies.</>\n"
                    "To change that, add a python dependency to <c1>pyproject.toml</c1>")
        return 0
from typing import TYPE_CHECKING
from cleo.helpers import argument
from cleo.helpers import option
from poetry.core.pyproject.tables import POETRY_TABLE
from poetry.core.utils.collections import nested_dict_get
from .command import Command
if TYPE_CHECKING:
from poetry.core.semver.version import Version
class VersionCommand(Command):
    """Shows the project version, or bumps it (writing back to pyproject.toml)
    when a semver string or bump rule is given."""

    name = "version"
    description = (
        "Shows the version of the project or bumps it when a valid "
        "bump rule is provided."
    )
    arguments = [
        argument(
            "version",
            "The version number or the rule to update the version.",
            optional=True,
        )
    ]
    options = [option("short", "s", "Output the version number only")]
    help = """\
The version command shows the current version of the project or bumps the version of
the project and writes the new version back to <comment>pyproject.toml</> if a valid
bump rule is provided.
The new version should ideally be a valid semver string or a valid bump rule:
patch, minor, major, prepatch, preminor, premajor, prerelease.
"""
    # Recognized bump-rule keywords (anything else is parsed as a literal version).
    RESERVED = {
        "major",
        "minor",
        "patch",
        "premajor",
        "preminor",
        "prepatch",
        "prerelease",
    }

    def handle(self) -> None:
        """Print the version, or bump it and persist to pyproject.toml."""
        version = self.argument("version")
        if version:
            version = self.increment_version(
                self.poetry.package.pretty_version, version
            )
            if self.option("short"):
                self.line("{}".format(version))
            else:
                self.line(
                    "Bumping version from <b>{}</> to <fg=green>{}</>".format(
                        self.poetry.package.pretty_version, version
                    )
                )
            # Persist the bumped version into the [tool.poetry] table.
            with self.poetry.pyproject.edit() as data:
                nested_dict_get(data, POETRY_TABLE)['version'] = version.text
        else:
            if self.option("short"):
                self.line("{}".format(self.poetry.package.pretty_version))
            else:
                self.line(
                    "<comment>{}</> <info>{}</>".format(
                        self.poetry.package.name, self.poetry.package.pretty_version
                    )
                )

    def increment_version(self, version: str, rule: str) -> "Version":
        """Return *version* bumped according to *rule*.

        *rule* may be one of the RESERVED keywords, or a literal version
        string which is simply parsed and returned.

        :raises ValueError: when the current version is not valid semver.
        """
        from poetry.core.semver.version import Version
        try:
            version = Version.parse(version)
        except ValueError:
            raise ValueError("The project's version doesn't seem to follow semver")
        if rule in {"major", "premajor"}:
            new = version.next_major()
            if rule == "premajor":
                new = new.first_prerelease()
        elif rule in {"minor", "preminor"}:
            new = version.next_minor()
            if rule == "preminor":
                new = new.first_prerelease()
        elif rule in {"patch", "prepatch"}:
            new = version.next_patch()
            if rule == "prepatch":
                new = new.first_prerelease()
        elif rule == "prerelease":
            if version.is_unstable():
                # Already a prerelease: advance the prerelease counter only.
                new = Version(version.epoch, version.release, version.pre.next())
            else:
                new = version.next_patch().first_prerelease()
        else:
            # Not a rule keyword: treat as a literal version string.
            new = Version.parse(rule)
        return new
from pathlib import Path
from typing import Optional
from cleo.helpers import option
from cleo.ui.confirmation_question import ConfirmationQuestion
from cleo.ui.question import Question
from .command import Command
from .. import console
class PublishCommand(Command):
    """Builds (optionally) and uploads the project's distributions to a
    remote repository, iterating over every sub-project in the graph."""

    name = "publish"
    description = "Publishes a package to a remote repository."
    options = [
        option(
            "repository", "r", "The repository to publish the package to.", flag=False
        ),
        option("username", "u", "The username to access the repository.", flag=False),
        option("password", "p", "The password to access the repository.", flag=False),
        option(
            "cert", None, "Certificate authority to access the repository.", flag=False
        ),
        option(
            "client-cert",
            None,
            "Client certificate to access the repository.",
            flag=False,
        ),
        option("build", None, "Build the package before publishing."),
        option("dry-run", None, "Perform all actions except upload the package."),
    ]
    help = """The publish command builds and uploads the package to a remote repository.
By default, it will upload to PyPI but if you pass the --repository option it will
upload to it instead.
The --repository option should match the name of a configured repository using
the config command.
"""
    loggers = ["poetry.masonry.publishing.publisher"]

    def handle(self) -> Optional[int]:
        """Publish each env-backed sub-project; returns 1 on abort/no files.

        When the project is a parent, a shared credential completer is used so
        the user can opt to reuse credentials across sub-projects.
        """
        from poetry.publishing.publisher import Publisher
        cred_completer = _CredentialCompleter().complete if self.poetry.pyproject.is_parent() else None
        for poetry in self.poetry.projects_graph():
            # Sub-projects without a Python environment have nothing to publish.
            if poetry.env is None:
                continue
            publisher = Publisher(poetry, self.io, user_credential_completer=cred_completer)
            # Building package first, if told
            if self.option("build"):
                if publisher.files:
                    # Existing artifacts would be re-published too; ask first.
                    if not self.confirm(
                        "There are <info>{}</info> files ready for publishing. "
                        "Build anyway?".format(len(publisher.files))
                    ):
                        self.line_error("<error>Aborted!</error>")
                        return 1
                self.call("build")
            files = publisher.files
            if not files:
                self.line_error(
                    "<error>No files to publish. "
                    "Run poetry build first or use the --build option.</error>"
                )
                return 1
            self.line("")
            cert = Path(self.option("cert")) if self.option("cert") else None
            client_cert = (
                Path(self.option("client-cert")) if self.option("client-cert") else None
            )
            publisher.publish(
                self.option("repository"),
                self.option("username"),
                self.option("password"),
                cert,
                client_cert,
                self.option("dry-run"),
            )
class _CredentialCompleter:
def __init__(self):
self.reuse_cred = None
self.username = None
self.password = None
def complete(self, username, password):
if self.reuse_cred is None or not self.reuse_cred:
if username is None:
username = Question("Username:").ask(console.io)
# skip password input if no username is provided, assume unauthenticated
if username and password is None:
qpassword = Question("Password:")
qpassword.hide(True)
password = qpassword.ask(console.io)
self.username = username
self.password = password
if self.reuse_cred is None:
self.reuse_cred = ConfirmationQuestion(
"Should I Use these credentials for all other sub projects?").ask(console.io)
return self.username, self.password | /relaxed_poetry-0.5.2-py3-none-any.whl/poetry/console/commands/publish.py | 0.788909 | 0.190159 | publish.py | pypi |
from typing import Optional
from typing import TYPE_CHECKING
from cleo.helpers import argument
from cleo.helpers import option
from cleo.io.outputs.output import Verbosity
from ..init import InitCommand
from ... import NullPrinter
if TYPE_CHECKING:
from poetry.console.commands.show import ShowCommand
class DebugResolveCommand(InitCommand):
    """Runs the dependency resolver and prints the result, either as a table
    or as a tree, without installing anything."""

    name = "debug resolve"
    description = "Debugs dependency resolution."
    arguments = [
        argument("package", "The packages to resolve.", optional=True, multiple=True)
    ]
    options = [
        option(
            "extras",
            "E",
            "Extras to activate for the dependency.",
            flag=False,
            multiple=True,
        ),
        option("python", None, "Python version(s) to use for resolution.", flag=False),
        option("tree", None, "Display the dependency tree."),
        option("install", None, "Show what would be installed for the current system."),
    ]
    loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"]

    def handle(self) -> Optional[int]:
        """Resolve the project's (or the given packages') dependency graph.

        With --tree, prints a tree per top-level requirement; otherwise prints
        a compact table, optionally filtered to the current system (--install).
        """
        from poetry.core.packages.project_package import ProjectPackage
        from poetry.factory import Factory
        from poetry.puzzle import Solver
        from poetry.repositories.pool import Pool
        from poetry.repositories.repository import Repository
        packages = self.argument("package")
        if not packages:
            package = self.poetry.package
        else:
            # Using current pool for determine_requirements()
            self._pool = self.poetry.pool
            # Synthetic root package carrying the requested dependencies.
            package = ProjectPackage(
                self.poetry.package.name, self.poetry.package.version
            )
            # Silencing output
            verbosity = self.io.output.verbosity
            self.io.output.set_verbosity(Verbosity.QUIET)
            requirements = self._determine_requirements(packages)
            self.io.output.set_verbosity(verbosity)
            for constraint in requirements:
                name = constraint.pop("name")
                extras = []
                # --extras values may be space-separated lists.
                for extra in self.option("extras"):
                    if " " in extra:
                        extras += [e.strip() for e in extra.split(" ")]
                    else:
                        extras.append(extra)
                constraint["extras"] = extras
                package.add_dependency(Factory.create_dependency(name, constraint))
        package.python_versions = self.option("python") or (
            self.poetry.package.python_versions
        )
        env = self.poetry.env
        solver = Solver(self.poetry, Repository(), Repository(), package=package)
        ops = solver.solve().calculate_operations()
        self.line("")
        self.line("Resolution results:")
        self.line("")
        if self.option("tree"):
            # Delegate tree rendering to the "show" command's helpers.
            show_command: ShowCommand = self.application.find("show")
            show_command.init_styles(self.io)
            packages = [op.package for op in ops]
            repo = Repository(packages)
            requires = package.all_requires
            for pkg in repo.packages:
                for require in requires:
                    if pkg.name == require.name:
                        show_command.display_package_tree(self.io, pkg, repo)
                        break
            return 0
        table = self.table([], style="compact")
        table.style.set_vertical_border_chars("", " ")
        rows = []
        if self.option("install"):
            # Re-solve against the already-resolved set, restricted to the
            # current environment, to find what would actually be installed.
            # NOTE(review): `pool` is built here but never passed to the second
            # Solver — looks unused; confirm whether it should be.
            pool = Pool()
            locked_repository = Repository()
            for op in ops:
                locked_repository.add_package(op.package)
            pool.add_repository(locked_repository)
            solver = Solver(self.poetry, Repository(), Repository(), printer=NullPrinter, package=package)
            with solver.use_environment(env):
                ops = solver.solve().calculate_operations()
        for op in ops:
            if self.option("install") and op.skipped:
                continue
            pkg = op.package
            row = [
                "<c1>{}</c1>".format(pkg.complete_name),
                "<b>{}</b>".format(pkg.version),
                "",
            ]
            # Third column: environment marker, when the package has one.
            if not pkg.marker.is_any():
                row[2] = str(pkg.marker)
            rows.append(row)
        table.set_rows(rows)
        table.render()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.