code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from sqlalchemy import create_engine
import numpy as np
def get_engine(dbms: str, username: str, password: str, host: str, port: int, database: str):
    """Create a SQLAlchemy engine for a MySQL or PostgreSQL database.

    Parameters
    ----------
    dbms : {"mysql", "postgresql"}
        Target DBMS. (Fixed: the old docstring said "postgres", but only
        "postgresql" is accepted by the assertion below.)
    username, password, host, database : str
        Connection credentials and target database name.
    port : int
        TCP port of the database server.

    Returns
    -------
    sqlalchemy.engine.Engine
    """
    assert dbms in ["mysql", "postgresql"], "available dbms values are mysql and postgresql only"
    if dbms == "mysql":
        # MySQL goes through the pymysql driver.
        scheme = "mysql+pymysql"
    else:  # dbms == "postgresql", guaranteed by the assert above
        scheme = "postgresql"
    connection_url = f"{scheme}://{username}:{password}@{host}:{port}/{database}"
    engine = create_engine(url=connection_url)
    return engine
def upload_df(dbms: str, engine, df, pk_list: list, scheme: str, table: str):
    """Upsert a DataFrame into a table (insert; update non-key columns on conflict).

    Parameters
    -----------
    dbms : {"mysql", "postgres"}
        NOTE(review): get_engine() accepts "postgresql" while this function
        expects "postgres" -- confirm which spelling callers actually use.
    engine : SQLAlchemy engine (as returned by get_engine())
    df : pd.DataFrame
    pk_list : list of str
        Primary-key column names; these are excluded from the UPDATE clause.
    scheme : str
        Schema/database name. Concatenated into the SQL unescaped -- must come
        from trusted configuration, never from user input.
    table : str
        Table name (same caveat as `scheme`).

    Returns
    -------
    bool
        True on success, False when the statement failed (error is printed).
    """
    copy_df = df.copy()
    # Replace NaN with None so the DB driver writes SQL NULL instead of NaN.
    copy_df.replace({np.nan: None}, inplace=True)
    if dbms == "mysql":
        cols = ', '.join('`{0}`'.format(c) for c in copy_df.columns)
        strings = ', '.join('%s' for _ in copy_df.columns)
        # Non-key columns get refreshed when the primary key already exists.
        update_values = ', '.join('`{0}`=VALUES(`{0}`)'.format(c) for c in copy_df.drop(columns=pk_list).columns)
        values = list(copy_df.itertuples(index=False, name=None))
        sql = "INSERT INTO `" + scheme + "`.`" + table + "` ({0}) VALUES({1}) ON DUPLICATE KEY UPDATE {2};".format(cols, strings, update_values)
    elif dbms == "postgres":
        cols = ', '.join('"{0}"'.format(c) for c in copy_df.columns)
        strings = ', '.join('%s' for _ in copy_df.columns)
        pk_values = ', '.join('"{0}"'.format(c) for c in pk_list)
        update_values = ', '.join('"{0}"=EXCLUDED."{0}"'.format(c) for c in copy_df.drop(columns=pk_list).columns)
        values = list(copy_df.itertuples(index=False, name=None))
        sql = 'INSERT INTO ' + scheme + '."' + table + '" ({0}) VALUES({1}) ON CONFLICT ({2}) DO UPDATE SET {3};'.format(cols, strings, pk_values, update_values)
    else:
        raise ValueError("dbms 명이 잘못되었습니다")
    try:
        with engine.connect() as connection:
            connection.execute(sql, values)
        return True
    except Exception as e:
        # Best-effort: report the failure and signal it via the return value
        # instead of propagating to the caller.
        print(e)
        return False
import typing as t
import more_itertools
import sqlalchemy as sa
from sqlalchemy import ColumnElement
from sqlalchemy.exc import NoResultFound
from sqlalchemy.orm import DeclarativeBase, Session, joinedload
__all__ = ['BaseRepository']
T = t.TypeVar('T', bound=DeclarativeBase)
class BaseRepository(t.Generic[T]):
    """
    Base repository class

    A thin data-access layer over a SQLAlchemy ``Session`` for a single ORM
    model (``MODEL_CLASS``).

    Exceptions are raised from sqlalchemy.exc
    Every session operations are flushed
    """
    # Maps model-class name -> repository subclass; populated automatically
    # by __init_subclass__.
    REGISTRY: dict[str, t.Type['BaseRepository[T]']] = {}
    # The ORM model this repository manages; subclasses must set it.
    MODEL_CLASS: t.Type[T]
    # Upper bound on the number of instances handled per chunk/flush.
    BATCH_SIZE: int = 1000

    def __init_subclass__(cls, **kwargs):
        # Auto-register every subclass under its model's name, refusing
        # duplicates so two repositories cannot silently manage one model.
        super().__init_subclass__(**kwargs)
        if cls.MODEL_CLASS.__name__ in BaseRepository.REGISTRY:
            raise KeyError(f'Repository for model {cls.MODEL_CLASS.__name__} already exists in registry')
        BaseRepository.REGISTRY[cls.MODEL_CLASS.__name__] = cls

    def __init__(self, session: Session):
        self.session = session

    @classmethod
    def get_repository_from_model(cls, session, model: t.Type[T]) -> 'BaseRepository[T]':
        """Return the registered repository for *model*, or an ad-hoc one.

        When no subclass is registered, a repository is created on the fly
        with MODEL_CLASS set on the instance only (not added to REGISTRY).
        """
        if model.__name__ in BaseRepository.REGISTRY:
            return BaseRepository.REGISTRY[model.__name__](session)
        new_repo = cls(session)
        new_repo.MODEL_CLASS = model
        return new_repo

    def _convert_params_to_model_fields(self, **params) -> list[ColumnElement]:
        """Turn keyword filters into ``Model.field == value`` expressions."""
        result = []
        for name, value in params.items():
            field = getattr(self.MODEL_CLASS, name)
            result.append(t.cast(ColumnElement, field == value))
        return result

    def _validate_type(self, instances: list[T]) -> bool:
        """Check batch size and that every element is a MODEL_CLASS instance.

        :raises ValueError: on an oversized batch or a foreign instance.
        """
        if len(instances) > self.BATCH_SIZE:
            raise ValueError('Batch size exceeded')
        if not all([isinstance(instance, self.MODEL_CLASS) for instance in instances]):
            raise ValueError(f'Not all models are instance of class {self.MODEL_CLASS.__name__}')
        return True

    def _flush_obj(self, obj):
        # Flush inside a SAVEPOINT so a failure does not poison the caller's
        # outer transaction.
        self.session.add(obj)
        with self.session.begin_nested():
            self.session.flush()

    def get_or_create(self, **params) -> tuple[T, bool]:
        """Return ``(instance, created)``; creates the row when lookup fails."""
        try:
            return self.get(*self._convert_params_to_model_fields(**params)), False
        except NoResultFound:
            return self.create(**params), True

    def get_query(
        self,
        *where_args: ColumnElement,
        joins: list | None = None,
        select: t.Tuple[ColumnElement] | None = None,
        order_by=None,
        joined_loads: tuple | None = None,
    ) -> sa.Select:
        """Build the SELECT shared by all read methods: optional projection,
        joins, ordering and joinedload() eager-loading options."""
        query = sa.select(*select) if select else sa.select(self.MODEL_CLASS)
        query = query.where(*where_args).order_by(order_by)
        if joins:
            # A tuple entry expands to join(target, onclause); anything else
            # is passed as a plain join target.
            for join in joins:
                query = query.join(*join) if isinstance(join, tuple) else query.join(join)
        if joined_loads:
            query = query.options(*[joinedload(j) for j in joined_loads])
        return query

    # read methods
    def get(self, *where: ColumnElement, joins: list | None = None, joined_loads: tuple | None = None) -> T:
        """
        :returns: one
        :raises NoResultFound: if nothing was found
        :raises MultipleResultsFound: if found more than one record
        """
        stmt = self.get_query(*where, joins=joins, joined_loads=joined_loads)
        return self.session.scalars(stmt).unique().one()

    def get_or_none(
        self, *where: ColumnElement, joins: list | None = None, joined_loads: tuple | None = None
    ) -> T | None:
        """Like get(), but returns None instead of raising NoResultFound."""
        stmt = self.get_query(*where, joins=joins, joined_loads=joined_loads)
        return self.session.scalars(stmt).unique().one_or_none()

    def find(
        self, *where, joins: list | None = None, order_by=None, joined_loads: tuple | None = None
    ) -> t.Sequence[T]:
        """Return all rows matching the filters (possibly an empty sequence)."""
        stmt = self.get_query(*where, joins=joins, order_by=order_by, joined_loads=joined_loads)
        return self.session.scalars(stmt).unique().all()

    # write methods
    def create(self, **params) -> T:
        """Instantiate MODEL_CLASS from params, then add and flush it."""
        obj = self.MODEL_CLASS(**params)
        self._flush_obj(obj)
        return obj

    def create_batch(self, instances: list[T]) -> list[T]:
        """Add pre-built instances in BATCH_SIZE chunks, one SAVEPOINT each."""
        for chunk in more_itertools.chunked(instances, self.BATCH_SIZE):
            with self.session.begin_nested():
                self._validate_type(chunk)
                self.session.add_all(chunk)
                self.session.flush()
        return instances

    def create_batch_from_dicts(self, data: list[dict]) -> list[T]:
        """Create one instance per dict.

        NOTE: each create() flushes in its own SAVEPOINT, unlike
        create_batch() which flushes whole chunks.
        """
        instances = []
        for chunk in more_itertools.chunked(data, self.BATCH_SIZE):
            result = [self.create(**item) for item in chunk]
            instances.extend(result)
        return instances
from __future__ import print_function
import time
from datetime import datetime
from elasticsearch import Elasticsearch, __version__ as es_client_version
class ESQuery(object):
    """Query helper for time-partitioned (one-index-per-UTC-day) Elasticsearch data.

    Index names are built as ``<index_prefix><date>`` using
    ``index_time_format``; time arguments are minutes relative to "now" or
    concrete datetimes.
    """

    def __init__(self, es_hosts, index_prefix, timestamp_field, index_time_format='%Y.%m.%d', es_user='', es_passwd=''):
        # Only send basic-auth credentials when both user and password are set.
        self.client = Elasticsearch(hosts=es_hosts, http_auth=(es_user, es_passwd)) if es_user and es_passwd else Elasticsearch(hosts=es_hosts)
        self.index_prefix = index_prefix
        self.timestamp_field = timestamp_field
        self.index_time_format = index_time_format

    def get_mapping(self, fields=None):
        """Retrieve mapping definition of today's index, or of specific fields.

        http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html
        """
        # (0, 0) means "now" with zero duration, i.e. just today's index.
        indexes = self.compute_indexes(*self.compute_start_end_timestamp(0, 0))
        if fields:
            res = self.client.indices.get_field_mapping(index=indexes,
                                                        fields=fields)
        else:
            res = self.client.indices.get_mapping(index=indexes)
        return res

    def compute_start_end_timestamp(self, start, duration):
        """Return ``(start, end)`` as epoch seconds.

        ``start`` may be an int offset in minutes relative to now (negative =
        in the past) or a datetime; ``duration`` is a length in minutes.
        """
        if isinstance(start, int):
            now_timestamp = int(time.time())
            start_timestamp = now_timestamp + start * 60
        else:
            start_timestamp = int(time.mktime(start.timetuple()))
        end_timestamp = start_timestamp + duration * 60
        return start_timestamp, end_timestamp

    def compute_time_range(self, start_timestamp, end_timestamp):
        """Build the ES range clause over the configured timestamp field."""
        time_range = {self.timestamp_field: {'gte': start_timestamp,
                                             'lte': end_timestamp,
                                             'format': "epoch_second"}}
        return time_range

    def compute_indexes(self, start_timestamp, end_timestamp):
        """Return a comma-separated list of daily index names covering the span."""
        start_utc_datetime = datetime.utcfromtimestamp(start_timestamp)
        end_utc_datetime = datetime.utcfromtimestamp(end_timestamp)
        # One index per UTC day, inclusive of both endpoints.
        ordinal_range = range(start_utc_datetime.toordinal(), end_utc_datetime.toordinal() + 1)
        indexes = ','.join('%s%s' % (self.index_prefix,
                                     datetime.fromordinal(i).strftime(self.index_time_format))
                           for i in ordinal_range)
        return indexes

    def make_body(self, time_range=None, query_string=None, term_dict=None,
                  aggregations=None, sort=None,
                  **kw):
        """Assemble a search body; unused/None clauses are pruned later by
        ``_filter_None`` (see query())."""
        must_list = [{"range": time_range}]
        if term_dict:
            must_list += [{"term": {k: v}} for k, v in term_dict.items()]
        body = {
            "query": {
                "bool": {
                    "must": {
                        "query_string": query_string,
                    },
                    "filter": {
                        "bool": {
                            "must": must_list,
                            # "must_not": [],
                        }
                    }
                }
            },
            "aggs": aggregations,
            "sort": sort,
        }
        return body

    @staticmethod
    def _filter_None(obj):
        """Recursively prune falsy entries from nested dict/list structures.

        NOTE: despite the name this drops *any* falsy leaf (None, 0, '',
        empty containers) from dict values and list items; make_body() relies
        on this to remove unused query clauses. Scalars are returned
        unchanged.  (Fixed: float leaves previously fell through to the dict
        branch and raised AttributeError.)
        """
        if obj is None or isinstance(obj, (int, float, str)):
            return obj
        if isinstance(obj, list):
            # Evaluate each item once (the old code recursed twice per item).
            filtered = [ESQuery._filter_None(i) for i in obj]
            return [i for i in filtered if i]
        filtered = {k: ESQuery._filter_None(v) for k, v in obj.items()}
        return {k: v for k, v in filtered.items() if v}

    def query(self, lucene_query_string="*", term_dict=None, aggregations=None, sort=None,
              start=-15, duration=15, size=0):
        """Search the indices covering [start, start+duration] minutes.

        Defaults query the last 15 minutes; ``size=0`` returns only hit
        counts / aggregations.
        """
        start_timestamp, end_timestamp = self.compute_start_end_timestamp(start, duration)
        time_range = self.compute_time_range(start_timestamp, end_timestamp)
        indexes = self.compute_indexes(start_timestamp, end_timestamp)
        # "*" matches everything, so skip the query_string clause entirely.
        if lucene_query_string and lucene_query_string != '*':
            query_string = dict(query=lucene_query_string,
                                analyze_wildcard=True)
        else:
            query_string = None
        body = self.make_body(time_range=time_range, query_string=query_string,
                              term_dict=term_dict,
                              aggregations=aggregations, sort=sort)
        body = ESQuery._filter_None(body)
        params = dict(index=indexes,
                      size=size,
                      body=body)
        res = self.client.search(**params)
        return res
if __name__ == '__main__':
    # Ad-hoc smoke test for ESQuery._filter_None: falsy entries (None, empty
    # lists/dicts) should be pruned from the nested query body while truthy
    # leaves ('test', 'yes') survive.
    body = {
        "query": {
            "bool": {
                "must": {
                    "query_string": 'test',
                },
                "filter": {
                    "bool": {
                        "must": [
                            {"range": 'yes'},
                            {"term": None},
                        ],
                        "must_not": [],
                    }
                }
            }
        },
        "aggs": None,
    }
    print(ESQuery._filter_None(body))
from collections import deque
class Sa2SeedKey:
    """Interpreter for the SA2 seed/key byte-code used in diagnostic login.

    The ``instruction_tape`` is a small program of one-byte opcodes (some
    followed by operands) that transforms the 32-bit ``seed`` into the key
    returned by :meth:`execute`.
    """

    register: int
    carry_flag: int
    instruction_tape: bytearray
    instruction_pointer: int
    for_pointers: deque
    for_iterations: deque

    def __init__(self, instruction_tape, seed):
        self.instruction_tape = instruction_tape
        self.register = seed
        # All interpreter state is per-instance.  (Fixed: carry_flag,
        # instruction_pointer and both deques used to be class attributes,
        # so the mutable deques were shared between every instance.)
        self.carry_flag = 0
        self.instruction_pointer = 0
        self.for_pointers = deque()
        self.for_iterations = deque()

    def _read_u32(self):
        """Read the 4-byte big-endian operand following the current opcode."""
        operands = self.instruction_tape[self.instruction_pointer + 1:self.instruction_pointer + 5]
        return operands[0] << 24 | operands[1] << 16 | operands[2] << 8 | operands[3]

    def rsl(self):
        """Rotate the register left by one bit (carry = bit shifted out)."""
        self.carry_flag = self.register & 0x80000000
        self.register = self.register << 1
        if self.carry_flag:
            self.register = self.register | 0x1
        self.register = self.register & 0xFFFFFFFF
        self.instruction_pointer += 1

    def rsr(self):
        """Rotate the register right by one bit (carry = bit shifted out)."""
        self.carry_flag = self.register & 0x1
        self.register = self.register >> 1
        if self.carry_flag:
            self.register = self.register | 0x80000000
        self.instruction_pointer += 1

    def add(self):
        """Add a 32-bit immediate; carry is set on unsigned overflow."""
        self.carry_flag = 0
        output_register = self.register + self._read_u32()
        if output_register > 0xffffffff:
            self.carry_flag = 1
            output_register = output_register & 0xffffffff
        self.register = output_register
        self.instruction_pointer += 5

    def sub(self):
        """Subtract a 32-bit immediate; carry is set on unsigned borrow."""
        self.carry_flag = 0
        output_register = self.register - self._read_u32()
        if output_register < 0:
            self.carry_flag = 1
            output_register = output_register & 0xffffffff
        self.register = output_register
        self.instruction_pointer += 5

    def eor(self):
        """XOR the register with a 32-bit immediate."""
        self.register = self.register ^ self._read_u32()
        self.instruction_pointer += 5

    def for_loop(self):
        """Open a counted loop; the single operand byte is the iteration count."""
        operands = self.instruction_tape[self.instruction_pointer + 1:self.instruction_pointer + 2]
        # Store count-1 remaining iterations; the first pass happens by
        # falling through to the loop body.
        self.for_iterations.appendleft(operands[0] - 1)
        self.instruction_pointer += 2
        # Remember where the loop body starts so next_loop() can jump back.
        self.for_pointers.appendleft(self.instruction_pointer)

    def next_loop(self):
        """Close the innermost loop: jump back or fall through when done."""
        if self.for_iterations[0] > 0:
            self.for_iterations[0] -= 1
            self.instruction_pointer = self.for_pointers[0]
        else:
            self.for_iterations.popleft()
            self.for_pointers.popleft()
            self.instruction_pointer += 1

    # bcc = branch conditional (taken when carry is clear)
    def bcc(self):
        operands = self.instruction_tape[self.instruction_pointer + 1:self.instruction_pointer + 2]
        # Skip distance is relative to the opcode, hence the +2 for the
        # opcode and operand bytes themselves.
        skip_count = operands[0] + 2
        if self.carry_flag == 0:
            self.instruction_pointer += skip_count
        else:
            self.instruction_pointer += 2

    # bra = branch unconditional
    def bra(self):
        operands = self.instruction_tape[self.instruction_pointer + 1:self.instruction_pointer + 2]
        skip_count = operands[0] + 2
        self.instruction_pointer += skip_count

    def finish(self):
        """End-of-program opcode; advancing past it terminates execute()."""
        self.instruction_pointer += 1

    def execute(self):
        """Run the tape to completion and return the resulting 32-bit key.

        :raises KeyError: on an unknown opcode.
        """
        instruction_set = {
            0x81: self.rsl,
            0x82: self.rsr,
            0x93: self.add,
            0x84: self.sub,
            0x87: self.eor,
            0x68: self.for_loop,
            0x49: self.next_loop,
            0x4A: self.bcc,
            0x6B: self.bra,
            0x4C: self.finish
        }
        while self.instruction_pointer < len(self.instruction_tape):
            instruction_set[self.instruction_tape[self.instruction_pointer]]()
        return self.register
# Saa
> _Making Time Speak!_ 🎙️
Translating time into human-friendly spoken expressions

**Saa** allows you to effortlessly translate time into human-friendly spoken expressions. The word `saa` means `hour` in Swahili, and this package aims to humanize time expressions across languages. It is built using pure Python standard libraries.
```python
from datetime import time
from saa import Clock
clock = Clock("en")
clock("11:15") # 'quarter past eleven'
ur = Clock("da")
t = time(hour=7, minute=30)
ur(t) # 'halvotte'
```
## Features
- Convert time into spoken expressions in various languages.
- Easy-to-use API with a simple and intuitive design.
- Pure Python implementation with no external dependencies.
- Extensible architecture for adding support for additional languages using the plugin design pattern.
- Compatible with Python 3.8 and higher.
## Installation
You can install **Saa** using pip:
```shell
pip install -U saa
```
## Quick Start
Here's a simple example to get you started:
```python
from saa import Clock
# Create a Clock instance with the desired language (e.g., "en" for English)
clock = Clock("en")
# Translate time into a human-friendly spoken expression
# supports also datetime and time. .e.g. time(hour=11, minute=45)
spoken_time = clock("11:45")
print(spoken_time)
# Output: "quarter to twelve"
```
<details>
<summary>Using Saa with LangChain 🦜🔗</summary>
```python
from datetime import datetime
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langchain import SerpAPIWrapper
from saa import Clock
search = SerpAPIWrapper()
clock = Clock("en")
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="Saa",
func=lambda x: f"It is {clock(datetime.now())}",
description=("A Current Timer teller. Use this more s about what is current "
"time, like 'what time is it?' or 'what is the current clock?'"),
return_direct=False,
),
]
agent = initialize_agent(
tools,
OpenAI(temperature=0),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
if __name__ == "__main__":
user_input = input("Human: ")
print(agent.run(user_input))
```
Example:
Prompt: `How many minutes are left before it is a quarter past twelve? Think step by step`

</details>
<details>
<summary>Adding New Language</summary>
Using `Kiswahili` as an example as ABC of how to add a new language
1. Create a folder with under the `saa/luga` directory, using the ISO 639-1 language code
```bash
mkdir saa/luga/sw && touch saa/luga/sw/__init__.py
```
2. Contents of `__init__.py` must have the following pattern
```python
from dataclasses import dataclass
from saa.core.language import Luga
@dataclass(init=False, eq=False, repr=False, frozen=False)
class LanguageName(Luga):
...
class Language(LanguageName):
pass
```
So for `Swahili` the skeleton of `saa/luga/sw/__init__.py` would be:
```python
...
@dataclass(init=False, eq=False, repr=False, frozen=False)
class Swahili(Luga):
...
class Language(Swahili):
pass
...
```
Since we are implementing `Luga`, our tasks now is to implement both the properties (`time`, `number_connector`, `connect_format`) and static methods (`time_logic`, `post_logic`).
In Swahili `class`, time is expressed in terms of hour and then minutes. time_indicator
```python
time = {
"to": "saa {hour} na dakika {minute} time_indicator",
"past": "saa {hour} kasoro dakika {minute}time_indicator",
0: "saa {hour} time_indicator",
15: "saa {hour} na robo time_indicator",
45: "saa {hour} kasorobo time_indicator",
30: "saa {hour} na nusu time_indicator",
}
```
The numbers connector is Swahili is `na`, and the connection format is "{tens_digits @ index 0} {[number_connector] @ index 1} {ones_digits @ index 2}"
```python
number_connector = "na"
connect_format = "{0} {1} {2}"
```
Given the implementation of the numbers converter, we will include 11-19 explicitly, even though we could deduce them the same way as the 20s-50s. The numbers are as follows:
```python
numbers = {
0: "sifuri",
1: "moja",
2: "mbili",
3: "tatu",
4: "nne",
5: "tano",
6: "sita",
7: "saba",
8: "nane",
9: "tisa",
10: "kumi",
11: "kumi na moja",
12: "kumi na mbili",
13: "kumi na tatu",
14: "kumi na nne",
15: "kumi na tano",
16: "kumi na sita",
17: "kumi na saba",
18: "kumi na nane",
19: "kumi na tisa",
20: "ishirini",
30: "thelathini",
40: "arobaini",
50: "hamsini",
}
```
The major task is the time logic. In Swahili, 7 AM is the first hour of the morning (asubuhi), while 7 PM is the first hour of the evening (jioni). 6 AM is the 12th hour of the morning, while 6 PM is the 12th hour of the evening.
```
"""
0 - 11 asubuhi
12 - 15 mchana
16 - 19 jioni
20 - 23 usiku
"""
day_divisions = {
0: "asubuhi",
1: "asubuhi",
2: "asubuhi",
3: "asubuhi",
4: "asubuhi",
5: "asubuhi",
6: "asubuhi",
7: "asubuhi",
8: "asubuhi",
9: "asubuhi",
10: "asubuhi",
11: "asubuhi",
12: "mchana",
13: "mchana",
14: "mchana",
15: "mchana",
16: "jioni",
17: "jioni",
18: "jioni",
19: "jioni",
20: "usiku",
21: "usiku",
22: "usiku",
23: "usiku",
}
@staticmethod
def post_logic(text: str) -> str:
return text
```
Time to write tests ...
</details>
## Supported Languages
**Saa** currently supports the following languages:
- English (`en`)
- Danish (`da`)
- Swahili (`sw`)
Coming ...
- French (`fr`)
- Spanish (`es`)
- German (`de`)
- Italian (`it`)
## Extending Language Support
One of the key strengths of **Saa** is its extensible architecture, allowing you to easily add support for additional languages. To add a new language, follow these steps:
1. Create a new directory under the `saa/luga` directory, using the ISO 639-1 language code as the filename (e.g., `fr` for French) and create a Python `__init__.py`.
2. Implement the necessary functions in the new file to translate time into spoken expressions of the target language.
3. Test the new language integration thoroughly to ensure accurate and reliable translations.
4. Consider submitting a pull request to contribute your new language support to the main **Saa** repository.
We welcome contributions from the community to expand language support and make **Saa** even more versatile!
## Contributing
If you'd like to contribute to **Saa**, please follow the guidelines outlined in the [CONTRIBUTING.md](https://github.com/your-username/saa/blob/main/CONTRIBUTING.md) file. We appreciate your help in making this package better.
## License
**Saa** is released under the [MIT License](https://github.com/your-username/saa/blob/main/LICENSE). Feel free to use, modify, and distribute this package as per the terms of the license.
## Acknowledgments
I would like to express our future gratitude to the developers of French, Spanish, German, and Italian language plugins for their valuable contributions to the **Saa** package. 🤣
## Contact
For any questions, suggestions, or feedback, please reach out to our team at praysonpi<at>gmail.com.
Let **Saa** simplify time for you and enhance the way you communicate it across languages!
| /saa-0.0.5.tar.gz/saa-0.0.5/README.md | 0.747708 | 0.864996 | README.md | pypi |
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.events import UserUtteranceReverted
from rasa_sdk.executor import CollectingDispatcher
from pprint import pprint
import logging
import json
class ActionHelloWorld(Action):
    """Minimal demo action: replies with a fixed greeting."""

    def name(self) -> Text:
        return "action_hello_world"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict[Text, Any]]:
        # No state is read or written; just emit the greeting.
        dispatcher.utter_message("Hello World!")
        return []
class ActionIsBot(Action):
    """Revertible mapped action for utter_is_bot."""

    def name(self):
        return "action_is_bot"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict[Text, Any]]:
        dump_slots(tracker)
        dispatcher.utter_template("utter_iamabot", tracker)
        # Revert the user message so this exchange leaves the dialogue
        # state untouched.
        return [UserUtteranceReverted()]
def prop(tracker: Tracker, attr: Text):
    """Return the value of slot *attr*, or '' when the slot does not exist."""
    if attr in tracker.slots:
        return tracker.get_slot(attr)
    return ''
def dump_slots(tracker):
    """Log the tracker's current slot values as pretty-printed JSON."""
    logging.info('.. slots')
    serialized = json.dumps(tracker.current_slot_values(), indent=2, ensure_ascii=False)
    logging.info(serialized)
class ActionLogCommEvent(Action):
    """Log the current slots and echo the 'sents' slot back as JSON."""

    def name(self):
        return "action_log_commevent"

    def run(
        self,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: Dict[Text, Any],
    ) -> List[Dict[Text, Any]]:
        dump_slots(tracker)
        payload = {'result': 'log ok',
                   'sents': prop(tracker, 'sents')}
        dispatcher.utter_message(json_message=payload)
        # Revert so logging does not advance the conversation state.
        return [UserUtteranceReverted()]
class ActionPerformMedia(Action):
    """Return a (stubbed) media play-list for the requested media type."""

    def name(self):
        return "action_perform_media"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # Reuse the module-level helpers: the previous version re-implemented
        # the slot dump inline and shadowed the module-level `prop` with an
        # identical local lambda.
        dump_slots(tracker)
        dispatcher.utter_message(json_message={
            'result': 'success',
            # Stub play-list; replace with a real media lookup.
            'media_list': ['first song', 'second song'],
            'media_type': tracker.get_slot('object_type'),
            'sents': prop(tracker, 'sents'),
        })
        return []
from typing import Text, Any, Dict, List
from sagas.nlu.inspector_common import Inspector, Context
import logging
logger = logging.getLogger(__name__)
# 代码整理自notebook: procs-rasa-entity-iob.ipynb
def get_entities(sents: Text):
    """Query the local Rasa NLU service and return the extracted entities."""
    from saai.tool import rasa_nlu_parse
    parsed = rasa_nlu_parse(sents, 'http://localhost:15008')
    return parsed['entities']
def get_entity_mapping(sents, doc, ents):
    """Map parsed tokens onto character-span entities.

    Returns one record per (token, entity) pair where the token's character
    span lies entirely inside the entity's [start, end) span.  Tokens that
    cannot be located in *sents* are skipped.
    """
    mapped = []
    cursor = 0
    for token in doc.words:
        text = token.text
        begin = sents.find(text, cursor)
        if begin == -1:
            continue
        end = begin + len(text)
        cursor = end
        logger.debug(f"{text} - ({begin}, {end})")
        for ent in ents:
            if begin >= ent['start'] and end <= ent['end']:
                mapped.append({"start": begin,
                               "end": end,
                               'index': token.index,
                               'value': text,
                               'entity': ent['entity']
                               })
    return mapped
def get_children_index(sent, word_idx):
    """Return word_idx followed by the indexes of all its (recursive) children."""
    from sagas.nlu.corenlp_parser import get_children
    collected = []
    target = next(filter(lambda w: w.index == word_idx, sent.words))
    get_children(sent, target, collected, stem=False)
    return [word_idx] + [item[0] for item in collected]
class CustEntityInspector(Inspector):
    """
    Inspector that checks whether a dependency-tree node (or any word in its
    subtree) was tagged with a given entity type by the Rasa NLU service.
    """
    def __init__(self, test_ent):
        # Entity label to look for, e.g. 'city'.
        self.test_ent = test_ent

    def name(self):
        return "cust_ents"

    def run(self, key, ctx: Context):
        from sagas.nlu.ruleset_procs import list_words, cached_chunks, get_main_domains
        from sagas.conf.conf import cf
        logger.debug(f".. check against {key}")
        # Bail out early when the dependency relation `key` is absent.
        if key not in ctx.indexes:
            return False
        # lemma = ctx.lemmas[key]
        sents = ctx.sents
        lang = ctx.lang
        # Re-parse (cached) to get the dependency doc for the sentence.
        chunks = cached_chunks(sents, lang, cf.engine(lang))
        doc = chunks['doc']
        ents = get_entities(sents)
        prt = ctx.indexes[key]
        # All token indexes in the subtree rooted at `key` (incl. the root).
        indexes = get_children_index(doc, prt)
        # token index -> entity label, for tokens covered by an entity span.
        idx_ent = {el['index']: el['entity'] for el in get_entity_mapping(sents, doc, ents)}
        # Pair each subtree token with its entity label ('_' when untagged).
        children_ents = [(idx, idx_ent[idx] if idx in idx_ent else '_') for idx in indexes]
        result = self.test_ent in {e[1] for e in children_ents}
        if result:
            # Record the full index->entity mapping for downstream consumers.
            ctx.add_result(self.name(), 'default', key, idx_ent)
        return result

    def __str__(self):
        return f"ins_{self.name()}({self.test_ent})"
```
import pyrata.re as pyrata_re
data = [{'pos': 'PRP', 'raw': 'It'}, {'pos': 'VBZ', 'raw': 'is'},
{'pos': 'JJ', 'raw': 'fast'}, {'pos': 'JJ', 'raw': 'easy'},
{'pos': 'CC', 'raw': 'and'}, {'pos': 'JJ', 'raw': 'funny'},
{'pos': 'TO', 'raw': 'to'}, {'pos': 'VB', 'raw': 'write'},
{'pos': 'JJ', 'raw': 'regular'}, {'pos': 'NNS', 'raw': 'expressions'},
{'pos': 'IN', 'raw': 'with'},{'pos': 'NNP', 'raw': 'PyRATA'}]
pyrata_re.findall('pos~"NN."', data)
pyrata_re.findall('[(pos="NNS" | pos="NNP") & !raw="expressions"]', data)
# https://github.com/nicolashernandez/PyRATA/blob/master/docs/user-guide.rst#wildcard-element
# The wildcard element can match any single data token.
# It is represented by the . (dot) metacharacter.
pyrata_re.search('. raw="PyRATA"', data)
# It can be used with any quantifiers
pyrata_re.search('.+ raw="PyRATA"', data)
pattern = 'pos="JJ"'
pyrata_re.findall(pattern, data)
```
You can also set a list of possible values (lexicon). In that case, the operator will be the @ metacharacter in your constraint definition and the value will be the name of the lexicon. The lexicon is specified as a parameter of the pyrata_re methods (lexicons parameter).
```
pyrata_re.findall('raw@"positiveLexicon"', data,
lexicons = {'positiveLexicon':['easy', 'funny']})
# https://github.com/nicolashernandez/PyRATA/blob/master/docs/user-guide.rst#iob-chunk-operator
data = [{'pos': 'NNP', 'chunk': 'B-PERSON', 'raw': 'Mark'},
{'pos': 'NNP', 'chunk': 'I-PERSON', 'raw': 'Zuckerberg'},
{'pos': 'VBZ', 'chunk': 'O', 'raw': 'is'},
{'pos': 'VBG', 'chunk': 'O', 'raw': 'working'},
{'pos': 'IN', 'chunk': 'O', 'raw': 'at'},
{'pos': 'NNP', 'chunk': 'B-ORGANIZATION', 'raw': 'Facebook'},
{'pos': 'NNP', 'chunk': 'I-ORGANIZATION', 'raw': 'Corp'},
{'pos': '.', 'chunk': 'O', 'raw': '.'}]
pattern = 'chunk-"PERSON"'
pyrata_re.search(pattern, data)
```
| /saai-0.3.0-py3-none-any.whl/notebook/procs-pyrata.ipynb | 0.622804 | 0.804905 | procs-pyrata.ipynb | pypi |
```
import sagas
sagas.print_rs([('tom',5)], ['name','age'])
from rasa.nlu import config
conf=config.load('saai/sample_configs/config_crf_custom_features.yml')
# conf.for_component('DucklingHTTPExtractor')
conf.component_names
conf.language
# print(conf.get('DucklingHTTPExtractor'))
from rasa.nlu.training_data import TrainingData, Message
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
def testing_tokenizer(text, cls, lang='en'):
defaults = {
# Flag to check whether to split intents
"intent_tokenization_flag": False,
# Symbol on which intent should be split
"intent_split_symbol": "_",
# text will be tokenized with case sensitive as default
"case_sensitive": True,
"lang": lang,
}
tok=cls(defaults)
example = Message(text, {
"intent": "wish",
"entities": []})
# tokenizer
tok.process(example, x='.')
for token in example.get("tokens"):
print(token.text, token.offset)
text='text will be tokenized with case sensitive as default'
testing_tokenizer(text, WhitespaceTokenizer)
from sagas.util.rest_common import query_data_by_url
lang='zh'
sents="在终端上输出单词的定义和继承链"
r=query_data_by_url('multilang', 'tokens', {'lang': lang, 'sents': sents})
r['data']
from typing import Any, Dict, List, Text
from rasa.nlu.tokenizers import Token, Tokenizer
from rasa.nlu.constants import (
MESSAGE_RESPONSE_ATTRIBUTE,
MESSAGE_INTENT_ATTRIBUTE,
MESSAGE_TEXT_ATTRIBUTE,
MESSAGE_TOKENS_NAMES,
MESSAGE_ATTRIBUTES,
MESSAGE_SPACY_FEATURES_NAMES,
MESSAGE_VECTOR_FEATURE_NAMES,
)
class MultilangTokenizer(WhitespaceTokenizer):
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
"""Construct a new tokenizer using the WhitespaceTokenizer framework."""
super().__init__(component_config)
self.lang = self.component_config["lang"]
print(f".. tokenizer with lang {self.lang}")
def tokenize(
self, text: Text, attribute: Text = MESSAGE_TEXT_ATTRIBUTE
) -> List[Token]:
if self.lang in ('zh','ja'):
r=query_data_by_url('multilang', 'tokens', {'lang': self.lang, 'sents': text})
words=r['data']
running_offset = 0
tokens = []
for word in words:
word_offset = text.index(word, running_offset)
word_len = len(word)
running_offset = word_offset + word_len
tokens.append(Token(word, word_offset))
return tokens
return super().tokenize(text, attribute)
text='text will be tokenized with case sensitive as default'
print(testing_tokenizer(text, MultilangTokenizer, 'en'))
text="在终端上输出单词的定义和继承链"
print(testing_tokenizer(text, MultilangTokenizer, 'zh'))
print(testing_tokenizer("望遠鏡で泳いでいる少女を見た。", MultilangTokenizer, 'ja'))
```
| /saai-0.3.0-py3-none-any.whl/notebook/procs-tokenizers.ipynb | 0.519765 | 0.176423 | procs-tokenizers.ipynb | pypi |
```
from rasa.utils.endpoints import ClientResponseError, EndpointConfig
from rasa.core.agent import Agent
from rasa.core.interpreter import RasaNLUInterpreter
from rasa.model import get_model, get_latest_model
# preq: $ start actions
bot='saya'
endpoint = EndpointConfig("http://localhost:5055/webhook")
bot_locs={'saya': '/pi/ws/sagas-ai/bots/saya'}
bot_loc=get_latest_model(f"{bot_locs[bot]}/models")
print(f'.. load bot model {bot_loc}')
agent = Agent.load(bot_loc, action_endpoint=endpoint)
await agent.handle_text("do you have any restaurants")
```
⊕ [Policies](https://rasa.com/docs/rasa/core/policies/#action-selection)
## Action Selection
At every turn, each policy defined in your configuration will predict a next action with a certain confidence level. For more information about how each policy makes its decision, read into the policy’s description below. The bot’s next action is then decided by the policy that predicts with the highest confidence.
In the case that two policies predict with equal confidence (for example, the Memoization and Mapping Policies always predict with confidence of either 0 or 1), the priority of the policies is considered. Rasa policies have default priorities that are set to ensure the expected outcome in the case of a tie. They look like this, where higher numbers have higher priority:
5. FormPolicy
4. FallbackPolicy and TwoStageFallbackPolicy
3. MemoizationPolicy and AugmentedMemoizationPolicy
2. MappingPolicy
1. EmbeddingPolicy, KerasPolicy, and SklearnPolicy
This priority hierarchy ensures that, for example, if there is an intent with a mapped action, but the NLU confidence is not above the nlu_threshold, the bot will still fall back. In general, it is not recommended to have more than one policy per priority level, and some policies on the same priority level, such as the two fallback policies, strictly cannot be used in tandem.
```
from rasa.core.channels.channel import CollectingOutputChannel
default_channel=CollectingOutputChannel()
policy='MappingPolicy'
# act='action_hello_world'
act='action_query_knowledge_base'
tracker=await agent.execute_action('default', act, default_channel, policy, 1.0)
tracker.current_state()
default_channel.latest_output()
default_channel.messages
await agent.handle_text("do you have any restaurants")
from rasa.model import get_model, get_latest_model
print(get_model("/pi/ws/sagas-ai/bots/saya/models"))
print(get_latest_model("/pi/ws/sagas-ai/bots/saya/models"))
from rasa.core.trackers import DialogueStateTracker
from rasa.core.slots import Slot
from pprint import pprint
tracker = DialogueStateTracker.from_dict("1", [], [Slot("requested_language")])
tracker._set_slot("requested_language", "en")
result = await agent.parse_message_using_nlu_interpreter("In what city is N26 located?", tracker)
pprint(result)
await agent.parse_message_using_nlu_interpreter("In what city is Landesbank Hessen-Thüringen located?", tracker)
bank='dekabank deutsche girozentrale'
await agent.parse_message_using_nlu_interpreter(f"In what city is {bank} located?", tracker)
person='Jannik Jung'
await agent.parse_message_using_nlu_interpreter(f"What is the email of {person}?")
person='Finn Hofmann'
await agent.parse_message_using_nlu_interpreter(f"What is the email of {person}?")
bank='Wirecard Bank'
q=f"Has {bank} an english website?"
await agent.parse_message_using_nlu_interpreter(q)
bank='landwirtschaftliche rentenbank'
q=f"Has {bank} an english website?"
await agent.parse_message_using_nlu_interpreter(q)
# restaurant='Berlin Burrito Company'
restaurant='Marubi Ramen'
q=f"What is the price range of {restaurant}?"
await agent.parse_message_using_nlu_interpreter(q)
```
| /saai-0.3.0-py3-none-any.whl/notebook/procs-bots-saya.ipynb | 0.426799 | 0.665645 | procs-bots-saya.ipynb | pypi |
```
from flair.data import Sentence
from flair.models import SequenceTagger
# make a sentence
sentence = Sentence('I love Berlin .')
# load the NER tagger
# download from: https://s3.eu-central-1.amazonaws.com//alan-nlp/resources/models-v0.4/NER-conll03-english/en-ner-conll03-v0.4.pt
# tagger = SequenceTagger.load('ner')
tagger = SequenceTagger.load('/pi/ai/flair/en-ner-conll03-v0.4.pt')
# run NER over sentence
tagger.predict(sentence)
print(sentence.to_tagged_string())
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
sentence.to_dict(tag_type='ner')
from flair.models import TextClassifier
# download from: https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4/classy-imdb-en-rnn-cuda%3A0/imdb-v0.4.pt
# classifier = TextClassifier.load('en-sentiment')
model='/pi/ai/flair/imdb-v0.4.pt'
classifier = TextClassifier.load(model)
sentence = Sentence('This film hurts. It is so bad that I am confused.')
# predict NER tags
classifier.predict(sentence)
# print sentence with predicted labels
print(sentence.labels)
```
| /saai-0.3.0-py3-none-any.whl/notebook/procs-flair.ipynb | 0.483648 | 0.163179 | procs-flair.ipynb | pypi |
from datetime import date, datetime, timedelta
import sys
import pandas as pd
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
class Holidays:
    """Trading-calendar helpers for the Shanghai Stock Exchange (SSE).

    All methods are classmethods, so they are called directly on the class,
    e.g. ``Holidays.tradingday("20210506")``.  Dates may be given either as
    ``datetime`` objects or as ``'%Y%m%d'`` strings.  The holiday table
    ``SSEHolidays`` covers 2018 through 2021.
    """

    @classmethod
    def to_datetime(cls, datestr: str) -> datetime:
        """
        Convert date strings like "20210506" to datetime type.

        Parameters
        ----------
        datestr : str (in '%Y%m%d' format), or valid datetime object.

        Raises
        ------
        ValueError
            * If datestr argument is invalid

        Examples
        --------
        >>> yyyymmdd = "20210530"
        >>> dt = Holidays.to_datetime(yyyymmdd)
        """
        # datetime inputs pass through untouched.
        if isinstance(datestr, datetime):
            return datestr
        try:
            return datetime(
                year=int(datestr[0:4]),
                month=int(datestr[4:6]),
                day=int(datestr[6:8]),
            )
        except (ValueError, TypeError) as exc:
            # Narrow except (was a bare one); keep the original message and
            # chain the cause for easier debugging.
            raise ValueError("Wrong datestr argument, should be like: '20210526'.") from exc

    @classmethod
    def tradingday(cls, tm: datetime) -> bool:
        """
        Return True if given date is a tradingday, or False otherwise

        Parameters
        ----------
        tm : datetime object, or valid date string like "20210530".

        Raises
        ------
        ValueError
            * If tm argument is invalid

        Examples
        --------
        >>> dt = datetime.now()
        >>> is_tradingday = Holidays.tradingday(dt)
        """
        if not isinstance(tm, datetime):
            tm = cls.to_datetime(tm)
        # Weekends are never trading days (Mon=0 .. Sat=5, Sun=6).
        if tm.weekday() >= 5:
            return False
        # A weekday is a trading day unless listed as an exchange holiday.
        return tm.strftime('%m/%d/%Y') not in cls.SSEHolidays

    @classmethod
    def _adjacent_tradingday(cls, tm: datetime, step: timedelta) -> datetime:
        """Walk from *tm* in increments of *step* until a trading day is hit."""
        ret = tm + step
        while not cls.tradingday(ret):
            ret = ret + step
        return ret

    @classmethod
    def prev_tradingday(cls, tm: datetime) -> datetime:
        """
        Return the previous tradingday of a given date

        Parameters
        ----------
        tm : datetime object, or valid date string like "20210530".

        Raises
        ------
        ValueError
            * If tm argument is invalid

        Examples
        --------
        >>> dt = datetime.now()
        >>> prev_td = Holidays.prev_tradingday(dt)
        """
        if not isinstance(tm, datetime):
            tm = cls.to_datetime(tm)
        return cls._adjacent_tradingday(tm, timedelta(days=-1))

    @classmethod
    def next_tradingday(cls, tm: datetime) -> datetime:
        """
        Return the next tradingday of a given date.

        Parameters
        ----------
        tm : datetime object, or valid date string like "20210530".

        Raises
        ------
        ValueError
            * If tm argument is invalid

        Examples
        --------
        >>> dt = datetime.now()
        >>> next_td = Holidays.next_tradingday(dt)
        """
        if not isinstance(tm, datetime):
            tm = cls.to_datetime(tm)
        return cls._adjacent_tradingday(tm, timedelta(days=1))

    @classmethod
    def get_holidays(cls) -> pd.DataFrame:
        """
        Return a pandas.DataFrame object with only one column
        named 'Dates' containing all the holidays.

        Examples
        --------
        >>> h_days = Holidays.get_holidays()
        """
        ret = pd.DataFrame()
        ret['Dates'] = [datetime.strptime(dt, "%m/%d/%Y") for dt in cls.SSEHolidays]
        return ret

    # '%m/%d/%Y'-formatted SSE holiday dates, 2018-2021.
    SSEHolidays = [
        '01/01/2018', '02/15/2018', '02/16/2018', '02/17/2018', '02/18/2018', '02/19/2018' ,'02/20/2018',
        '02/21/2018', '04/05/2018', '04/06/2018', '04/07/2018', '04/29/2018', '04/30/2018', '05/01/2018',
        '06/16/2018', '06/17/2018', '06/18/2018', '10/01/2018', '10/02/2018', '10/03/2018',
        '10/04/2018', '10/05/2018', '10/06/2018', '10/07/2018', '02/04/2019', '02/05/2019', '02/06/2019',
        '02/07/2019', '02/08/2019', '02/09/2019', '02/10/2019', '04/05/2019', '04/06/2019', '04/07/2019',
        '05/01/2019', '05/02/2019', '05/03/2019', '05/04/2019', '06/07/2019', '06/08/2019', '06/09/2019',
        '09/13/2019', '09/14/2019', '09/15/2019', '10/01/2019', '10/02/2019', '10/03/2019',
        '10/04/2019', '10/05/2019', '10/06/2019', '10/07/2019', '01/01/2020', '01/24/2020',
        '01/25/2020', '01/26/2020', '01/27/2020', '01/28/2020', '01/29/2020', '01/30/2020',
        '01/31/2020', '02/01/2020', '02/02/2020', '04/04/2020', '04/05/2020', '04/06/2020', '05/01/2020',
        '05/02/2020', '05/03/2020', '05/04/2020', '05/05/2020', '06/25/2020', '06/26/2020', '06/27/2020',
        '10/01/2020', '10/02/2020', '10/03/2020', '10/04/2020', '10/05/2020', '10/06/2020',
        '10/07/2020', '10/08/2020', '01/01/2021', '01/02/2021', '01/03/2021', '02/11/2021', '02/12/2021',
        '02/13/2021', '02/14/2021', '02/15/2021', '02/16/2021', '02/17/2021', '04/03/2021', '04/04/2021',
        '04/05/2021', '05/01/2021', '05/02/2021', '05/03/2021', '05/04/2021', '05/05/2021',
        '06/12/2021', '06/13/2021', '06/14/2021',
        '09/19/2021', '09/20/2021', '09/21/2021', '10/01/2021', '10/02/2021', '10/03/2021', '10/04/2021',
        '10/05/2021', '10/06/2021', '10/07/2021' ]
# Manual smoke test: only runs when the module is executed as a script.
if '__main__' == __name__:
    # 2021-05-06 is a regular trading day; exercise both datetime and string inputs.
    dt1 = datetime(2021,5,6)
    tm1 = "20210506"
    print(type(dt1), Holidays.to_datetime(dt1), Holidays.tradingday(dt1), Holidays.prev_tradingday(dt1), Holidays.next_tradingday(dt1))
    print(type(tm1), Holidays.to_datetime(tm1), Holidays.tradingday(tm1), Holidays.prev_tradingday(tm1), Holidays.next_tradingday(tm1))
    # 2021-09-30 falls right before the 10/01-10/07 National Day holidays in SSEHolidays.
    dt2 = datetime(2021,9,30)
    tm2 = "20210930"
    print(type(dt2), Holidays.to_datetime(dt2), Holidays.tradingday(dt2), Holidays.prev_tradingday(dt2), Holidays.next_tradingday(dt2))
    print(type(tm2), Holidays.to_datetime(tm2), Holidays.tradingday(tm2), Holidays.prev_tradingday(tm2), Holidays.next_tradingday(tm2))
print(Holidays.get_holidays()) | /saaltfiish_boilerplate-0.1.3.tar.gz/saaltfiish_boilerplate-0.1.3/saaltfiish_boilerplate/holidays.py | 0.655557 | 0.284977 | holidays.py | pypi |
from ..._private._utilities import *
from ... import Event
from copy import deepcopy
SIMPLE_CHANGE_INDICATOR = lambda x, y: x != y
class DataStore(ABC):
    """Abstract key/value store with dict-style access and prefixed "branch" views."""

    @abstractmethod
    def get_value(self, key: str) -> Optional[Any]:
        """Return the value stored under *key*, or None when absent."""
        raise NotImplementedError()

    @abstractmethod
    def set_value(self, key: str, value: Any, **kwargs) -> None:
        """Store *value* under *key*; extra kwargs are implementation-specific."""
        raise NotImplementedError()

    @abstractmethod
    def contains(self, key: str) -> bool:
        """Return True when *key* exists in the store."""
        raise NotImplementedError()

    def __getitem__(self, item: str) -> Optional[Any]:
        return self.get_value(item)

    def __setitem__(self, key: str, value: Any) -> None:
        self.set_value(key, value)

    def __contains__(self, item: str) -> bool:
        return self.contains(item)

    # noinspection PyAttributeOutsideInit,PyProtectedMember
    @synchronized
    def get_branch(self, prefix: str,
                   value_generator: Optional[Callable[[str], Any]] = None,
                   change_event_handlers: Optional[List[Callable[[str, Optional[Any], Any], None]]] = None,
                   change_indicator: Callable[[Any, Any], bool] = SIMPLE_CHANGE_INDICATOR
                   ):
        """Return the cached DataStoreBranch for *prefix*, creating it on first use.

        When a branch for *prefix* already exists, the supplied configuration is
        merged into it: a second value_generator or a second non-default
        change_indicator raises ValueError, while change_event_handlers are
        appended to the existing list.

        Raises
        ------
        ValueError
            If the existing branch already has a value generator / change
            indicator and another one is supplied.
        """
        # The branch cache is created lazily on first call (hence the
        # PyAttributeOutsideInit suppression above).
        if not hasattr(self, '_branches'):
            self._branches: Dict[str, DataStoreBranch] = dict()
        if prefix in self._branches:
            result: DataStoreBranch = self._branches[prefix]
            if value_generator is not None:
                if result._value_generator is not None:
                    raise ValueError(f'Branch with prefix "{prefix}" already has a value generator')
                else:
                    result._value_generator = value_generator
            # Indicators are compared against the module default, so only a
            # non-default indicator counts as "set".
            if change_indicator != SIMPLE_CHANGE_INDICATOR:
                if result._change_indicator != SIMPLE_CHANGE_INDICATOR:
                    raise ValueError(f'Branch with prefix "{prefix}" already has a change indicator')
                else:
                    result._change_indicator = change_indicator
            if change_event_handlers is not None and len(change_event_handlers) > 0:
                if result._change_event_handlers is None:
                    result._change_event_handlers = change_event_handlers
                else:
                    result._change_event_handlers += change_event_handlers
        else:
            result = DataStoreBranch(self, prefix, value_generator, change_event_handlers, change_indicator)
            self._branches[prefix] = result
        return result
class DataStoreBranch(DataStore):
    """A DataStore view that prefixes every key and delegates to a parent store.

    Optionally generates missing values on read and notifies change handlers
    with ``(key, old_value, new_value)`` when a stored value changes.
    """

    def __init__(self,
                 parent_data_store: DataStore,
                 prefix: str,
                 value_generator: Callable[[str], Any],
                 change_event_handlers: List[Callable[[str, Optional[Any], Any], None]],
                 change_indicator: Callable[[Any, Any], bool]
                 ):
        super().__init__()
        self._parent_data_store = parent_data_store
        self._prefix = prefix
        # Called with the un-prefixed key to lazily fill missing values (may be None).
        self._value_generator = value_generator
        self._change_event_handlers = change_event_handlers
        # Predicate deciding whether old -> new counts as a change.
        self._change_indicator = change_indicator

    def get_value(self, key: str) -> Optional[Any]:
        """Read *key* from the parent store; generate and persist it when missing."""
        result = self._parent_data_store.get_value(self._prefix + key)
        if result is None and self._value_generator is not None:
            result = self._value_generator(key)
            self._parent_data_store.set_value(self._prefix + key, result)
            # A freshly generated value is reported as a None -> result change.
            if (self._change_event_handlers is not None
                    and len(self._change_event_handlers) > 0
                    and self._change_indicator(None, result)):
                for handler in self._change_event_handlers:
                    handler(key, None, result)
        return result

    def set_value(self, key: str, value: Any, **kwargs) -> None:
        """Write *value* under *key*, notifying handlers only on an actual change."""
        should_call_handlers = False
        if self._change_event_handlers is not None and len(self._change_event_handlers) > 0:
            # NOTE(review): get_value may itself invoke the value generator and
            # fire handlers for the generated value before the write below —
            # confirm this double notification is intended.
            previous_value = self.get_value(key)
            should_call_handlers = True
            # No observable change: skip both the write and the notifications.
            if not self._change_indicator(previous_value, value):
                return
        self._parent_data_store.set_value(self._prefix + key, value, **kwargs)
        if should_call_handlers:
            for handler in self._change_event_handlers:
                # previous_value is always bound here: should_call_handlers is
                # only True on the branch that assigned it.
                # noinspection PyUnboundLocalVariable
                handler(key, previous_value, value, **kwargs)

    def contains(self, key: str) -> bool:
        """Membership test against the parent store under the branch prefix."""
        return self._parent_data_store.contains(self._prefix + key)
class SimpleRAMDataStore(DataStore):
    """In-memory DataStore backed by a plain dict.

    Values are deep-copied on both reads and writes, so callers can never
    mutate the stored state through a reference they hold or receive.
    """

    def __init__(self):
        super().__init__()
        self._storage: Dict[str, Any] = dict()

    def get_value(self, key: str) -> Optional[Any]:
        """Return a deep copy of the stored value, or None for unknown keys."""
        try:
            stored = self._storage[key]
        except KeyError:
            return None
        return deepcopy(stored)

    def set_value(self, key: str, value: Any, **kwargs) -> None:
        """Store a deep copy of *value*; extra kwargs are accepted and ignored."""
        self._storage[key] = deepcopy(value)

    def contains(self, key: str) -> bool:
        """Return True when *key* has been stored."""
        return key in self._storage
.. include:: references.txt
Saba: Sherpa-Astropy Bridge
===========================
The Saba package provides a bridge between the convenient model definition
language provided in the `astropy.modeling` package and the powerful fitting
capabilities of the Sherpa_ modeling and fitting package. In particular,
Sherpa has a selection of robust optimization algorithms coupled with
configurable fit statistics. Once the model fit is complete Sherpa has three
different ways to estimate parameter confidence intervals, including methods
that allow for coupled non-gaussian errors. Finally, Sherpa has an MCMC
sampler that can be used to generate draws from the probability distribution
assuming given priors.
Once Saba and Sherpa are installed, the Saba package exposes the above Sherpa
functionality within the `astropy.modeling.fitting` package via a single
|SherpaFitter| class which acts as a fitting backend within astropy. If
using the latest version of astropy (development or >= 1.3), a plugin registry
system automatically makes the |SherpaFitter| class available within the
`astropy.modeling.fitting` module without requiring an explicit import.
``Saba`` is the Sherpa people's word for "bridge".
Installation
------------
The following installation notes apply to the development version of Saba and
assume use of the conda + Anaconda package system.
Prerequisites
^^^^^^^^^^^^^^
* Python 2.7 (support for Python 3.5+ is in work)
* numpy
* astropy
* sherpa
::
conda install numpy
To make use of the entry points plugin registry which automatically makes the |SherpaFitter| class available within `astropy.modeling.fitting`, install `astropy` version >= 1.3.
Otherwise one can just use the latest stable ``astropy`` via::
conda install astropy
Next install Sherpa_ using the conda ``sherpa`` channel. Note that Sherpa
currently needs to be installed after astropy on Mac OSX.
::
conda install -c sherpa sherpa
Finally install ``saba`` using pip::
pip install saba
Getting started
----------------
If you are not already familiar with `astropy.modeling`, now is a good time to
review the introductory documentation there along with the
`astropy.modeling.fitting` module details.
To start with Saba let's import the |SherpaFitter| class which is the interface
with Sherpa's fitting routines. |SherpaFitter| is available in one of
two ways, either directly from `saba` or through `astropy.modeling.fitting`
through the plugin registry system. The latter method is preferred but requires
`astropy` version >= 1.3 or the latest development (master) version. Use:
.. code-block:: ipython
from saba import SherpaFitter
or
.. code-block:: ipython
from astropy.modeling.fitting import SherpaFitter
Initialization
^^^^^^^^^^^^^^^
To initialize a fitter we provide string values to define the ``statistic``,
``optimizer`` and ``estmethod`` (error estimation method). The available
values for those can be found in the docstring of |SherpaFitter| and
relate to objects within `sherpa.stats`, `sherpa.optmethods` and
`sherpa.estmethods`.
.. code-block:: ipython
sfit = SherpaFitter(statistic='chi2', optimizer='levmar', estmethod='confidence')
Now that we have a fitter instance we need something to fit. So let's import an
astropy model, specifically `~astropy.modeling.functional_models.Gaussian1D`. A
full description astropy's model and capabilities can be found in the ``astropy``
`Instantiating and Evaluating Models
<http://docs.astropy.org/en/stable/modeling/models.html>`_ section.
.. code-block:: ipython
from astropy.modeling.models import Gaussian1D
We also need some data so let's make some data with some added noise.
.. code-block:: ipython
import numpy as np
np.random.seed(0x1337)
true = Gaussian1D(amplitude=3, mean=0.9, stddev=0.5)
err = 0.05
step = 0.1
x = np.arange(-3, 3, step)
y = true(x) + err * np.random.uniform(-1, 1, size=len(x))
yerrs = err * np.random.uniform(0.2, 1, size=len(x))
binsize = step * np.ones(x.shape)
# please note that binsize is the width of the bin!
fit_model = true.copy() # ofset fit model from true
fit_model.amplitude = 2
fit_model.mean = 0
fit_model.stddev = 0.2
For good measure let's plot it and take a look:
.. image:: _generated/example_plot_data.png
:width: 500px
Fitting
^^^^^^^
Now we have some data let's fit it and hopefully we get something similar to
"True" back. The ``sfit`` fitter object has already been initialized (as would
be done for other `astropy.modeling.fitting` fitters) so we just call it with
some data and an astropy model and we get the fitted model returned.
.. code-block:: ipython
fitted_model = sfit(fit_model, x, y, xbinsize=binsize, err=yerrs)
Once again plotting the data.
.. image:: _generated/example_plot_fitted.png
:width: 500px
Now we have a fit we can look at the outputs by doing:
.. code-block:: ipython
print(sfit.fit_info)
.. code-block:: ipython
datasets = None
itermethodname = none
methodname = levmar
statname = chi2
succeeded = True
parnames = ('wrap_.amplitude', 'wrap_.mean', 'wrap_.stddev')
parvals = (3.0646789274093185, 0.77853851419777986, 0.50721937454701504)
statval = 82.7366242121
istatval = 553.030876852
dstatval = 470.29425264
numpoints = 30
dof = 27
qval = 1.44381192266e-07
rstat = 3.06431941526
message = successful termination
nfev = 84
Note that the ``fit_info`` attribute is custom to the |SherpaFitter| class and
provides a direct link to the internal fitting results from the Sherpa fit
process.
Uncertainty estimation
^^^^^^^^^^^^^^^^^^^^^^
One of the main drivers for Saba is to get access the uncertainty estimation
methods provided by Sherpa_. This is done though the
`~saba.SherpaFitter.est_errors` method which uses the Sherpa's
`~sherpa.fit.Fit.est_errors` method. To get the errors make a call such as:
.. code-block:: ipython
param_errors = sfit.est_errors(sigma=3) # Note that sigma can be an input
In return we get a tuple of (``parameter_name``, ``best_fit_value``,
``lower_value`` , ``upper_value``). For the sake of plotting them we make
models for the upper and lower values, and then output the values while we're at it.
.. code-block:: ipython
min_model = fitted_model.copy()
max_model = fitted_model.copy()
for pname, pval, pmin, pmax in zip(*param_errors):
print(pname, pval, pmin, pmax)
getattr(min_model, pname).value = pval + pmin
getattr(max_model, pname).value = pval + pmax
.. code-block:: ipython
('amplitude', 3.0646789274093185, -0.50152026852144349, 0.56964617033348119)
('mean', 0.77853851419777986, -0.096264447380365548, 0.10293940565584792)
('stddev', 0.50721937454701504, -0.098092469817728456, 0.11585973498734969)
.. image:: _generated/example_plot_error.png
:width: 500px
Using Saba
----------
.. toctree::
:maxdepth: 2
examples_complex
examples_2d
examples_mcmc
API/Reference
-------------
.. toctree::
:maxdepth: 2
api.rst
Credit
------
The development of this package was made possible by the generous support of the `Google Summer of Code <https://summerofcode.withgoogle.com/>`_ program in 2016 under the `OpenAstronomy <http://openastronomy.org/>`_ by `Michele Costa <https://github.com/nocturnalastro>`_ with the support and advice of mentors `Tom Aldcroft <https://github.com/taldcroft>`_, `Omar Laurino <https://github.com/olaurino>`_, `Moritz Guenther <https://github.com/hamogu>`_, and `Doug Burke <https://github.com/DougBurke>`_.
| /saba-0.1.1a0.tar.gz/saba-0.1.1a0/docs/index.rst | 0.945889 | 0.777807 | index.rst | pypi |
.. include:: references.txt
Usage details
==============
Now that you have the basics let's move on to some more complex usage of the fitter interface.
First a quick preamble to do some imports and create our |SherpaFitter| object.
.. code-block:: ipython
from astropy.modeling.fitting import SherpaFitter
sfit = SherpaFitter(statistic='chi2', optimizer='levmar', estmethod='confidence')
from astropy.modeling.models import Gaussian1D
import numpy as np
np.random.seed(0x1337)
Parameter constraints
---------------------
If you place any of the parameter constraints on the astropy models then they will be respected by the fitter. Let's take a quick look at that. Firstly let's make a compound model by adding two `~astropy.modeling.functional_models.Gaussian1D` instances:
.. code-block:: ipython
double_gaussian = (Gaussian1D(amplitude=10, mean=-1.5, stddev=0.5) +
Gaussian1D(amplitude=3, mean=0.9, stddev=0.5))
Now we have the compound model lets add tie `amplitude_1` (the amplitude of the right hand side `~astropy.modeling.functional_models.Gaussian1D`) to `1.2*amplitude_0` and while we're at it let us generate some data, too.
To do this we must first define the `tiedfunc`:
.. code-block:: ipython
def tiedfunc(self): # a function used for tying amplitude_1
return 1.2*self.amplitude_0
double_gaussian.amplitude_1.tied = tiedfunc
err = 0.8
step = 0.2
x = np.arange(-3, 3, step)
y = double_gaussian(x) + err * np.random.uniform(-1, 1, size=len(x))
yerrs = err * np.random.uniform(0.2, 1, size=len(x))
binsize=(step/2) * np.ones(x.shape)
.. note :: without astropy PR #5129 we need to do this
``double_gaussian.amplitude_1.value = double_gaussian.amplitude_1.tied(double_gaussian)``
.. image:: _generated/example_plot_data2.png
:width: 500px
Let's add some more parameter constraints to the model and fit the data.
We can print the sherpa models to check things are doing what they should.
.. code-block:: ipython
fit_gg = double_gaussian.copy()
fit_gg.mean_0.value = -0.5
# sets the lower bound so we can force the parameter against it
fit_gg.mean_0.min = -1.25
fit_gg.mean_1.value = 0.8
fit_gg.stddev_0.value = 0.9
fit_gg.stddev_0.fixed = True
Fitting Config
--------------
An initialized `~saba.SherpaFitter` object has the `opt_config` property which holds the configuration details for the optimization routine. It's docstring contains information about the the properties of the optimizer.
.. code-block:: ipython
print(sfit.opt_config)
.. code-block:: ipython
{'epsfcn': 1.1920928955078125e-07,
'factor': 100.0,
'ftol': 1.1920928955078125e-07,
'gtol': 1.1920928955078125e-07,
'maxfev': None,
'verbose': 0,
'xtol': 1.1920928955078125e-07}
.. code-block:: ipython
print(sfit.opt_config.__doc__) # as help returns the help for the returned object
.. code-block:: ipython
Levenberg-Marquardt optimization method.
The Levenberg-Marquardt method is an interface to the MINPACK
subroutine lmdif to find the local minimum of nonlinear least
squares functions of several variables by a modification of the
Levenberg-Marquardt algorithm [1]_.
Attributes
----------
ftol : number
The function tolerance to terminate the search for the minimum;
the default is sqrt(DBL_EPSILON) ~ 1.19209289551e-07, where
DBL_EPSILON is the smallest number x such that `1.0 != 1.0 +
x`. The conditions are satisfied when both the actual and
predicted relative reductions in the sum of squares are, at
most, ftol.
xtol : number
The relative error desired in the approximate solution; default
is sqrt( DBL_EPSILON ) ~ 1.19209289551e-07, where DBL_EPSILON
is the smallest number x such that `1.0 != 1.0 + x`. The
conditions are satisfied when the relative error between two
consecutive iterates is, at most, `xtol`.
...
The parameters can be changed by
.. code-block:: ipython
sfit.opt_config['ftol'] = 1e-5
print(sfit.opt_config)
.. code-block:: ipython
{'epsfcn': 1.1920928955078125e-07,
'factor': 100.0,
'ftol': 1e-05,
'gtol': 1.1920928955078125e-07,
'maxfev': None,
'verbose': 0,
'xtol': 1.1920928955078125e-07}
Fitting this model is the same as earlier and we can also fit an unconstrained model for comparison:
.. code-block:: ipython
fitted_gg = sfit(fit_gg,x, y, xbinsize=binsize, err=yerrs)
sfit2 = SherpaFitter(statistic='chi2', optimizer='levmar', estmethod='covariance')
free_gg = sfit2(double_gaussian.copy(), x, y, xbinsize=binsize, err=yerrs)
.. image:: _generated/example_plot_fitted2.png
:width: 500px
The fitter keeps a copy of the converted model so we can use it to compare the constrained and unconstrained model setups:
.. note ::
``wrap\_.amplitude_1`` should be `linked`, sherpa notation of astropy's `tied`
``wrap\_.stddev_0`` should be `frozen`, sherpa notation for `fixed`
and finally ``wrap\_.mean_0``'s value should have moved to its minimum while fitting
"wrap\_" is just perpended to the model name (we didn't set one so it's blank) on conversion to the sherpa `~sherpa.models.model.Model`.
.. code-block:: ipython
print("##Fit with constraints")
print(sfit._fitmodel.sherpa_model)
print("##Fit without constraints")
print(sfit2._fitmodel.sherpa_model)
.. code-block:: ipython
##Fit with constraints
Param Type Value Min Max Units
----- ---- ----- --- --- -----
wrap_.amplitude_0 thawed 5.58947 -3.40282e+38 3.40282e+38
wrap_.mean_0 thawed -1.25 -1.25 3.40282e+38
wrap_.stddev_0 frozen 0.9 -3.40282e+38 3.40282e+38
wrap_.amplitude_1 linked 6.70736 expr: (1.2 * wrap_.amplitude_0)
wrap_.mean_1 thawed 0.869273 -3.40282e+38 3.40282e+38
wrap_.stddev_1 thawed 0.447021 -3.40282e+38 3.40282e+38
##Fit without constraints
Param Type Value Min Max Units
----- ---- ----- --- --- -----
wrap_.amplitude_0 thawed 6.95483 -3.40282e+38 3.40282e+38
wrap_.mean_0 thawed -1.59091 -3.40282e+38 3.40282e+38
wrap_.stddev_0 thawed 0.545582 -3.40282e+38 3.40282e+38
wrap_.amplitude_1 linked 8.34579 expr: (1.2 * wrap_.amplitude_0)
wrap_.mean_1 thawed 0.785016 -3.40282e+38 3.40282e+38
wrap_.stddev_1 thawed 0.46393 -3.40282e+38 3.40282e+38
Error Estimation Configuration
------------------------------
As with the `~sherpa.optmethods` before we are able to adjust the configuration of the `~sherpa.estmethods`. Some of the properties can be passed through `~saba.SherpaFitter.est_errors` as keyword arguments such as the `sigma` however for access to all options we have the `est_config` property.
.. code-block:: ipython
print(sfit.est_config)
sfit.est_config['numcores'] = 5
sfit.est_config['max_rstat'] = 4
print(sfit.est_config)
.. code-block:: ipython
{'eps': 0.01,
'fast': False,
'max_rstat': 3,
'maxfits': 5,
'maxiters': 200,
'numcores': 8,
'openinterval': False,
'parallel': True,
'remin': 0.01,
'sigma': 1,
'soft_limits': False,
'tol': 0.2,
'verbose': False}
{'eps': 0.01,
'fast': False,
'max_rstat': 3,
'maxfits': 5,
'maxiters': 200,
'numcores': 5,
'openinterval': False,
'parallel': True,
'remin': 0.01,
'sigma': 1,
'soft_limits': False,
'tol': 0.2,
'verbose': False}
Multiple models or multiple datasets
------------------------------------
We have three scenarios we can handle:
- Fitting ``N`` datasets with ``N`` models
- Fitting a single dataset with ``N`` models
- Fitting ``N`` datasets with a single model
If ``N > 1`` for any of the scenarios then calling the fitter will return a list of models. Firstly we look at a single dataset with the two models as above.
We quickly copy the two models above and supply them to the fitter as a list - hopefully we get the same result.
.. code-block:: ipython
fit_gg = double_gaussian.copy()
fit_gg.mean_0.value = -0.5
fit_gg.mean_0.min = -1.25
fit_gg.mean_1.value = 0.8
fit_gg.stddev_0.value = 0.9
fit_gg.stddev_0.fixed = True
fm1, fm2 = sfit([fit_gg, double_gaussian.copy()], x, y, xbinsize=binsize, err=yerrs)
.. image:: _generated/example_plot_simul.png
:width: 500px
We also can fit multiple datasets with a single model so let's make a second dataset:
.. code-block:: ipython
second_gg = double_gaussian.copy()
second_gg.mean_0 = -2
second_gg.mean_1 = 0.5
second_gg.amplitude_0 = 8
second_gg.amplitude_1 = 5
second_gg.stddev_0 = 0.4
second_gg.stddev_1 = 0.8
y2 = second_gg(x) + err * np.random.uniform(-1, 1, size=len(x))
y2errs = err * np.random.uniform(0.2, 1, size=len(x))
Here we supply lists for each of the data parameters. You can also use ``None`` when you don't have something, such as missing binsizes - a lack of binsizes is a contrived example, but a lack of ``y`` errors is not suitable for a chi\ :sup:`2` fit and you don't want to make a new fitter.
.. code-block:: ipython
fit_gg = double_gaussian.copy()
fit_gg.mean_0 = -2.3
fit_gg.mean_1 = 0.7
fit_gg.amplitude_0 = 2
fit_gg.amplitude_1 = 3
fit_gg.stddev_0 = 0.3
fit_gg.stddev_1 = 0.5
fm1, fm2 = sfit(fit_gg, x=[x, x], y=[y, y2], xbinsize=[binsize, None], err=[yerrs, y2errs])
.. image:: _generated/example_plot_simul2.png
:width: 500px
Background Data
---------------
It is also possible specify background data which is required for several of the fit statistics.
This is done by supplying a background array using the `bkg` keyword. If there is a scaling of the background relative to the source data then you can use the `bkg_scale` keyword
.. code-block:: ipython
y[y<0]=0
cfit = SherpaFitter(statistic='cstat', optimizer='levmar', estmethod='covariance')
cfit(fit_gg, x=x, y=y, xbinsize=binsize, err=yerrs, bkg=y, bkg_scale=0.3)
.. image:: _generated/example_plot_bkg.png
:width: 500px
| /saba-0.1.1a0.tar.gz/saba-0.1.1a0/docs/examples_complex.rst | 0.923256 | 0.69383 | examples_complex.rst | pypi |
# Figure-generation script for the Saba docs: fits 1-D Gaussian models with
# the Sherpa backend and saves the example plots under _generated/.
from astropy.modeling.fitting import SherpaFitter
from astropy.modeling.models import Gaussian1D, Gaussian2D
import numpy as np
import matplotlib.pyplot as plt

# chi2 statistic + Levenberg-Marquardt optimizer, 'confidence' error estimation.
sfitter = SherpaFitter(statistic='chi2', optimizer='levmar', estmethod='confidence')
sfitter.est_config['max_rstat'] = 4
np.random.seed(0x1337)  # fixed seed so the generated figures are reproducible

# Synthetic data: a true Gaussian plus uniform noise, with per-point y errors.
true = Gaussian1D(amplitude=3, mean=0.9, stddev=0.5)
err = 0.8
step = 0.2
x = np.arange(-3, 3, step)
y = true(x) + err * np.random.uniform(-1, 1, size=len(x))
yerrs = err * np.random.uniform(0.2, 1, size=len(x))
# binsize=step * np.ones(x.shape) # please note these are binsize/2 not
# true errors!
# please note these are binsize/2 not true errors!
binsize = step * np.ones(x.shape)

fit_model = true.copy()  # offset fit model from true
fit_model.amplitude = 2
fit_model.mean = 0
fit_model.stddev = 0.2

# Plot the truth, the noisy data, and the starting model.
plt.plot(x, true(x), label="True")
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="", label="Data")
plt.plot(x, fit_model(x), label="Starting fit model")
plt.legend(loc=(0.02, 0.7), frameon=False)
plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_data.png")
plt.close('all')
# Fit the model, then plot the fitted curve alongside truth and data.
fitted_model = sfitter(fit_model, x, y, xbinsize=binsize, err=yerrs)
plt.plot(x, true(x), label="True")
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="", label="Data")
plt.plot(x, fit_model(x), label="Starting fit model")
plt.plot(x, fitted_model(x), label="Fitted model")
plt.legend(loc=(0.02, 0.6), frameon=False)
plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_fitted.png")
plt.close('all')

# 3-sigma parameter uncertainties; build min/max envelope models from them.
param_errors = sfitter.est_errors(sigma=3)
min_model = fitted_model.copy()
max_model = fitted_model.copy()
for pname, pval, pmin, pmax in zip(*param_errors):
    print(pname, pval, pmin, pmax)
    # pmin is reported as a negative offset, so "+" moves the parameter down.
    getattr(min_model, pname).value = pval + pmin
    getattr(max_model, pname).value = pval + pmax
plt.plot(x, true(x), label="True")
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="")
plt.plot(x, fitted_model(x), label="Fitted model")
plt.plot(x, min_model(x), label="min model", ls="--")
plt.plot(x, max_model(x), label="max model", ls="--")
plt.legend(loc=(0.02, 0.6), frameon=False)
_ = plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_error.png")
plt.close('all')
# Second example: a compound model (sum of two Gaussians) with a tied amplitude.
sfitter = SherpaFitter(statistic='chi2', optimizer='levmar', estmethod='confidence')
double_gaussian = Gaussian1D(
    amplitude=10, mean=-1.5, stddev=0.5) + Gaussian1D(amplitude=1, mean=0.9,
                                                      stddev=0.5)
def tiedfunc(self):
    """Tie constraint: amplitude_1 is always 1.2x amplitude_0."""
    ratio = 1.2
    return ratio * self.amplitude_0
# Tie amplitude_1 to amplitude_0 and seed its value from the tie function.
double_gaussian.amplitude_1.tied = tiedfunc
double_gaussian.amplitude_1.value = double_gaussian.amplitude_1.tied(
    double_gaussian)
err = 0.8
step = 0.2
x = np.arange(-3, 3, step)
y = double_gaussian(x) + err * np.random.uniform(-1, 1, size=len(x))
yerrs = err * np.random.uniform(0.2, 1, size=len(x))
# please note these are binsize/2 not true errors!
binsize = (step / 2) * np.ones(x.shape)
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="", label="data")
# once again xerrs are binsize/2 not true errors!
plt.plot(x, double_gaussian(x), label="True")
plt.legend(loc=(0.78, 0.8), frameon=False)
_ = plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_data2.png")
plt.close('all')
# --- Constrained vs free fit of the double Gaussian ---
fit_gg = double_gaussian.copy()
fit_gg.mean_0.value = -0.5
# sets the lower bound so we can force the parameter against it
fit_gg.mean_0.min = -1.25
fit_gg.mean_1.value = 0.8
fit_gg.stddev_0.value = 0.9
fit_gg.stddev_0.fixed = True
fitted_gg = sfitter(fit_gg, x, y, xbinsize=binsize, err=yerrs)
# (typo "contraints" preserved — it is runtime output text)
print("##Fit with contraints")
print(sfitter._fitmodel.sherpa_model)
free_gg = sfitter(double_gaussian.copy(), x, y, xbinsize=binsize, err=yerrs)
print()
print("##Fit without contraints")
print(sfitter._fitmodel.sherpa_model)
plt.figure(figsize=(10, 5))
plt.plot(x, double_gaussian(x), label="True")
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="", label="data")
plt.plot(x, fit_gg(x), label="Pre fit")
plt.plot(x, fitted_gg(x), label="Fitted")
plt.plot(x, free_gg(x), label="Free")
plt.subplots_adjust(right=0.8)
plt.legend(loc=(1.01, 0.55), frameon=False)
plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_fitted2.png")
plt.close('all')
# --- Simultaneous fit of two models to the same data ---
fit_gg = double_gaussian.copy()
fit_gg.mean_0.value = -0.5
fit_gg.mean_0.min = -1.25
fit_gg.mean_1.value = 0.8
fit_gg.stddev_0.value = 0.9
fit_gg.stddev_0.fixed = True
fm1, fm2 = sfitter([fit_gg, double_gaussian.copy()],
                   x, y, xbinsize=binsize, err=yerrs)
plt.figure(figsize=(10, 5))
plt.plot(x, double_gaussian(x), label="True")
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="", label="data")
plt.plot(x, fit_gg(x), label="Pre fit")
plt.plot(x, fm1(x), label="Constrained")
plt.plot(x, fm2(x), label="Free")
plt.subplots_adjust(right=0.8)
plt.legend(loc=(1.01, 0.55), frameon=False)
plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_simul.png")
plt.close("all")
# --- One model fitted simultaneously to two datasets ---
fit_gg = double_gaussian.copy()
fit_gg.mean_0 = -2.3
fit_gg.mean_1 = 0.7
fit_gg.amplitude_0 = 2
fit_gg.amplitude_1 = 3
fit_gg.stddev_0 = 0.3
fit_gg.stddev_1 = 0.5
second_gg = double_gaussian.copy()
second_gg.mean_0 = -2
second_gg.mean_1 = 0.5
second_gg.amplitude_0 = 8
second_gg.amplitude_1 = 5
second_gg.stddev_0 = 0.4
second_gg.stddev_1 = 0.8
second_gg.amplitude_1.value = second_gg.amplitude_1.tied(second_gg)
yy2 = second_gg(x) + err * np.random.uniform(-1, 1, size=len(x))
yy2errs = err * np.random.uniform(0.2, 1, size=len(x))
plt.errorbar(x, y, xerr=binsize, yerr=yerrs, ls="", label="data1")
plt.errorbar(x, yy2, yerr=yy2errs, ls="", label="data2")
plt.plot(x, fit_gg(x), label="Prefit")
# xbinsize=None for the second dataset: it has no x errors.
fitted_model = sfitter(fit_gg, x=[x, x], y=[y, yy2], xbinsize=[
    binsize, None], err=[yerrs, yy2errs])
plt.plot(x, fitted_model[0](x), label="Fitted")
plt.plot(x, fitted_model[1](x), label="Fitted")
plt.subplots_adjust(right=0.8)
plt.legend(loc=(1.01, 0.55), frameon=False)
plt.xlim((-3, 3))
plt.savefig("_generated/example_plot_simul2.png")
plt.close("all")
# --- Background-subtracted fit with the cstat statistic ---
# cstat needs non-negative counts, so clip the data at zero.
cy=y.copy()
cy[cy<0]=0
cfitter = SherpaFitter(statistic='cstat', optimizer='levmar', estmethod='covariance')
cmo=cfitter(fit_gg, x=x, y=cy, xbinsize=binsize, err=yerrs, bkg=y, bkg_scale=0.3)
plt.errorbar(x, cy, yerr=yerrs, xerr=binsize)
plt.plot(x, cmo(x))
plt.savefig("_generated/example_plot_bkg.png")
plt.close("all")
# --- 2D Gaussian example ---
np.random.seed(123456789)
x0low, x0high = 3000, 4000
x1low, x1high = 4000, 4800
dx = 15
x1, x0 = np.mgrid[x1low:x1high:dx, x0low:x0high:dx]
shape = x0.shape
x0, x1 = x0.flatten(), x1.flatten()
plt.rcParams['figure.figsize'] = (15, 5)
truth = Gaussian2D(x_mean=3512, y_mean=4418, x_stddev=150, y_stddev=150,
                   theta=20, amplitude=100)
mexp = truth(x0, x1).reshape(shape)
# Poisson-style noise field: |poisson draw - expectation|.
merr = abs(np.random.poisson(mexp) - mexp)
plt.subplot(1, 3, 1)
plt.imshow(mexp, origin='lower', cmap='viridis',
           extent=(x0low, x0high, x1low, x1high),
           interpolation='nearest', aspect='auto')
plt.title("True")
plt.subplot(1, 3, 2)
plt.imshow(merr, origin='lower', cmap='viridis',
           extent=(x0low, x0high, x1low, x1high),
           interpolation='nearest', aspect='auto')
plt.title("Noise")
plt.subplot(1, 3, 3)
plt.imshow((mexp + merr), origin='lower', cmap='viridis',
           extent=(x0low, x0high, x1low, x1high),
           interpolation='nearest', aspect='auto')
plt.title("True+Noise")
plt.savefig("_generated/example_plot_2d_data.png")
plt.close("all")
sfit = SherpaFitter(statistic="chi2")
fitmo = truth.copy()
fitmo.x_mean = 3650
fitmo.y_mean = 4250
fitmo.x_stddev = 100
fitmo.y_stddev = 100
fitmo.theta = 10
fitmo.amplitude = 50
fitmo = sfit(fitmo, x0.flatten(), x1.flatten(), mexp.flatten()+merr.flatten(), xbinsize=np.ones(x0.size)*dx, ybinsize=np.ones(x1.size)*dx, err=merr.flatten()+np.random.uniform(-0.5, 0.5, x0.size))
plt.subplot(1, 2, 1)
plt.imshow(fitmo(x0, x1).reshape(shape), origin='lower', cmap='viridis',
           extent=(x0low, x0high, x1low, x1high),
           interpolation='nearest', aspect='auto')
plt.title("Fit Model")
res = (mexp + merr) - fitmo(x0, x1).reshape(shape)
plt.subplot(1, 2, 2)
plt.imshow(res, origin='lower', cmap='viridis',
           extent=(x0low, x0high, x1low, x1high),
           interpolation='nearest', aspect='auto')
plt.title("Residuals")
plt.savefig("_generated/example_plot_2d_fit.png")
plt.close("all")
# --- MCMC example: fit a quadratic with the Cash statistic, then sample ---
from astropy.modeling.models import Polynomial1D
x = np.arange(0, 10, 0.1)
y = 2+3*x**2+0.5*x
sfit = SherpaFitter(statistic="Cash")
print(sfit(Polynomial1D(2), x, y))
sampler = sfit.get_sampler()
def lognorm(x):
    """Unnormalised log-normal prior, centred at log10(x) == 1, sigma 0.5.

    NOTE(review): the original inline comments mentioned nH / 10^20 cm^2,
    apparently copied from the sherpa XSPEC example — confirm intent.  Only
    the shape matters for use as an MCMC prior.
    """
    sigma = 0.5
    center = 1
    offset = np.log10(x) - center
    prefactor = sigma / np.sqrt(2 * np.pi)
    return prefactor * np.exp(-0.5 * offset * offset / (sigma * sigma))
# Attach the log-normal prior to parameter c0 and draw 20000 MCMC samples.
sampler.set_prior("c0", lognorm)
_ = sampler(20000)
def plotter(xx, yy, c):
    """Draw *yy* as a histogram-style step outline over bin edges *xx*.

    *xx* has one more entry than *yy* (numpy.histogram edge convention);
    each value is drawn as a horizontal segment across its bin, in colour *c*.
    """
    xs = []
    ys = []
    for lo, hi, level in zip(xx[:-1], xx[1:], yy):
        xs += [lo, hi]
        ys += [level, level]
    plt.plot(xs, ys, c=c)
def plot_hist(sampler, pname, nbins, c="b"):
    """Histogram the accepted MCMC samples of parameter *pname*.

    Draws the histogram outline in colour *c* plus a vertical line at the
    parameter's fitted value.
    """
    samples = sampler.parameters[pname][sampler.accepted]
    counts, edges = np.histogram(samples, nbins)
    plotter(edges, counts, c)
    plt.axvline(sampler.parameter_map[pname].val, c=c)
# Three stacked panels: posterior histograms for c0, c1 and c2.
# BUGFIX: the middle and bottom panels plotted 'c1' and 'c2' but were
# annotated "c2" and "c3"; labels now match the parameter actually plotted
# (consistent with the CDF figure below).
plt.figure(figsize=(3.2, 6))
plt.subplot(311)
plot_hist(sampler, 'c0', 100, 'k')
plt.text(0.1, 350, "c0")
plt.subplot(312)
plot_hist(sampler, 'c1', 100, 'r')
plt.text(-2.9, 350, "c1")
plt.ylabel("Number of accepted fits")
plt.subplot(313)
plot_hist(sampler, 'c2', 100, 'b')
plt.text(2.61, 300, "c2")
plt.xlabel("Parameter value")
plt.subplots_adjust(left=0.2)
plt.savefig("_generated/example_plot_mcmc_hist.png")
plt.close("all")
def plot_cdf(sampler, pname, nbins, c="b", sigfrac=0.682689):
    """Plot the empirical CDF of accepted samples for parameter *pname*.

    Solid vertical line: fitted parameter value.  Dashed vertical lines:
    the median and the lower/upper bounds of the central *sigfrac*
    credible interval (default: 1-sigma, 68.27%).
    """
    samples = sampler.parameters[pname][sampler.accepted]
    counts, edges = np.histogram(samples, nbins)
    cdf = np.cumsum(counts) / float(counts.sum())
    plotter(edges, cdf, c)
    plt.axvline(sampler.parameter_map[pname].val, c=c)

    def mark(level):
        # Dashed line at the bin-centre whose CDF value is closest to *level*.
        idx = np.argmin(abs(cdf - level))
        plt.axvline((edges[idx] + edges[idx + 1]) / 2, ls="--", c=c)

    mark(0.5)
    mark((1 - sigfrac) / 2.0)
    mark((1 + sigfrac) / 2.0)
# Three stacked panels: posterior CDFs for c0, c1 and c2.
plt.figure(figsize=(3, 6))
plt.subplot(311)
plot_cdf(sampler, 'c0', 100, 'k')
plt.text(0.1, 0.89, "c0")
plt.subplot(312)
plot_cdf(sampler, 'c1', 100, 'r')
plt.text(-2.9, 0.89, "c1")
plt.ylabel("CDF")
plt.subplot(313)
plot_cdf(sampler, 'c2', 100, 'b')
plt.text(2.61, 0.89, "c2")
plt.xlabel("Parameter value")
plt.subplots_adjust(left=0.2)
plt.savefig("_generated/example_plot_mcmc_cdf.png")
plt.close("all")
print("Done")
```
from astropy.modeling.core import Fittable1DModel
from sherpa.utils import interpolate
from sherpa.astro.utils import rmf_fold
from astropy.modeling.models import Gaussian1D,Gaussian2D
import numpy as np
%pylab inline
gmodel = Gaussian1D(mean=30,amplitude=10,stddev=5)
x_in = np.linspace(1,100,200)
y_in = gmodel(x_in)
plt.plot(x_in,y_in)
x_rsp = np.linspace(1,100,200)
y_rsp = np.zeros_like(x_rsp)
y_rsp[(x_rsp > 30) & (x_rsp < 60)] = 1
plt.plot(x_rsp, y_rsp)
_grp = np.ones_like(x_in)
_fch = np.arange(1,x_in.max())
_nch = np.ones_like(y_in)
_rsp = np.zeros_like(y_in)
rmf_fold(y_in,_grp,_fch,_nch,np.repeat(y_rsp,len(x_in)),len(x_in),1)
print(np.tile(_rsp,len(x_in)))
```
# Do not remove the following comment; it is used by
# astropy_helpers.version_helpers to determine the beginning of the code in
# this module
# BEGIN
import locale
import os
import subprocess
import warnings
def _decode_stdio(stream):
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
stdio_encoding = 'utf-8'
try:
text = stream.decode(stdio_encoding)
except UnicodeDecodeError:
# Final fallback
text = stream.decode('latin1')
return text
def update_git_devstr(version, path=None):
    """Refresh the git revision suffix of a ``.dev`` version string.

    If *path* is inside a git working copy and *version* contains ``dev``,
    the part after ``.dev`` is replaced with the current commit count.
    Otherwise *version* is returned unchanged (release versions, non-git
    checkouts, or environments where git cannot be run).
    """
    try:
        # Cheap probe: empty string means we are not in a git repo.
        sha = get_git_devstr(sha=True, show_warning=False, path=path)
    except OSError:
        return version
    if not sha:
        return version
    if 'dev' not in version:
        # Already a true/release version; leave it alone.
        return version
    base = version.split('.dev', 1)[0]
    revcount = get_git_devstr(sha=False, show_warning=False, path=path)
    return base + '.dev' + revcount
def get_git_devstr(sha=False, show_warning=True, path=None):
    """
    Determines the number of revisions in this repository.
    Parameters
    ----------
    sha : bool
        If True, the full SHA1 hash will be returned. Otherwise, the total
        count of commits in the repository will be used as a "revision
        number".
    show_warning : bool
        If True, issue a warning if git returns an error code, otherwise errors
        pass silently.
    path : str or None
        If a string, specifies the directory to look in to find the git
        repository.  If `None`, the current working directory is used, and must
        be the root of the git repository.
        If given a filename it uses the directory containing that file.
    Returns
    -------
    devversion : str
        Either a string with the revision number (if `sha` is False), the
        SHA1 hash of the current commit (if `sha` is True), or an empty string
        if git version info could not be identified.
    """
    if path is None:
        path = os.getcwd()
        # When defaulting to the cwd, require it to be the repo root itself.
        if not _get_repo_path(path, levels=0):
            return ''
    if not os.path.isdir(path):
        # A filename was given; git must be run from its directory.
        path = os.path.abspath(os.path.dirname(path))
    if sha:
        # Faster for getting just the hash of HEAD
        cmd = ['rev-parse', 'HEAD']
    else:
        cmd = ['rev-list', '--count', 'HEAD']
    def run_git(cmd):
        # Returns (returncode, stdout, stderr); returncode is None when git
        # itself could not be executed.
        try:
            p = subprocess.Popen(['git'] + cmd, cwd=path,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except OSError as e:
            if show_warning:
                warnings.warn('Error running git: ' + str(e))
            return (None, b'', b'')
        if p.returncode == 128:
            # git's exit code for "not a repository".
            if show_warning:
                warnings.warn('No git repository present at {0!r}! Using '
                              'default dev version.'.format(path))
            return (p.returncode, b'', b'')
        if p.returncode == 129:
            # git's exit code for an unrecognized command-line option.
            if show_warning:
                warnings.warn('Your git looks old (does it support {0}?); '
                              'consider upgrading to v1.7.2 or '
                              'later.'.format(cmd[0]))
            return (p.returncode, stdout, stderr)
        elif p.returncode != 0:
            if show_warning:
                warnings.warn('Git failed while determining revision '
                              'count: {0}'.format(_decode_stdio(stderr)))
            return (p.returncode, stdout, stderr)
        return p.returncode, stdout, stderr
    returncode, stdout, stderr = run_git(cmd)
    if not sha and returncode == 129:
        # git returns 129 if a command option failed to parse; in
        # particular this could happen in git versions older than 1.7.2
        # where the --count option is not supported
        # Also use --abbrev-commit and --abbrev=0 to display the minimum
        # number of characters needed per-commit (rather than the full hash)
        cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
        returncode, stdout, stderr = run_git(cmd)
        # Fall back on the old method of getting all revisions and counting
        # the lines
        if returncode == 0:
            return str(stdout.count(b'\n'))
        else:
            return ''
    elif sha:
        # Full SHA1 hashes are exactly 40 hex characters.
        return _decode_stdio(stdout)[:40]
    else:
        return _decode_stdio(stdout).strip()
def _get_repo_path(pathname, levels=None):
"""
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
repository and is returned if so.
Returns `None` if the given path could not be determined to belong to a git
repo.
"""
if os.path.isfile(pathname):
current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
current_dir = os.path.abspath(pathname)
else:
return None
current_level = 0
while levels is None or current_level <= levels:
if os.path.exists(os.path.join(current_dir, '.git')):
return current_dir
current_level += 1
if current_dir == os.path.dirname(current_dir):
break
current_dir = os.path.dirname(current_dir)
return None | /saba-0.1.1a0.tar.gz/saba-0.1.1a0/astropy_helpers/astropy_helpers/git_helpers.py | 0.55447 | 0.156105 | git_helpers.py | pypi |
import inspect
import sys
import re
import os
from warnings import warn
from sphinx.ext.autosummary.generate import find_autosummary_in_docstring
def find_mod_objs(modname, onlylocals=False):
    """Return all the public attributes of the module named *modname*.

    .. note::
        The returned lists do *not* include submodules of `modname`, nor
        private attributes (those that begin with '_' or are not listed in
        ``__all__``).

    Parameters
    ----------
    modname : str
        The name of the module to search.
    onlylocals : bool
        If True, only attributes that are either members of `modname` OR one
        of its modules or subpackages will be included.

    Returns
    -------
    localnames : list of str
        The attribute names as they appear in the module `modname`.
    fqnames : list of str
        The fully qualified names of the attributes (e.g.
        ``astropy.utils.misc.find_mod_objs``).  For simple variables this is
        based on the local name; for functions or classes it can differ if
        they are defined elsewhere and merely referenced in `modname`.
    objs : list of objects
        The attribute values themselves, in the same order.
    """
    __import__(modname)
    mod = sys.modules[modname]

    if hasattr(mod, '__all__'):
        public = mod.__all__
    else:
        public = [name for name in dir(mod) if name[0] != '_']

    # Drop submodules, keeping names and values in lockstep.
    localnames = []
    objs = []
    for name in public:
        value = mod.__dict__[name]
        if not inspect.ismodule(value):
            localnames.append(name)
            objs.append(value)

    # Fully qualified names come from the object itself when possible.
    fqnames = []
    for name, value in zip(localnames, objs):
        if hasattr(value, '__module__') and hasattr(value, '__name__'):
            fqnames.append(value.__module__ + '.' + value.__name__)
        else:
            fqnames.append(modname + '.' + name)

    if onlylocals:
        keep = [fqn.startswith(modname) for fqn in fqnames]
        localnames = [n for n, k in zip(localnames, keep) if k]
        fqnames = [n for n, k in zip(fqnames, keep) if k]
        objs = [o for o, k in zip(objs, keep) if k]

    return localnames, fqnames, objs
def find_autosummary_in_lines_for_automodsumm(lines, module=None, filename=None):
    """Find out what items appear in autosummary:: directives in the
    given lines.
    Returns a list of (name, toctree, template, inherited_members)
    where *name* is a name
    of an object and *toctree* the :toctree: path of the corresponding
    autosummary directive (relative to the root of the file name),
    *template* the value of the :template: option, and *inherited_members*
    is the value of the :inherited-members: option.
    *toctree*, *template*, and *inherited_members* are ``None`` if the
    directive does not have the corresponding options set.
    .. note::
        This is a slightly modified version of
        ``sphinx.ext.autosummary.generate.find_autosummary_in_lines``
        which recognizes the ``inherited-members`` option.
    """
    # Directive and option patterns; the group in autosummary_re captures the
    # directive's indentation so nested content can be recognized below.
    autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
    automodule_re = re.compile(
        r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
    module_re = re.compile(
        r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
    autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
    toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
    template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
    inherited_members_arg_re = re.compile(r'^\s+:inherited-members:\s*$')
    no_inherited_members_arg_re = re.compile(r'^\s+:no-inherited-members:\s*$')
    documented = []
    toctree = None
    template = None
    inherited_members = None
    current_module = module
    in_autosummary = False
    base_indent = ""
    # Line-by-line state machine: inside an autosummary block, first consume
    # option lines, then item names, until dedent ends the block.
    for line in lines:
        if in_autosummary:
            m = toctree_arg_re.match(line)
            if m:
                toctree = m.group(1)
                if filename:
                    # Make the toctree path relative to the source file.
                    toctree = os.path.join(os.path.dirname(filename),
                                           toctree)
                continue
            m = template_arg_re.match(line)
            if m:
                template = m.group(1).strip()
                continue
            m = inherited_members_arg_re.match(line)
            if m:
                inherited_members = True
                continue
            m = no_inherited_members_arg_re.match(line)
            if m:
                inherited_members = False
                continue
            if line.strip().startswith(':'):
                # NOTE(review): warning with the raw option line looks like
                # leftover debugging output — confirm intent.
                warn(line)
                continue  # skip options
            m = autosummary_item_re.match(line)
            if m:
                name = m.group(1).strip()
                if name.startswith('~'):
                    # '~' is sphinx's "show short name" prefix; strip it.
                    name = name[1:]
                if current_module and \
                        not name.startswith(current_module + '.'):
                    name = "%s.%s" % (current_module, name)
                documented.append((name, toctree, template,
                                   inherited_members))
                continue
            if not line.strip() or line.startswith(base_indent + " "):
                continue
            # Dedented non-blank line: the autosummary block is over.
            in_autosummary = False
        m = autosummary_re.match(line)
        if m:
            in_autosummary = True
            base_indent = m.group(1)
            toctree = None
            template = None
            continue
        m = automodule_re.search(line)
        if m:
            current_module = m.group(1).strip()
            # recurse into the automodule docstring
            documented.extend(find_autosummary_in_docstring(
                current_module, filename=filename))
            continue
        m = module_re.match(line)
        if m:
            current_module = m.group(2)
            continue
    return documented
# Implementation note:
# The 'automodapi' directive is not actually implemented as a docutils
# directive. Instead, this extension searches for the 'automodapi' text in
# all sphinx documents, and replaces it where necessary from a template built
# into this extension. This is necessary because automodsumm (and autosummary)
# use the "builder-inited" event, which comes before the directives are
# actually built.
import inspect
import os
import re
import sys
from .utils import find_mod_objs
# Python 2/3 compatibility shim for the text type used when deciding whether
# reprocessed source needs decoding below.
if sys.version_info[0] == 3:
    text_type = str
else:
    text_type = unicode
# reST templates expanded by automodapi_replace(). The {..} fields are filled
# with str.format; *hds fields carry the heading-underline characters.
automod_templ_modheader = """
{modname} {pkgormod}
{modhds}{pkgormodhds}
{automoduleline}
"""
automod_templ_classes = """
Classes
{clshds}
.. automodsumm:: {modname}
    :classes-only:
    {clsfuncoptions}
"""
automod_templ_funcs = """
Functions
{funchds}
.. automodsumm:: {modname}
    :functions-only:
    {clsfuncoptions}
"""
automod_templ_inh = """
Class Inheritance Diagram
{clsinhsechds}
.. automod-diagram:: {modname}
    :private-bases:
    :parts: 1
    {allowedpkgnms}
"""
# Matches a ".. automodapi:: <module>" directive; group 1 is the module name,
# group 2 the (possibly empty) run of ":option: value" lines that follow.
_automodapirex = re.compile(r'^(?:\s*\.\.\s+automodapi::\s*)([A-Za-z0-9_.]+)'
                            r'\s*$((?:\n\s+:[a-zA-Z_\-]+:.*$)*)',
                            flags=re.MULTILINE)
# the last group of the above regex is intended to go into finall with the below
_automodapiargsrex = re.compile(r':([a-zA-Z_\-]+):(.*)$', flags=re.MULTILINE)
def automodapi_replace(sourcestr, app, dotoctree=True, docname=None,
                       warnings=True):
    """
    Replaces `sourcestr`'s entries of ".. automodapi::" with the
    automodapi template form based on provided options.
    This is used with the sphinx event 'source-read' to replace
    `automodapi`_ entries before sphinx actually processes them, as
    automodsumm needs the code to be present to generate stub
    documentation.
    Parameters
    ----------
    sourcestr : str
        The string with sphinx source to be checked for automodapi
        replacement.
    app : `sphinx.application.Application`
        The sphinx application.
    dotoctree : bool
        If `True`, a ":toctree:" option will be added in the "..
        automodsumm::" sections of the template, pointing to the
        appropriate "generated" directory based on the Astropy convention
        (e.g. in ``docs/api``)
    docname : str
        The name of the file for this `sourcestr` (if known - if not, it
        can be `None`). If not provided and `dotoctree` is `True`, the
        generated files may end up in the wrong place.
    warnings : bool
        If `False`, all warnings that would normally be issued are
        silenced.
    Returns
    -------
    newstr : str
        The string with automodapi entries replaced with the correct
        sphinx markup.
    """
    # split() with a 2-group pattern yields:
    # [text, modname, options, text, modname, options, ..., text]
    spl = _automodapirex.split(sourcestr)
    if len(spl) > 1:  # automodapi is in this document
        if dotoctree:
            toctreestr = ':toctree: '
            dirnm = app.config.automodapi_toctreedirnm
            if not dirnm.endswith("/"):
                dirnm += "/"
            if docname is not None:
                # Make the toctree path relative to this document's depth.
                toctreestr += '../' * docname.count('/') + dirnm
            else:
                toctreestr += dirnm
        else:
            toctreestr = ''
        newstrs = [spl[0]]
        # One iteration per matched automodapi directive.
        for grp in range(len(spl) // 3):
            modnm = spl[grp * 3 + 1]
            # find where this is in the document for warnings
            if docname is None:
                location = None
            else:
                location = (docname, spl[0].count('\n'))
            # initialize default options
            toskip = []
            inhdiag = maindocstr = top_head = True
            hds = '-^'
            allowedpkgnms = []
            # look for actual options
            unknownops = []
            inherited_members = None
            for opname, args in _automodapiargsrex.findall(spl[grp * 3 + 2]):
                if opname == 'skip':
                    toskip.append(args.strip())
                elif opname == 'no-inheritance-diagram':
                    inhdiag = False
                elif opname == 'no-main-docstr':
                    maindocstr = False
                elif opname == 'headings':
                    hds = args
                elif opname == 'no-heading':
                    top_head = False
                elif opname == 'allowed-package-names':
                    allowedpkgnms.append(args.strip())
                elif opname == 'inherited-members':
                    inherited_members = True
                elif opname == 'no-inherited-members':
                    inherited_members = False
                else:
                    unknownops.append(opname)
            # join all the allowedpkgnms
            if len(allowedpkgnms) == 0:
                allowedpkgnms = ''
                onlylocals = True
            else:
                allowedpkgnms = ':allowed-package-names: ' + ','.join(allowedpkgnms)
                # NOTE(review): onlylocals becomes the option *string* here,
                # which downstream treats as a truthy prefix filter — confirm.
                onlylocals = allowedpkgnms
            # get the two heading chars
            if len(hds) < 2:
                msg = 'Not enough headings (got {0}, need 2), using default -^'
                if warnings:
                    app.warn(msg.format(len(hds)), location)
                hds = '-^'
            h1, h2 = hds.lstrip()[:2]
            # tell sphinx that the remaining args are invalid.
            if len(unknownops) > 0 and app is not None:
                opsstrs = ','.join(unknownops)
                msg = 'Found additional options ' + opsstrs + ' in automodapi.'
                if warnings:
                    app.warn(msg, location)
            ispkg, hascls, hasfuncs = _mod_info(modnm, toskip, onlylocals=onlylocals)
            # add automodule directive only if no-main-docstr isn't present
            if maindocstr:
                automodline = '.. automodule:: {modname}'.format(modname=modnm)
            else:
                automodline = ''
            if top_head:
                newstrs.append(automod_templ_modheader.format(
                    modname=modnm,
                    modhds=h1 * len(modnm),
                    pkgormod='Package' if ispkg else 'Module',
                    pkgormodhds=h1 * (8 if ispkg else 7),
                    automoduleline=automodline))
            else:
                newstrs.append(automod_templ_modheader.format(
                    modname='',
                    modhds='',
                    pkgormod='',
                    pkgormodhds='',
                    automoduleline=automodline))
            # construct the options for the class/function sections
            # start out indented at 4 spaces, but need to keep the indentation.
            clsfuncoptions = []
            if toctreestr:
                clsfuncoptions.append(toctreestr)
            if toskip:
                clsfuncoptions.append(':skip: ' + ','.join(toskip))
            if allowedpkgnms:
                clsfuncoptions.append(allowedpkgnms)
            if hascls:  # This makes no sense unless there are classes.
                if inherited_members is True:
                    clsfuncoptions.append(':inherited-members:')
                if inherited_members is False:
                    clsfuncoptions.append(':no-inherited-members:')
            clsfuncoptionstr = '\n    '.join(clsfuncoptions)
            if hasfuncs:
                newstrs.append(automod_templ_funcs.format(
                    modname=modnm,
                    funchds=h2 * 9,
                    clsfuncoptions=clsfuncoptionstr))
            if hascls:
                newstrs.append(automod_templ_classes.format(
                    modname=modnm,
                    clshds=h2 * 7,
                    clsfuncoptions=clsfuncoptionstr))
            if inhdiag and hascls:
                # add inheritance diagram if any classes are in the module
                newstrs.append(automod_templ_inh.format(
                    modname=modnm,
                    clsinhsechds=h2 * 25,
                    allowedpkgnms=allowedpkgnms))
            # Text following this directive (up to the next match).
            newstrs.append(spl[grp * 3 + 3])
        newsourcestr = ''.join(newstrs)
        if app.config.automodapi_writereprocessed:
            # Debug aid: dump the expanded source next to the docs sources.
            # sometimes they are unicode, sometimes not, depending on how
            # sphinx has processed things
            if isinstance(newsourcestr, text_type):
                ustr = newsourcestr
            else:
                ustr = newsourcestr.decode(app.config.source_encoding)
            if docname is None:
                with open(os.path.join(app.srcdir, 'unknown.automodapi'), 'a') as f:
                    f.write('\n**NEW DOC**\n\n')
                    f.write(ustr)
            else:
                env = app.builder.env
                # Determine the filename associated with this doc (specifically
                # the extension)
                filename = docname + os.path.splitext(env.doc2path(docname))[1]
                filename += '.automodapi'
                with open(os.path.join(app.srcdir, filename), 'w') as f:
                    f.write(ustr)
        return newsourcestr
    else:
        return sourcestr
def _mod_info(modname, toskip=None, onlylocals=True):
    """
    Determines if a module is a module or a package and whether or not
    it has classes or functions.

    Parameters
    ----------
    modname : str
        Name of the module to inspect (imported as a side effect of
        `find_mod_objs`).
    toskip : list of str, optional
        Local attribute names to ignore when looking for classes/functions.
    onlylocals : bool or str
        Passed straight through to `find_mod_objs`.

    Returns
    -------
    ispkg, hascls, hasfunc : bool
        Whether the module is a package, and whether it contains at least
        one class / one routine (excluding `toskip` entries).
    """
    # Use a None sentinel instead of a mutable [] default argument.
    if toskip is None:
        toskip = []
    hascls = hasfunc = False
    for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)):
        if localnm not in toskip:
            hascls = hascls or inspect.isclass(obj)
            hasfunc = hasfunc or inspect.isroutine(obj)
            if hascls and hasfunc:
                # Both found; no need to keep scanning.
                break
    # find_mod_objs has already imported modname
    # TODO: There is probably a cleaner way to do this, though this is pretty
    # reliable for all Python versions for most cases that we care about.
    pkg = sys.modules[modname]
    ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and
             os.path.split(pkg.__file__)[1].startswith('__init__.py'))
    return ispkg, hascls, hasfunc
def process_automodapi(app, docname, source):
    """Sphinx 'source-read' hook: expand automodapi directives in place.

    *source* is a one-element list per the Sphinx event contract; the
    expanded text is written back into it.
    """
    source[0] = automodapi_replace(source[0], app, dotoctree=True,
                                   docname=docname)
def setup(app):
    # Sphinx extension entry point: register dependencies, hooks, and config.
    # need automodsumm for automodapi
    app.setup_extension('astropy_helpers.sphinx.ext.automodsumm')
    # rewrite automodapi directives as each source file is read
    app.connect('source-read', process_automodapi)
    # directory name (relative to the docs source dir) for generated API docs
    app.add_config_value('automodapi_toctreedirnm', 'api', True)
    # when True, dump the post-processed source to *.automodapi files
    app.add_config_value('automodapi_writereprocessed', False, True)
# Example PyPI (Python Package Index) Package & Tutorial / Instruction / Workflow for 2021
[](https://pypi.org/project/example-pypi-package/) [](https://github.com/tomchen/sababa/releases) [](https://github.com/tomchen/sababa/actions) [](https://github.com/tomchen/sababa/blob/main/LICENSE)
This is an example [PyPI](https://pypi.org/) (Python Package Index) package set up with automated tests and package publishing workflow using GitHub Actions CI/CD. It is made primarily for GitHub + VS Code (Windows / Mac / Linux) users who are about to write and publish their first PyPI package. The package could serve as a starter / boilerplate / demo and the tutorial could give you a quick and concise explanation of how to solve some small but annoying problems you might encounter, such as package / module name confusion, and VS Code test configuration issues.
<details><summary><strong>Differences from pypa/sampleproject (click to show/hide)</strong></summary>
This example package is inspired by / based on the [official sample project pypa/sampleproject](https://github.com/pypa/sampleproject), but this package:
- is a simplified version of pypa/sampleproject (and the [official Python Packaging User Guide](https://packaging.python.org/))
- uses GitHub Actions for both testing and publishing, instead of Travis CI
- is tested when pushing to the `master` or `main` branch, and is published when a release is created
- includes test files in the source distribution
- uses **setup.cfg** for [version single-sourcing](https://packaging.python.org/guides/single-sourcing-package-version/) (setuptools 46.4.0+)
- has **.vscode\settings.json** and **vscode.env** which adds **src/** folder to `PYTHONPATH`, so that test files don't have linting errors and may run with pytest in VS Code
- does not use flake8 for automated linting - it is sometimes too strict and inflexible, you may use pylint locally instead
- has this tutorial that covers everything you need to know in one page. Everything that might not be very useful, is hidden in collapsible sections that you can click to show
- has **[.editorconfig](https://editorconfig.org/#download)** file
</details>
## Make necessary changes
### Use as a template
[](https://github.com/tomchen/sababa/generate)
(Click the above button to use this example package as a template for your new GitHub repo, this will initialize a new repository and my commits will not be in your git history)
(If you do not use GitHub, you can [download the archive of the example package](https://github.com/tomchen/sababa/archive/main.zip))
### Package, module name
Many projects use the same name for both the package and the module, and you could definitely do that. But this example package and its module's names may differ: `sababa` and `sababa`.
Open `sababa` folder with Visual Studio Code, <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>F</kbd> (Windows / Linux) or <kbd>Cmd</kbd> + <kbd>Shift</kbd> + <kbd>F</kbd> (MacOS) to find all occurrences of both names and replace them with your package and module's names. Also remember to change the name of the folder **src/sababa**.
Simply and very roughly speaking, package name is used in `pip install <PACKAGENAME>` and module name is used in `import <MODULENAME>`. Both names should consist of lowercase basic letters (a-z). They may have underscores (`_`) if you really need them. Hyphen-minus (`-`) should not be used.
You'll also need to make sure the URL "https://pypi.org/project/example-pypi-package/" (replace `example-pypi-package` by your package name, with all `_` becoming `-`) is not occupied.
<details><summary><strong>Details on naming convention (click to show/hide)</strong></summary>
Underscores (`_`) can be used but such use is discouraged. Numbers can be used if the name does not start with a number, but such use is also discouraged.
Name starting with a number and/or containing hyphen-minus (`-`) should not be used: although technically legal, such name causes a lot of trouble − users have to use `importlib` to import it.
Don't be fooled by the URL "[pypi.org/project/example-pypi-package/](https://pypi.org/project/example-pypi-package/)" and the name "example-pypi-package" on pypi.org. pypi.org and pip system convert all `_` to `-` and use the latter on the website / in `pip` command, but the real name is still with `_`, which users should use when importing the package.
There's also [namespace](https://packaging.python.org/guides/packaging-namespace-packages/) to use if you need sub-packages.
</details>
### Other changes
Make necessary changes in **setup.py**.
The package's version number `__version__` is in **src/sababa/\_\_init\_\_.py**. You may want to change that.
The example package is designed to be compatible with Python 3.6, 3.7, 3.8, 3.9, and will be tested against these versions. If you need to change the version range, you should change:
- `classifiers`, `python_requires` in **setup.py**
- `envlist` in **tox.ini**
- `matrix: python:` in **.github/workflows/test.yml**
If you plan to upload to [TestPyPI](https://test.pypi.org/) which is a playground of [PyPI](https://pypi.org/) for testing purpose, change `twine upload --repository pypi dist/*` to `twine upload --repository testpypi dist/*` in the file **.github/workflows/release.yml**.
## Development
### pip
pip is a Python package manager. You already have pip if you use Python 3.4 and later version which include it by default. Read [this](https://pip.pypa.io/en/stable/installing/#do-i-need-to-install-pip) to know how to check whether pip is installed. Read [this](https://pip.pypa.io/en/stable/installing/#installing-with-get-pip-py) if you need to install it.
### Use VS Code
Visual Studio Code is the most popular code editor today, our example package is configured to work with VS Code.
Install VS Code extension "[Python](https://marketplace.visualstudio.com/items?itemName=ms-python.python)".
"Python" VS Code extension will suggest you install pylint. Also, the example package is configured to use pytest with VS Code + Python extensions, so, install pylint and pytest:
```bash
pip install pylint pytest
```
(It's likely you will be prompted to install them, if that's the case, you don't need to type and execute the command)
**vscode.env**'s content is now `PYTHONPATH=/;src/;${PYTHONPATH}` which is good for Windows. If you use Linux or MacOS, you need to change it to `PYTHONPATH=/:src/:${PYTHONPATH}` (replacing `;` with `:`). If the PATH is not properly set, you'll see linting errors in test files and pytest won't be able to run **tests/test\_\*.py** files correctly.
Close and reopen VS Code. You can now click the lab flask icon in the left menu and run all tests there, with pytest. pytest seems better than the standard unittest framework, it supports `unittest` thus you can keep using `import unittest` in your test files.
The example package also has a **.editorconfig** file. You may install VS Code extension "[EditorConfig for VS Code](https://marketplace.visualstudio.com/items?itemName=EditorConfig.EditorConfig)" that uses the file. With current configuration, the EditorConfig tool can automatically use spaces (4 spaces for .py, 2 for others) for indentation, set `UTF-8` encoding, `LF` end of lines, trim trailing whitespaces in non Markdown files, etc.
In VS Code, you can go to File -> Preferences -> Settings, type "Python Formatting Provider" in the search box, and choose one of the three Python code formatting tools (autopep8, black and yapf), you'll be prompted to install it. The shortcuts for formatting of a code file are <kbd>Shift</kbd> + <kbd>Alt</kbd> + <kbd>F</kbd> (Windows); <kbd>Shift</kbd> + <kbd>Option (Alt)</kbd> + <kbd>F</kbd> (MacOS); <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>I</kbd> (Linux).
### Write your package
In **src/sababa/** (`sababa` should have been replaced by your module name) folder, rename **module1.py** and write your code in it. Add more module .py files if you need to.
### Write your tests
In **tests/** folder, rename **test_module1.py** (to **test\_\*.py**) and write your unit test code (with [unittest](https://docs.python.org/3/library/unittest.html)) in it. Add more **test\_\*.py** files if you need to.
<details><summary><strong>The testing tool `tox` will be used in the automation with GitHub Actions CI/CD. If you want to use `tox` locally, click to read the "Use tox locally" section</strong></summary>
### Use tox locally
Install tox and run it:
```bash
pip install tox
tox
```
In our configuration, tox runs a check of source distribution using [check-manifest](https://pypi.org/project/check-manifest/) (which requires your repo to be git-initialized (`git init`) and added (`git add .`) at least), setuptools's check, and unit tests using pytest. You don't need to install check-manifest and pytest though, tox will install them in a separate environment.
The automated tests are run against several Python versions, but on your machine, you might be using only one version of Python, if that is Python 3.9, then run:
```bash
tox -e py39
```
</details>
If you add more files to the root directory (**sababa/**), you'll need to add your file to `check-manifest --ignore` list in **tox.ini**.
<details><summary><strong>Thanks to GitHub Actions' automated process, you don't need to generate distribution files locally. But if you insist, click to read the "Generate distribution files" section</strong></summary>
## Generate distribution files
### Install tools
Install or upgrade `setuptools` and `wheel`:
```bash
python -m pip install --user --upgrade setuptools wheel
```
(If `python3` is the command on your machine, change `python` to `python3` in the above command, or add a line `alias python=python3` to **~/.bashrc** or **~/.bash_aliases** file if you use bash on Linux)
### Generate `dist`
From `sababa` directory, run the following command, in order to generate production version for source distribution (sdist) in `dist` folder:
```bash
python setup.py sdist bdist_wheel
```
### Install locally
Optionally, you can install dist version of your package locally before uploading to [PyPI](https://pypi.org/) or [TestPyPI](https://test.pypi.org/):
```bash
pip install dist/sababa-0.1.0.tar.gz
```
(You may need to uninstall existing package first:
```bash
pip uninstall sababa
```
There may be several installed packages with the same name, so run `pip uninstall` multiple times until it says no more package to remove.)
</details>
## Upload to PyPI
### Register on PyPI and get token
Register an account on [PyPI](https://pypi.org/), go to [Account settings § API tokens](https://pypi.org/manage/account/#api-tokens), "Add API token". The PyPI token only appears once, copy it somewhere. If you missed it, delete the old and add a new token.
(Register a [TestPyPI](https://test.pypi.org/) account if you are uploading to TestPyPI)
### Set secret in GitHub repo
On the page of your newly created or existing GitHub repo, click **Settings** -> **Secrets** -> **New repository secret**, the **Name** should be `PYPI_API_TOKEN` and the **Value** should be your PyPI token (which starts with `pypi-`).
### Push or release
The example package has automated tests and upload (publishing) already set up with GitHub Actions:
- Every time you `git push` or a pull request is submitted on your `master` or `main` branch, the package is automatically tested against the desired Python versions with GitHub Actions.
- Every time a new release (either the initial version or an updated version) is created, the latest version of the package is automatically uploaded to PyPI with GitHub Actions.
### View it on pypi.org
After your package is published on PyPI, go to [https://pypi.org/project/example-pypi-package/](https://pypi.org/project/example-pypi-package/) (`_` becomes `-`). Copy the command on the page, execute it to download and install your package from PyPI. (or test.pypi.org if you use that)
If you want to modify the description / README of your package on pypi.org, you have to publish a new version.
<details><summary><strong>If you publish your package to PyPI manually, click to read</strong></summary>
### Install Twine
Install or upgrade Twine:
```bash
python -m pip install --user --upgrade twine
```
Create a **.pypirc** file in your **$HOME** (**~**) directory, its content should be:
```ini
[pypi]
username = __token__
password = <PyPI token>
```
(Use `[testpypi]` instead of `[pypi]` if you are uploading to [TestPyPI](https://test.pypi.org/))
Replace `<PyPI token>` with your real PyPI token (which starts with `pypi-`).
(if you don't manually create **$HOME/.pypirc**, you will be prompted for a username (which should be `__token__`) and password (which should be your PyPI token) when you run Twine)
### Upload
Run Twine to upload all of the archives under **dist** folder:
```bash
python -m twine upload --repository pypi dist/*
```
(use `testpypi` instead of `pypi` if you are uploading to [TestPyPI](https://test.pypi.org/))
### Update
When you finished developing a newer version of your package, do the following things.
Modify the version number `__version__` in **src/sababa/\_\_init\_\_.py**.
Delete all old versions in **dist**.
Run the following command again to regenerate **dist**:
```bash
python setup.py sdist bdist_wheel
```
Run the following command again to upload **dist**:
```bash
python -m twine upload --repository pypi dist/*
```
(use `testpypi` instead of `pypi` if needed)
</details>
## References
- [Python Packaging Authority (PyPA)'s sample project](https://github.com/pypa/sampleproject)
- [PyPA's Python Packaging User Guide](https://packaging.python.org/tutorials/packaging-projects/)
- [Stackoverflow questions and answers](https://stackoverflow.com/questions/41093648/how-to-test-that-pypi-install-will-work-before-pushing-to-pypi-python)
- [GitHub Actions Guides: Building and testing Python](https://docs.github.com/en/free-pro-team@latest/actions/guides/building-and-testing-python)
Btw, if you want to publish TypeScript (JavaScript) package to the npm registry, go to [Example TypeScript Package ready to be published on npm for 2021](https://github.com/tomchen/example-typescript-package).
| /sababa-0.0.3.tar.gz/sababa-0.0.3/README-original.md | 0.499268 | 0.926769 | README-original.md | pypi |
import os
import numpy as np
import pandas as pd
import xarray as xr
from .io import COL_GID
from .io import COL_MID
from .io import COL_QSIM
__all__ = ['fdc', 'sfdc', 'precalc_sfdcs']
def fdc(flows: np.ndarray, steps: int = 101, col_name: str = 'Q') -> pd.DataFrame:
    """
    Compute flow duration curve (exceedance probabilities) from a list of flows

    Args:
        flows: array of flows (NaN values are ignored)
        steps: number of steps (exceedance probabilities) to use in the FDC
        col_name: name of the column in the returned dataframe

    Returns:
        pd.DataFrame with index 'p_exceed' and columns 'Q' (or col_name)
    """
    # exceedance probabilities from 100 down to 0, evenly spaced
    exceed_prob = np.linspace(100, 0, steps)
    # NOTE(review): p_exceed=100 is paired with np.nanpercentile(flows, 100)
    # (the maximum flow).  The usual exceedance convention would use the
    # (100 - p)th percentile; downstream code only compares matched ratios so
    # the pairing is self-consistent either way -- confirm before changing.
    fdc_flows = np.nanpercentile(flows, exceed_prob)
    df = pd.DataFrame(fdc_flows, columns=[col_name, ], index=exceed_prob)
    df.index.name = 'p_exceed'
    return df
def sfdc(sim_fdc: pd.DataFrame, obs_fdc: pd.DataFrame) -> pd.DataFrame:
    """
    Compute the scalar flow duration curve (exceedance probabilities) from two flow duration curves

    Args:
        sim_fdc: simulated flow duration curve
        obs_fdc: observed flow duration curve

    Returns:
        pd.DataFrame with index (exceedance probabilities) and a column of scalars
    """
    # assumes sim_fdc and obs_fdc share the same exceedance-probability index
    # and ordering so the element-wise ratio pairs matching probabilities
    # -- TODO confirm against callers; np.divide on a DataFrame with a plain
    # ndarray relies on pandas broadcasting behavior.
    scalars_df = pd.DataFrame(
        np.divide(sim_fdc, obs_fdc.values.flatten()),
        columns=['scalars', ],
        index=sim_fdc.index
    )
    # a zero observed flow yields inf; drop those probabilities entirely
    scalars_df.replace(np.inf, np.nan, inplace=True)
    scalars_df.dropna(inplace=True)
    return scalars_df
def precalc_sfdcs(assign_row: pd.DataFrame, gauge_data: str, hindcast_zarr: str) -> np.ndarray:
    """
    Compute monthly and full-record scalar flow duration curves for one assigned stream

    Args:
        assign_row: a single row from the assignment table
        gauge_data: string path to the directory of observed data
        hindcast_zarr: string path to the hindcast streamflow dataset

    Returns:
        np.ndarray of shape (13, steps): one scalar FDC per calendar month plus
        one for the full record
    """
    # read the simulated data for this stream's model ID
    hz = xr.open_mfdataset(hindcast_zarr, concat_dim='rivid', combine='nested', parallel=True, engine='zarr')
    sim_df = hz['Qout'][:, hz.rivid.values == int(assign_row[COL_MID])].values
    sim_df = pd.DataFrame(sim_df, index=pd.to_datetime(hz['time'].values), columns=[COL_QSIM])
    # restrict to the common hindcast period
    sim_df = sim_df[sim_df.index.year >= 1980]

    # read the observed data for this stream's gauge ID
    obs_df = pd.read_csv(os.path.join(gauge_data, f'{assign_row[COL_GID]}.csv'), index_col=0)
    obs_df.index = pd.to_datetime(obs_df.index)

    sim_fdcs = []
    obs_fdcs = []
    for month in range(1, 13):
        sim_fdcs.append(fdc(sim_df[sim_df.index.month == month].values.flatten()).values.flatten())
        obs_fdcs.append(fdc(obs_df[obs_df.index.month == month].values.flatten()).values.flatten())

    # bug fix: flatten the full-record FDCs like the monthly ones -- previously
    # these were appended as DataFrames (shape (steps, 1)), producing a ragged
    # list that np.array cannot stack into the (13, steps) array np.divide needs
    sim_fdcs.append(fdc(sim_df.values.flatten()).values.flatten())
    obs_fdcs.append(fdc(obs_df.values.flatten()).values.flatten())

    sim_fdcs = np.array(sim_fdcs)
    obs_fdcs = np.array(obs_fdcs)
    sfdcs = np.divide(sim_fdcs, obs_fdcs)
    return sfdcs
import glob
import logging
import math
import os
from collections.abc import Iterable
import joblib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kneed import KneeLocator
from natsort import natsorted
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import silhouette_samples
from .io import COL_CID
from .io import COL_MID
from .io import get_dir
from .io import list_cluster_files
from .io import read_table
from .io import write_table
__all__ = [
'cluster',
'generate',
'summarize_fit', 'calc_silhouette',
'plot_fit_metrics', 'plot_centers', 'plot_clusters', 'plot_silhouettes',
'predict_labels'
]
logger = logging.getLogger(__name__)
def cluster(plot: bool = False) -> None:
    """
    Train the k-means cluster models, compute fit metrics, and optionally
    render the diagnostic figures.

    Args:
        plot: when True, also generate plots after clustering

    Returns:
        None
    """
    logger.info('Generate Clusters')
    training_data = read_table("cluster_data").values
    generate(x=training_data)
    summarize_fit()
    # calc_silhouette(workdir, x=training_data, n_clusters=range(2, 10))

    if plot:
        logger.info('Create Plots')
        plot_clusters(x=training_data)
        plot_centers()
        plot_fit_metrics()
        # plot_silhouettes(workdir)
    return
def generate(x: np.ndarray = None, max_clusters: int = 13) -> None:
    """
    Fit a MiniBatchKMeans model for every cluster count from 2 through
    max_clusters and pickle each fitted model to the clusters directory.

    Args:
        x: a numpy array of the prepared FDC data
        max_clusters: maximum number of clusters to train

    Returns:
        None
    """
    if x is None:
        x = read_table('cluster_data').values

    # one model per candidate cluster count
    for k in range(2, max_clusters + 1):
        logger.info(f'Clustering n={k}')
        model = MiniBatchKMeans(n_clusters=k, init='k-means++', n_init=100)
        model.fit_predict(x)
        joblib.dump(model, os.path.join(get_dir('clusters'), f'kmeans-{k}.pickle'))
    return
def predict_labels(n_clusters: int, x: pd.DataFrame = None) -> pd.DataFrame:
    """
    Assign each stream's FDC to a cluster using a previously trained model
    and persist the resulting label table.

    Args:
        n_clusters: number of cluster model to use for prediction
        x: A dataframe with 1 row per FDC (stream) and 1 column per FDC value. Index is the stream's ID.

    Returns:
        pd.DataFrame mapping model IDs to cluster labels
    """
    if x is None:
        x = read_table('cluster_data')

    model_path = os.path.join(get_dir('clusters'), f'kmeans-{n_clusters}.pickle')
    kmeans = joblib.load(model_path)

    predictions = kmeans.predict(x.values)
    labels_df = pd.DataFrame(
        np.transpose([predictions, x.index]),
        columns=[COL_CID, COL_MID]
    )
    write_table(labels_df, 'cluster_table')
    return labels_df
def summarize_fit() -> None:
    """
    Collect fit statistics from every pickled k-means model, save each model's
    cluster centers to parquet, and write a summary table with the inertia
    elbow ("knee") marked.

    Returns:
        None
    """
    numbers, inertias, iterations = [], [], []
    labels = []

    for model_file in list_cluster_files(n_clusters='all'):
        logger.info(f'Post Processing {os.path.basename(model_file)}')
        kmeans = joblib.load(model_file)
        n_clusters = int(kmeans.n_clusters)
        labels.append(kmeans.labels_.flatten())

        # centroids table: columns are cluster numbers, rows are FDC values
        centers = pd.DataFrame(
            np.transpose(kmeans.cluster_centers_),
            columns=np.array(range(n_clusters)).astype(str)
        )
        write_table(centers, f'cluster_centers_{n_clusters}')

        # record the summary stats from this model
        numbers.append(n_clusters)
        inertias.append(kmeans.inertia_)
        iterations.append(kmeans.n_iter_)

    # locate the elbow of the inertia curve and save the summary table
    sum_df = pd.DataFrame({'number': numbers, 'inertia': inertias, 'n_iter': iterations})
    sum_df['knee'] = KneeLocator(numbers, inertias, curve='convex', direction='decreasing').knee
    write_table(sum_df, 'cluster_metrics')
    return
def calc_silhouette(x: np.ndarray, n_clusters: int or Iterable = 'all', samples: int = 75_000) -> None:
    """
    Calculate the silhouette score for the given number of clusters

    Silhouette scoring is O(n^2) in the number of points, so each cluster is
    randomly subsampled to at most `samples` FDCs before scoring.

    Args:
        x: a numpy array of the prepared FDC data
        n_clusters: the number of clusters to calculate the silhouette score for
        samples: the number of samples to use for the silhouette score calculation

    Returns:
        None
    """
    if x is None:
        x = read_table('cluster_data').values
    fdc_df = pd.DataFrame(x)

    summary = {'number': [], 'silhouette': []}
    random_shuffler = np.random.default_rng()

    for model_file in list_cluster_files(n_clusters):
        logger.info(f'Calculating Silhouettes for {os.path.basename(model_file)}')
        kmeans = joblib.load(model_file)

        # randomly sample fdcs from each cluster
        fdc_df['label'] = kmeans.labels_
        ss_df = pd.DataFrame(columns=fdc_df.columns.to_list())
        for i in range(int(kmeans.n_clusters)):
            # shuffle this cluster's rows (a copy, not fdc_df itself), keep
            # at most `samples` of them, and tag them with the cluster label
            values = fdc_df[fdc_df['label'] == i].drop(columns='label').values
            random_shuffler.shuffle(values)
            values = values[:int(samples)]
            tmp = pd.DataFrame(values)
            tmp['label'] = i
            ss_df = pd.concat([ss_df, tmp])

        # calculate their silhouette scores
        ss_df['silhouette'] = silhouette_samples(ss_df.drop(columns='label').values, ss_df['label'].values, n_jobs=-1)
        ss_df['silhouette'] = ss_df['silhouette'].round(3)
        # parquet requires string column names
        ss_df.columns = ss_df.columns.astype(str)
        write_table(ss_df, f'cluster_sscores_{kmeans.n_clusters}')

        # save the summary stats from this model
        summary['number'].append(kmeans.n_clusters)
        summary['silhouette'].append(ss_df['silhouette'].mean())

    # save the summary stats
    write_table(pd.DataFrame(summary), 'cluster_sscores')
    return
def plot_clusters(x: np.ndarray = None, n_clusters: int or Iterable = 'all',
                  max_cols: int = 3, plt_width: int = 2, plt_height: int = 2, n_lines: int = 2_500) -> None:
    """
    Generate figures of the clustered FDC's

    One figure is written per trained model; each subplot shows a random
    sample of that cluster's member FDCs (black) with the centroid overlaid
    (red).

    Args:
        x: a numpy array of the prepared FDC data
        n_clusters: number of clusters to create figures for
        max_cols: maximum number of columns (subplots) in the figure
        plt_width: width of each subplot in inches
        plt_height: height of each subplot in inches
        n_lines: max number of lines to plot in each subplot

    Returns:
        None
    """
    if x is None:
        x = read_table('cluster_data').values

    size = x.shape[1]
    # map the FDC column index (0..size) onto 0-100% exceedance tick labels
    x_values = np.linspace(0, size, 5)
    x_ticks = np.linspace(0, 100, 5).astype(int)
    random_shuffler = np.random.default_rng()

    for model_file in list_cluster_files(n_clusters):
        logger.info(f'Plotting Clusters {os.path.basename(model_file)}')

        # load the model and calculate
        kmeans = joblib.load(model_file)
        n_clusters = int(kmeans.n_clusters)
        # grid layout: at most max_cols subplots per row
        n_cols = min(n_clusters, max_cols)
        n_rows = math.ceil(n_clusters / n_cols)

        # initialize the figure and labels
        fig, axs = plt.subplots(
            n_rows,
            n_cols,
            figsize=(plt_width * n_cols + 1, plt_height * n_rows + 1),
            dpi=750,
            squeeze=False,
            tight_layout=True,
            sharey='row'
        )
        fig.suptitle("KMeans FDC Clustering")
        fig.supxlabel('Exceedance Probability (%)')
        fig.supylabel('Discharge Z-Score')

        for i, ax in enumerate(fig.axes[:n_clusters]):
            ax.set_title(f'Cluster {i + 1} (n = {np.sum(kmeans.labels_ == i)})')
            ax.set_xlim(0, size)
            ax.set_xticks(x_values, x_ticks)
            ax.set_ylim(-2, 4)
            # draw a random sample of at most n_lines member FDCs
            # (fancy indexing copies, so shuffling does not modify x)
            fdc_sample = x[kmeans.labels_ == i]
            random_shuffler.shuffle(fdc_sample)
            fdc_sample = fdc_sample[:n_lines]
            for j in fdc_sample:
                ax.plot(j.ravel(), "k-")
            # overlay the cluster centroid
            ax.plot(kmeans.cluster_centers_[i].flatten(), "r-")

        # turn off plotting axes which are blank (when ax number > n_clusters)
        for ax in fig.axes[n_clusters:]:
            ax.axis('off')

        fig.savefig(os.path.join(get_dir('clusters'), f'figure-clusters-{n_clusters}.png'))
        plt.close(fig)
    return
def plot_silhouettes(workdir: str, plt_width: int = 3, plt_height: int = 3) -> None:
    """
    Plot the silhouette scores for each cluster.

    Based on https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html

    One figure is written per precomputed silhouette table: the left panel is
    the silhouette diagram, the right panel the cluster-center FDCs.

    Args:
        workdir: path to the project directory
        plt_width: width of each subplot in inches
        plt_height: height of each subplot in inches

    Returns:
        None
    """
    logger.info('Generating Silhouette Diagrams')
    clusters_dir = os.path.join(workdir, 'clusters')

    for sscore_table in natsorted(glob.glob(os.path.join(clusters_dir, 'cluster_sscores_*.parquet'))):
        logger.info(f'Generating Silhouette Diagram: {os.path.basename(sscore_table)}')
        # the cluster count is encoded in the file name: cluster_sscores_<n>.parquet
        n_clusters = int(sscore_table.split('_')[-1].split('.')[0])
        sscore_df = pd.read_parquet(sscore_table, engine='fastparquet')
        centers_df = read_table(f'cluster_centers_{n_clusters}')
        mean_ss = sscore_df['silhouette'].mean()

        # initialize the figure
        fig, (ax1, ax2) = plt.subplots(
            nrows=1,
            ncols=2,
            figsize=(plt_width * 2 + 1, plt_height + 1),
            dpi=600,
            tight_layout=True,
        )

        # Plot 1 titles and labels
        ax1.set_title(f"Silhouette Plot (mean={mean_ss:.3f})")
        ax1.set_xlabel("Silhouette Score")
        ax1.set_ylabel("Cluster Label")
        ax1.set_yticks([])  # Clear the yaxis labels / ticks
        ax1.set_xticks([-0.2, 0, 0.2, 0.4, 0.6, 0.8, 1])

        # Plot 2 titles and labels
        ax2.set_title("Cluster Centers")
        ax2.set_xlabel("Exceedance Probability (%)")
        ax2.set_ylabel("Discharge Z-Score")

        # The vertical line for average silhouette score of all the values
        ax1.axvline(x=mean_ss, color="red", linestyle="--")

        # stack each cluster's band vertically; y_lower tracks the next start
        y_lower = 10
        for sub_cluster in range(int(n_clusters)):
            # select the rows applicable to the current sub cluster
            cluster_sscores = sscore_df[sscore_df['label'] == sub_cluster]['silhouette'].values.flatten()
            cluster_sscores.sort()
            n = cluster_sscores.shape[0]
            y_upper = y_lower + n
            color = cm.nipy_spectral(sub_cluster / int(n_clusters))
            ax1.fill_betweenx(
                np.arange(y_lower, y_upper),
                0,
                cluster_sscores,
                facecolor=color,
                edgecolor=color,
                alpha=0.7,
            )
            # Label the silhouette plots with their cluster numbers at the middle
            ax1.text(-0.05, y_lower + 0.5 * n, f'{sub_cluster + 1}: n={n:,}')
            # plot the cluster center
            ax2.plot(centers_df[f'{sub_cluster}'].values, alpha=0.7, c=color, label=f'Cluster {sub_cluster + 1}')
            # add some buffer before the next cluster
            y_lower = y_upper + 10

        fig.savefig(os.path.join(clusters_dir, f'figure-silhouette-diagram-{n_clusters}.png'))
        plt.close(fig)
    return
def plot_centers(plt_width: int = 2, plt_height: int = 2, max_cols: int = 3) -> None:
    """
    Plot the cluster centers for each cluster.

    Produces one comparison figure for each of k = 4, 7, 10, 13: the figure
    contains a subplot of the cluster-center FDCs for every model from 2
    clusters up to k.

    Args:
        plt_width: width of each subplot in inches
        plt_height: height of each subplot in inches
        max_cols: maximum number of columns of subplots in the figure

    Returns:
        None
    """
    logger.info('Plotting Cluster Centers')

    clusters_dir = get_dir('clusters')

    for n_clusters in [4, 7, 10, 13]:
        # count number of files to plot
        centers_files = [os.path.join(clusters_dir, f'cluster_centers_{i}.parquet') for i in range(2, n_clusters + 1)]
        n_files = len(centers_files)
        n_cols = min(n_files, max_cols)
        n_rows = math.ceil(n_files / n_cols)

        # initialize the figure and labels
        fig, axes = plt.subplots(
            n_rows,
            n_cols,
            figsize=(plt_width * n_cols + 1.25, plt_height * n_rows + 1.25),
            dpi=750,
            squeeze=False,
            tight_layout=True,
            sharey='row',
            sharex='col'
        )
        fig.suptitle('Cluster Centers', fontsize=16)
        fig.supylabel('Discharge Z-Score')
        fig.supxlabel('Exceedance Probability (%)')

        for centers_table, ax in zip(centers_files, fig.axes[:n_files]):
            # NOTE(review): this reassigns the outer loop variable n_clusters;
            # after the loop it equals the last table's count, which matches
            # the outer value, so the savefig name below is still correct.
            n_clusters = int(centers_table.split('_')[-1].split('.')[0])
            centers_df = pd.read_parquet(centers_table, engine='fastparquet')
            for i in range(int(n_clusters)):
                ax.plot(centers_df[f'{i}'].values, label=f'Cluster {i + 1}')
            # Plot titles and labels
            ax.set_title(f"k={n_clusters} clusters")
            ax.set_xlim(0, 40)
            ax.set_ylim(-2, 4)

        fig.savefig(os.path.join(clusters_dir, f'figure-cluster-centers-{n_clusters}.png'))
        plt.close(fig)
    return
def plot_fit_metrics(plt_width: int = 4, plt_height: int = 4) -> None:
    """
    Plot the cluster metrics, inertia and silhouette score, vs number of clusters

    Args:
        plt_width: width of each subplot in inches
        plt_height: height of each subplot in inches

    Returns:
        None
    """
    logger.info('Plotting Cluster Fit Metrics')

    clusters_dir = get_dir('clusters')

    df = read_table('cluster_metrics')
    # silhouette scores are optional (only present if calc_silhouette ran)
    try:
        df = df.merge(read_table('cluster_sscores'), on='number', how='outer')
    except FileNotFoundError:
        pass

    df['number'] = df['number'].astype(int)
    df['inertia'] = df['inertia'].astype(float)

    # initialize the figure and labels
    fig, ax = plt.subplots(
        figsize=(plt_width, plt_height),
        dpi=750,
        tight_layout=True,
    )

    # Plot titles and labels
    ax.set_title("Clustering Fit Metrics")
    ax.set_xlabel("Number of Clusters")
    ax.set_ylabel("Inertia")
    ticks = np.arange(1, df['number'].max() + 2)
    ax.set_xlim(ticks[0], ticks[-1])
    ax.set_xticks(ticks)
    ax.set_yticks([])

    # plot the inertia curve and highlight its elbow ("knee") in red
    knee = int(df['knee'].values[0])
    ax.plot(df['number'], df['inertia'], marker='o', label='Inertia')
    ax.plot(knee, df[df['number'] == knee]['inertia'], marker='o', c='red', label='Knee')

    # add the silhouette scores if they were calculated
    if 'silhouette' in df.columns:
        # blank out missing scores so matplotlib skips them
        df.loc[df['silhouette'].isna(), 'silhouette'] = ''
        # second y-axis so silhouette (0-1) shares the figure with inertia
        ax2 = ax.twinx()
        ax2.set_ylabel("Silhouette Score")
        ax2.set_ylim(0, 1)
        ax2.plot(df['number'], df['silhouette'], marker='o', c='green', label='Silhouette Score')

    fig.savefig(os.path.join(clusters_dir, f'figure-fit-metrics.png'))
    plt.close(fig)
    return
import logging
import os
import contextily as cx
import geopandas as gpd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .io import COL_ASN_REASON
from .io import COL_CID
from .io import COL_GID
from .io import COL_MID
from .io import get_dir
from .io import read_gis
from .io import read_table
__all__ = ['create_maps', 'map_by_reason', 'map_by_cluster', 'map_unassigned', 'map_ids', ]
logger = logging.getLogger(__name__)
def create_maps(assign_df: pd.DataFrame = None, drain_gis: gpd.GeoDataFrame = None, prefix: str = '') -> None:
    """
    Runs all the clip functions which create subsets of the drainage lines GIS dataset based on how they were assigned
    for bias correction.

    Args:
        assign_df: the assignment table dataframe
        drain_gis: a geodataframe of the drainage lines gis dataset, or a path to read one from
        prefix: a prefix for names of the outputs to distinguish between data generated in separate instances

    Raises:
        TypeError: if drain_gis is neither a path string nor a GeoDataFrame

    Returns:
        None
    """
    if assign_df is None:
        assign_df = read_table('assign_table')
    if drain_gis is None:
        drain_gis = read_gis('drain_gis')

    # accept either a file path or an already-loaded geodataframe;
    # isinstance (rather than exact type comparison) also accepts subclasses
    if isinstance(drain_gis, str):
        gdf = gpd.read_file(drain_gis)
    elif isinstance(drain_gis, gpd.GeoDataFrame):
        gdf = drain_gis
    else:
        raise TypeError(f'Invalid type for drain_gis: {type(drain_gis)}')

    map_by_reason(assign_df, gdf, prefix)
    map_by_cluster(assign_df, gdf, prefix)
    map_unassigned(assign_df, gdf, prefix)
    return
def map_by_reason(assign_df: pd.DataFrame, drain_gis: str or gpd.GeoDataFrame, prefix: str = '') -> None:
    """
    Creates Geopackage files in workdir/gis_outputs for each unique value in the assignment column

    Args:
        assign_df: the assignment table dataframe
        drain_gis: path to a drainage line shapefile which can be clipped
        prefix: a prefix for names of the outputs to distinguish between data generated at separate instances

    Returns:
        None
    """
    # load the drainage lines from disk when a path was given instead of a GeoDataFrame
    if isinstance(drain_gis, str):
        drain_gis = gpd.read_file(drain_gis)

    # write one geopackage per unique assignment reason
    for assignment_reason in assign_df[COL_ASN_REASON].unique():
        logger.info(f'Creating GIS output for group: {assignment_reason}')
        matching_mids = assign_df.loc[assign_df[COL_ASN_REASON] == assignment_reason, COL_MID]
        subset = drain_gis[drain_gis[COL_MID].astype(str).isin(matching_mids)]
        if subset.empty:
            logger.debug(f'Empty filter: No streams are assigned for {assignment_reason}')
            continue
        file_name = f'{f"{prefix}_" if prefix else ""}assignments_{assignment_reason}.gpkg'
        subset.to_file(os.path.join(get_dir('gis'), file_name))
    return
def map_by_cluster(assign_table: pd.DataFrame, drain_gis: str, prefix: str = '') -> None:
    """
    Creates Geopackage files in workdir/gis_outputs of the drainage lines based on the fdc cluster they were assigned to

    Args:
        assign_table: the assignment table dataframe
        drain_gis: path to a drainage line shapefile which can be clipped
        prefix: optional, a prefix to prepend to each created file's name

    Returns:
        None
    """
    # load the drainage lines from disk when a path was given instead of a GeoDataFrame
    if isinstance(drain_gis, str):
        drain_gis = gpd.read_file(drain_gis)

    # write one geopackage per cluster id present in the assignment table
    for cluster_number in assign_table[COL_CID].unique():
        logger.info(f'Creating GIS output for cluster: {cluster_number}')
        cluster_mids = assign_table.loc[assign_table[COL_CID] == cluster_number, COL_MID]
        cluster_gdf = drain_gis[drain_gis[COL_MID].astype(str).isin(cluster_mids)]
        if cluster_gdf.empty:
            logger.debug(f'Empty filter: No streams are assigned to cluster {cluster_number}')
            continue
        out_name = f'{prefix}{"_" if prefix else ""}cluster-{int(cluster_number)}.gpkg'
        cluster_gdf.to_file(os.path.join(get_dir('gis'), out_name))
    return
def map_unassigned(assign_table: pd.DataFrame, drain_gis: str, prefix: str = '') -> None:
    """
    Creates Geopackage files in workdir/gis_outputs of the drainage lines which haven't been assigned a gauge yet

    Args:
        assign_table: the assignment table dataframe
        drain_gis: path to a drainage line shapefile which can be clipped
        prefix: optional, a prefix to prepend to each created file's name

    Returns:
        None
    """
    logger.info('Creating GIS output for unassigned basins')
    # load the drainage lines from disk when a path was given instead of a GeoDataFrame
    if isinstance(drain_gis, str):
        drain_gis = gpd.read_file(drain_gis)

    unassigned_ids = assign_table.loc[assign_table[COL_ASN_REASON] == 'unassigned', COL_MID].values
    unassigned_gdf = drain_gis[drain_gis[COL_MID].astype(str).isin(unassigned_ids)]
    if unassigned_gdf.empty:
        logger.debug('Empty filter: No streams are unassigned')
        return

    out_name = f'{prefix}{"_" if prefix else ""}assignments_unassigned.gpkg'
    unassigned_gdf.to_file(os.path.join(get_dir('gis'), out_name))
    return
def map_ids(ids: list, drain_gis: str, prefix: str = '', id_column: str = COL_MID) -> None:
    """
    Creates Geopackage files in workdir/gis_outputs of the subset of 'drain_shape' with an ID in the specified list

    Args:
        ids: any iterable containing a series of model_ids
        drain_gis: path to the drainage shapefile to be clipped
        prefix: optional, a prefix to prepend to each created file's name
        id_column: name of the id column in the attributes of the shape table

    Returns:
        None
    """
    # load the drainage lines from disk when a path was given instead of a GeoDataFrame
    if isinstance(drain_gis, str):
        drain_gis = gpd.read_file(drain_gis)

    selected = drain_gis[drain_gis[id_column].isin(ids)]
    out_name = f'{prefix}{"_" if prefix else ""}id_subset.gpkg'
    selected.to_file(os.path.join(get_dir('gis'), out_name))
    return
def histomaps(gdf: gpd.GeoDataFrame, metric: str, prct: str) -> None:
    """
    Creates a side-by-side histogram and gauge map figure of a validation metric

    Args:
        gdf: a GeoDataFrame containing validation metrics
        metric: name of the metric to plot
        prct: Percentile of the validation set used to generate the histogram

    Returns:
        None
    """
    core_columns = [COL_MID, COL_GID, 'geometry']
    # world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
    # world.plot(ax=axm, color='white', edgecolor='black')
    colors = ['#dc112e', '#d6db12', '#da9707', '#13c208', '#0824c2']
    bins = [-10, 0, 0.25, 0.5, 0.75, 1]
    cmap = mpl.colors.ListedColormap(colors)
    norm = mpl.colors.BoundaryNorm(boundaries=bins, ncolors=len(cmap.colors))
    title = metric.replace('KGE2012', 'Kling Gupta Efficiency 2012 - ') + f' {prct}% Gauges Excluded'

    # bucket the metric values into the histogram bins, keeping a matching color per group
    hist_groups = []
    hist_colors = []
    categorize_by = [-np.inf, 0, 0.25, 0.5, 0.75, 1]
    for idx in range(len(categorize_by) - 1):
        gdfsub = gdf[gdf[metric] >= categorize_by[idx]]
        gdfsub = gdfsub[gdfsub[metric] < categorize_by[idx + 1]]
        if not gdfsub.empty:
            hist_groups.append(gdfsub[metric].values)
            hist_colors.append(colors[idx])

    fig, (axh, axm) = plt.subplots(
        1, 2, tight_layout=True, figsize=(9, 5), dpi=400, gridspec_kw={'width_ratios': [1, 1]})
    fig.suptitle(title, fontsize=20)

    median = round(gdf[metric].median(), 2)
    axh.set_title(f'Histogram (Median = {median})')
    axh.set_ylabel('Count')
    axh.set_xlabel('KGE 2012')
    axh.hist(hist_groups, color=hist_colors, bins=25, histtype='barstacked', edgecolor='black')
    axh.axvline(median, color='k', linestyle='dashed', linewidth=3)

    axm.set_title('Gauge Map')
    axm.set_ylabel('Latitude')
    axm.set_xlabel('Longitude')
    axm.set_xticks([])
    axm.set_yticks([])
    # bug fix: plot the gauges on the map axis and apply the prepared colormap/norm.
    # Previously .plot(metric) drew to a new implicit figure, so the saved map axis
    # was empty and cmap/norm were computed but never used.
    gdf[core_columns + [metric, ]].to_crs(epsg=3857).plot(metric, ax=axm, cmap=cmap, norm=norm)
    cx.add_basemap(ax=axm, zoom=9, source=cx.providers.Esri.WorldTopoMap, attribution='')

    fig.savefig(os.path.join(get_dir('gis'), f'{metric}_{prct}.png'))
    # close the figure so repeated calls do not accumulate open figures in memory
    plt.close(fig)
    return
<p align="center">
<img src="img/saber_logo.png", style="height:150px">
</p>
<h1 align="center">
Saber
</h1>
<p align="center">
<a href="https://travis-ci.org/BaderLab/saber">
<img src="https://travis-ci.org/BaderLab/saber.svg?branch=master"
alt="Travis CI">
</a>
<a href="https://www.codacy.com/app/JohnGiorgi/Saber?utm_source=github.com&utm_medium=referral&utm_content=BaderLab/saber&utm_campaign=Badge_Grade">
<img src="https://api.codacy.com/project/badge/Grade/d122e87152d84f959ee6d97b71d616cb" alt='Codacy Status'/>
</a>
<a href='https://coveralls.io/github/BaderLab/saber?branch=master'>
<img src='https://coveralls.io/repos/github/BaderLab/saber/badge.svg?branch=master' alt='Coverage Status'/>
</a>
<a href='https://spacy.io'>
<img src='https://img.shields.io/badge/spaCy-v2-09a3d5.svg' alt='Spacy Version'/>
</a>
<a href='http://makeapullrequest.com'>
<img src='https://img.shields.io/badge/PRs-welcome-blue.svg?style=shields' alt='PRs Welcome'/>
</a>
<a href='https://opensource.org/licenses/MIT'>
<img src='https://img.shields.io/badge/License-MIT-blue.svg' alt='License'/>
</a>
<a href='https://colab.research.google.com/drive/1WD7oruVuTo6p_908MQWXRBdLF3Vw2MPo'>
<img src='https://img.shields.io/badge/launch-Google%20Colab-orange.svg' alt='Colab'/>
</a>
</p>
<p align="center"><b>Saber</b> (<b>S</b>equence <b>A</b>nnotator for <b>B</b>iomedical <b>E</b>ntities and <b>R</b>elations) is a deep-learning based tool for <b>information extraction</b> in the biomedical domain.
</p>
<p align="center">
<a href="#installation">Installation</a> •
<a href="#quickstart">Quickstart</a> •
<a href="#documentation">Documentation</a>
</p>
## Installation
To install Saber, you will need `python>=3.5`. If not already installed, `python>=3.5` can be installed via
- The [official installer](https://www.python.org/downloads/)
- [Homebrew](https://brew.sh), on MacOS (`brew install python3`)
- [Miniconda3](https://conda.io/miniconda.html) / [Anaconda3](https://www.anaconda.com/download/)
(OPTIONAL) Activate your virtual environment (see [below](#optional-creating-and-activating-virtual-environments) for help)
```sh
$ conda activate saber
# Notice your command prompt has changed to indicate that the environment is active
(saber) $
```
then install Saber
```sh
(saber) $ pip install saber
```
To get the latest development version of Saber, install it right from this repository with `pip`
```sh
(saber) $ pip install git+https://github.com/BaderLab/saber.git
```
or by cloning the repository and then using `pip` to install the package
```sh
(saber) $ git clone https://github.com/BaderLab/saber.git
(saber) $ cd saber
(saber) $ pip install .
```
For now, you will need to install the required [Spacy](https://spacy.io) model and the [keras-contrib](https://github.com/keras-team/keras-contrib) repository (even if you installed with `pip install saber`)
```sh
# keras-contrib
(saber) $ pip install git+https://www.github.com/keras-team/keras-contrib.git
# NeuralCoref medium model built on top of Spacy, this might take a few minutes to download!
(saber) $ pip install https://github.com/huggingface/neuralcoref-models/releases/download/en_coref_md-3.0.0/en_coref_md-3.0.0.tar.gz
```
### (OPTIONAL) Creating and activating virtual environments
When using `pip` it is generally recommended to install packages in a virtual environment to avoid modifying system state. To create a virtual environment named `saber`
#### Using virtualenv or venv
Using [virtualenv](https://virtualenv.pypa.io/en/stable/)
```sh
$ virtualenv --python=python3 /path/to/new/venv/saber
```
Using [venv](https://docs.python.org/3/library/venv.html)
```sh
$ python3 -m venv /path/to/new/venv/saber
```
Next, you need to activate the environment
```sh
$ source /path/to/new/venv/saber/bin/activate
# Notice your command prompt has changed to indicate that the environment is active
(saber) $
```
#### Using Conda
If you use [Conda](https://conda.io/docs/), you can create an environment named `saber` by running
```sh
$ conda create -n saber python=3.6
```
then activate the environment with
```sh
$ conda activate saber
# Again, your command prompt should change to indicate that the environment is active
(saber) $
```
## Quickstart
If your goal is to use Saber to annotate biomedical text, then you can either use the [web-service](#web-service) or a [pre-trained model](#pre-trained-models). If you simply want to check Saber out, without installing anything locally, try the [Google Colaboratory](#google-colaboratory) notebook.
### Google Colaboratory
The fastest way to check out Saber is by following along with the Google Colaboratory notebook ([](https://colab.research.google.com/drive/1WD7oruVuTo6p_908MQWXRBdLF3Vw2MPo)). In order to be able to run the cells, select "Open in Playground" or, alternatively, save a copy to your own Google Drive account (File > Save a copy in Drive).
### Web-service
To use Saber as a **local** web-service, run
```
(saber) $ python -m saber.cli.app
```
or, if you prefer, you can pull & run the Saber image from **Docker Hub**
```sh
# Pull Saber image from Docker Hub
$ docker pull pathwaycommons/saber
# Run docker (use `-dt` instead of `-it` to run container in background)
$ docker run -it --rm -p 5000:5000 --name saber pathwaycommons/saber
```
There are currently two endpoints, `/annotate/text` and `/annotate/pmid`. Both expect a `POST` request with a JSON payload, e.g.,
```json
{
"text": "The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53."
}
```
or
```json
{
"pmid": 11835401
}
```
For example, running the web-service locally and using `cURL`
```sh
$ curl -X POST 'http://localhost:5000/annotate/text' \
--data '{"text": "The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53."}'
```
Documentation for the Saber web-service API can be found [here](https://baderlab.github.io/saber-api-docs/).
### Pre-trained models
First, import the `Saber` class. This is the interface to Saber
```python
from saber.saber import Saber
```
To load a pre-trained model, first create a `Saber` object
```python
saber = Saber()
```
and then load the model of our choice
```python
saber.load('PRGE')
```
You can see all the pre-trained models in the [web-service API docs](https://baderlab.github.io/saber-api-docs/) or, the [saber/pretrained_models](saber/pretrained_models) folder in this repository, or by running the following line of code
```python
from saber.constants import ENTITIES; print(list(ENTITIES.keys()))
```
To annotate text with the model, just call the `Saber.annotate()` method
```python
saber.annotate("The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.")
```
See the [documentation](https://baderlab.github.io/saber/quick_start/) for more details.
## Documentation
Documentation for the Saber API can be found [here](https://baderlab.github.io/saber/). The web-service API has its own documentation [here](https://baderlab.github.io/saber-api-docs/#introduction). Finally, we provide a [jupyter notebook](notebooks/lightning_tour.ipynb) which introduces the main ways of using Saber. See [here](https://baderlab.github.io/saber/guide_to_saber_api/#juypter-notebooks) for help setting up [JupyterLab](https://github.com/jupyterlab/jupyterlab).
| /saber-0.1.0.tar.gz/saber-0.1.0/README.md | 0.663887 | 0.920074 | README.md | pypi |
# Quick Start
If your goal is simply to use Saber to annotate biomedical text, then you can either use the [web-service](#web-service) or a [pre-trained model](#pre-trained-models).
## Web-service
To use Saber as a **local** web-service, run
```
(saber) $ python -m saber.cli.app
```
or, if you prefer, you can pull & run the Saber image from **Docker Hub**
```
# Pull Saber image from Docker Hub
$ docker pull pathwaycommons/saber
# Run docker (use `-dt` instead of `-it` to run container in background)
$ docker run -it --rm -p 5000:5000 --name saber pathwaycommons/saber
```
!!! tip
Alternatively, you can clone the GitHub repository and build the container from the `Dockerfile` with `docker build -t saber .`
There are currently two endpoints, `/annotate/text` and `/annotate/pmid`. Both expect a `POST` request with a JSON payload, e.g.
```json
{
"text": "The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53."
}
```
or
```json
{
"pmid": 11835401
}
```
For example, with the web-service running locally
``` bash tab="Bash"
curl -X POST 'http://localhost:5000/annotate/text' \
--data '{"text": "The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53."}'
```
``` python tab="python"
import requests # assuming you have requests package installed!
url = "http://localhost:5000/annotate/text"
payload = {"text": "The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53."}
response = requests.post(url, json=payload)
print(response.text)
print(response.status_code, response.reason)
```
??? warning
The first request to the web-service will be slow (~30s). This is because a large language
model needs to be loaded into memory.
Documentation for the Saber web-service API can be found [here](https://baderlab.github.io/saber-api-docs/). We hope to provide a live version of the web-service soon!
## Pre-trained models
First, import `Saber`. This class coordinates training, annotation, saving and loading of models and datasets. In short, this is the interface to Saber.
```python
from saber.saber import Saber
```
To load a pre-trained model, first create a `Saber` object
```python
saber = Saber()
```
and then load the model of our choice
```python
saber.load('PRGE')
```
You can see all the pre-trained models in the [web-service API docs](https://baderlab.github.io/saber-api-docs/) or, the [saber/pretrained_models](saber/pretrained_models) folder in this repository, or by running the following line of code
```python
from saber.constants import ENTITIES; print(list(ENTITIES.keys()))
```
To annotate text with the model, just call the `annotate()` method
```python
saber.annotate("The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.")
```
??? warning
The `annotate` method will be slow the first time you call it (~30s). This is because a large language model needs to be loaded into memory.
### Coreference Resolution
[**Coreference**](http://www.wikiwand.com/en/Coreference) occurs when two or more expressions in a text refer to the same person or thing, that is, they have the same **referent**. Take the following example:
_"__IL-6__ supports tumour growth and metastasising in terminal patients, and __it__ significantly engages in cancer cachexia (including anorexia) and depression associated with malignancy."_
Clearly, "__it__" referes to "__IL-6__". If we do not resolve this coreference, then "__it__" will not be labeled as an entity and any relation or event it is mentioned in will not be extracted. Saber uses [NeuralCoref](https://github.com/huggingface/neuralcoref), a state-of-the-art coreference resolution tool based on neural nets and built on top of [Spacy](https://spacy.io). To use it, just supply the argument `coref=True` (which is `False` by default) to the `annotate()` method
```python
text = "IL-6 supports tumour growth and metastasising in terminal patients, and it significantly engages in cancer cachexia (including anorexia) and depression associated with malignancy."
# WITHOUT coreference resolution
saber.annotate(text, coref=False)
# WITH coreference resolution
saber.annotate(text, coref=True)
```
!!! note
If you are using the web-service, simply supply `"coref": true` in your `JSON` payload to resolve coreferences.
Saber currently takes the simplest possible approach: replace all coreference mentions with their referent, and then feed the resolved text to the model that identifies named entities.
### Working with annotations
The `annotate()` method returns a simple `dict` object
```python
ann = saber.annotate("The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.")
```
which contains the keys `title`, `text` and `ents`
- `title`: contains the title of the article, if provided
- `text`: contains the text (which is minimally processed) the model was deployed on
- `ents`: contains a list of entities present in the `text` that were annotated by the model
For example, to see all entities annotated by the model, call
```python
ann['ents']
```
#### Converting annotations to JSON
The `annotate()` method returns a `dict` object, but can be converted to a `JSON` formatted string for ease-of-use in downstream applications
```python
import json
# convert to json object
json_ann = json.dumps(ann)
# convert back to python dictionary
ann = json.loads(json_ann)
```
| /saber-0.1.0.tar.gz/saber-0.1.0/docs/quick_start.md | 0.793586 | 0.859899 | quick_start.md | pypi |
# Guide to the Saber API
You can interact with Saber as a web-service (explained in [Quick start](https://baderlab.github.io/saber/quick_start/)), command line tool, python package, or via the Jupyter notebooks. If you created a virtual environment, _remember to activate it first_.
### Command line tool
Currently, the command line tool simply trains the model. To use it, call
```
(saber) $ python -m saber.cli.train
```
along with any command line arguments. For example, to train the model on the [NCBI Disease](https://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/) corpus
```
(saber) $ python -m saber.cli.train --dataset_folder NCBI_Disease_BIO
```
!!! tip
See [Resources](https://baderlab.github.io/saber/resources/) for help preparing datasets and word embeddings for training.
Run `python -m saber.cli.train --help` to see all possible arguments.
Of course, supplying arguments at the command line can quickly become cumbersome. Saber also allows you to provide a configuration file, which can be specified like so
```
(saber) $ python -m saber.cli.train --config_filepath path/to/config.ini
```
Copy the contents of the [default config file](https://github.com/BaderLab/saber/blob/master/saber/config.ini) to a new `*.ini` file in order to get started.
!!! note
Arguments supplied at the command line overwrite those found in the configuration file, e.g.,
```
(saber) $ python -m saber.cli.train --dataset_folder path/to/dataset --k_folds 10
```
would overwrite the arguments for `dataset_folder` and `k_folds` found in the configuration file.
### Python package
You can also import Saber and interact with it as a python package. Saber exposes its functionality through the `Saber` class. Here is just about everything Saber does in one script:
```python
from saber.saber import Saber
# First, create a Saber object, which exposes Sabers functionality
saber = Saber()
# Load a dataset and create a model (provide a list of datasets to use multi-task learning!)
saber.load_dataset('path/to/datasets/GENIA')
saber.build()
# Train and save a model
saber.train()
saber.save('pretrained_models/GENIA')
# Load a model
del saber
saber = Saber()
saber.load('pretrained_models/GENIA')
# Perform prediction on raw text, get resulting annotation
raw_text = 'The phosphorylation of Hdm2 by MK2 promotes the ubiquitination of p53.'
annotation = saber.annotate(raw_text)
# Use transfer learning to continue training on a new dataset
saber.load_dataset('path/to/datasets/CRAFT')
saber.train()
```
#### Transfer learning
Transfer learning is as easy as training, saving, loading, and then continuing training of a model. Here is an example
```python
# Create and train a model on GENIA corpus
saber = Saber()
saber.load_dataset('path/to/datasets/GENIA')
saber.build()
saber.train()
saber.save('pretrained_models/GENIA')
# Load that model
del saber
saber = Saber()
saber.load('pretrained_models/GENIA')
# Use transfer learning to continue training on a new dataset
saber.load_dataset('path/to/datasets/CRAFT')
saber.train()
```
!!! info
There is currently no easy way to do this with the command line interface, but I am working on it!
#### Multi-task learning
Multi-task learning is as easy as specifying multiple dataset paths, either in the `config` file, at the command line via the flag `--dataset_folder`, or as an argument to `load_dataset()`. The number of datasets is arbitrary.
Here is an example using the last method
```python
saber = Saber()
# Simply pass multiple dataset paths as a list to load_dataset to use multi-task learning.
saber.load_dataset(['path/to/datasets/NCBI_Disease', 'path/to/datasets/Linnaeus'])
saber.build()
saber.train()
```
#### Training on GPUs
Saber will automatically train on as many GPUs as are available. In order for this to work, you must have [CUDA](https://developer.nvidia.com/cuda-downloads) and, optionally, [CudDNN](https://developer.nvidia.com/cudnn) installed. If you are using conda to manage your environment, then these are installed for you when you call
```
(saber) $ conda install tensorflow-gpu
```
Otherwise, install them yourself and use `pip` to install `tensorflow-gpu`
```
(saber) $ pip install tensorflow-gpu
```
??? warning
Use `pip install tensorflow-gpu==1.7.0` if you would like to train on multiple GPUs as `tensorflow-gpu` versions `>1.7.0` are currently throwing errors.
To control which GPUs Saber trains on, you can use the `CUDA_VISIBLE_DEVICES` environment variable, e.g.,
```
# To train exclusively on CPU
(saber) $ CUDA_VISIBLE_DEVICES="" python -m saber.cli.train
# To train on 1 GPU with ID=0
(saber) $ CUDA_VISIBLE_DEVICES="0" python -m saber.cli.train
# To train on 2 GPUs with IDs=0,2
(saber) $ CUDA_VISIBLE_DEVICES="0,2" python -m saber.cli.train
```
!!! tip
You can get information about your NVIDIA GPUs by typing `nvidia-smi` at the command line (assuming the GPUs are setup properly and the nvidia driver is installed).
#### Saving and loading models
In the following sections we introduce the saving and loading of models.
##### Saving a model
Assuming the model has already been created (see above), we can easily save our model like so
```python
save_dir = 'path/to/pretrained_models/mymodel'
saber.save(save_dir)
```
##### Loading a model
Lets illustrate loading a model with a new `Saber` object
```python
# Delete our previous Saber object (if it exists)
del saber
# Create a new Saber object
saber = Saber()
# Load a previous model
saber.load(path_to_saved_model)
```
### Jupyter notebooks
First, grab the notebook. Go [here](https://raw.githubusercontent.com/BaderLab/saber/master/notebooks/lightning_tour.ipynb), then press `command` / `control` + `s`, and save the notebook as `lightning_tour.ipynb` somewhere on your computer.
Next, install [JupyterLab](https://github.com/jupyterlab/jupyterlab) by following the instructions [here](https://github.com/jupyterlab/jupyterlab#installation). Once installed, run:
```
(saber) $ jupyter lab
```
A new window will open in your browser. Use it to search for `lightning_tour.ipynb` on your computer.
!!! notes
- If you activated a virtual environment, "`myenv`", make sure you see **Python [venv:myenv]** in the top right of the Jupyter notebook.
- If you are using conda, you need to run `conda install nb_conda` with your environment activated (you only need to do this once!).
| /saber-0.1.0.tar.gz/saber-0.1.0/docs/guide_to_saber_api.md | 0.742422 | 0.902266 | guide_to_saber_api.md | pypi |
# Move your playlists to Beat Saber!
`pip3 install saberio-rewind`
[](https://git.io/typing-svg)
> Your cookie and token will be only stored locally once the program is initiated.
```sh
Type the song or the playlist path right here. Or Press ENTER to enter spotify playlist link. ❯
> Spotify playlist link: https://open.spotify.com/playlist/2RkesHtaItmUytcwLMJWuT
> Choose mode: [auto|list|test|[auto] bookmark [and download]]. ❯ bookmark
Client ID:
Secret ID:
✔ Configuration successful!
> Folder: .
> Playlist: songs
> Mode: list
✔ Search complete for RGB YOASOBI
╒════╤════════╤════════════════════════════════╤════════════════╤══════╤════════╤═════════════════════╤════════════╕
│ │ Code │ Song │ Mapper │ Up │ Down │ Difficulty │ Date │
╞════╪════════╪════════════════════════════════╪════════════════╪══════╪════════╪═════════════════════╪════════════╡
│ 1 │ 1a705 │ YOASOBI – Sangenshoku (Cinema) │ citizenfive │ 1511 │ 57 │ Ea, No, Ha, Ex, Ex+ │ 19.07.2021 │
├────┼────────┼────────────────────────────────┼────────────────┼──────┼────────┼─────────────────────┼────────────┤
│ 2 │ 16165 │ RGB – YOASOBI │ Joetastic │ 437 │ 36 │ Ha, Ex, Ex+ │ 30.03.2021 │
├────┼────────┼────────────────────────────────┼────────────────┼──────┼────────┼─────────────────────┼────────────┤
│ 3 │ 1ef4b │ RGB – YOASOBI │ Joetastic │ 98 │ 9 │ Ex+ │ 17.12.2021 │
├────┼────────┼────────────────────────────────┼────────────────┼──────┼────────┼─────────────────────┼────────────┤
│ 4 │ 160f3 │ YOASOBI – RGB │ Dack │ 299 │ 34 │ Ex │ 30.03.2021 │
├────┼────────┼────────────────────────────────┼────────────────┼──────┼────────┼─────────────────────┼────────────┤
│ 5 │ 1a537 │ YOASOBI – RGB │ Nolanimations │ 76 │ 9 │ Ex+ │ 16.07.2021 │
├────┼────────┼────────────────────────────────┼────────────────┼──────┼────────┼─────────────────────┼────────────┤
│ 6 │ 1669d │ RGB – YOASOBI │ hiyasi_penguin │ 66 │ 8 │ Ex │ 07.04.2021 │
╘════╧════════╧════════════════════════════════╧════════════════╧══════╧════════╧═════════════════════╧════════════╛
✔ Bookmarked RGB – YOASOBI
```
# Let's Starting by your Spotify Playlist.

```
❯ saberio -y "https://open.spotify.com/playlist/2RkesHtaItmUytcwLMJWuT"
> Choose mode: [auto|list|test|[auto] bookmark [and download]]. ❯ auto bookmark and download
> Choose a name for the playlist. (songs)❯
> Folder: .
> Playlist: songs
> Mode: auto
✔ Search complete for RGB YOASOBI
⚠ YOASOBI – Sangenshoku (Cinema) is by YOASOBI but not sure if its name is RGB...
✔ Bookmarked RGB – YOASOBI
✔ Downloaded RGB – YOASOBI
✔ Search complete for Moon Halo M2U
✔ Bookmarked M2U – Moon Halo
✔ Downloaded M2U – Moon Halo
✔ Search complete for Million Days SABAI
✔ Bookmarked Sabai Million Days ft. Hoang & Claire Ridgely
✔ Downloaded Sabai Million Days ft. Hoang & Claire Ridgely
✔ Search complete for Cello Suite No Johann Sebastian Bach
✖ No song was found for Cello Suite No Johann Sebastian Bach
```
## Or simply tell'em what you want
```
❯ saberio -s "RGB--YOASOBI" -s "Moon Halo--M2U" -s "Million Days" --auto -b
> Choose a name for the playlist. (songs)❯
> Search: ['RGB--YOASOBI', 'Moon Halo--M2U', 'Million Days']
> Folder: .
> Playlist: songs
> Mode: auto
✔ Search complete for RGB YOASOBI
⚠ YOASOBI – Sangenshoku (Cinema) is by YOASOBI but not sure if its name is RGB...
✔ Already downloaded RGB – YOASOBI
✔ Search complete for Moon Halo M2U
✔ Already downloaded M2U – Moon Halo
✔ Search complete for Million Days
✔ Downloaded My Stick! – Bad Lip Reading (Standard, 90 degree)
```
### Or give it a list like this
```
# file ./myloves
RGB--YOASOBI
Moon Halo--M2U
Million Days
```
and
```
❯ saberio -p loves -f myloves --auto -b
> Songs list file: myloves
> Folder: .
> Playlist: loves
> Mode: auto
✔ Search complete for RGB YOASOBI
⚠ YOASOBI – Sangenshoku (Cinema) is by YOASOBI but not sure if its name is RGB...
✔ Downloaded RGB – YOASOBI
✔ Search complete for Moon Halo M2U
✔ Downloaded M2U – Moon Halo
✔ Search complete for Million Days
✔ Downloaded My Stick! – Bad Lip Reading (Standard, 90 degree)
```
## There can be many combos
```
❯ saberio -h
usage: PySaber [-h] [-p PLAYLIST] [-d PATH] [-s SONG | -f FILE | -y SPOTIFY] [-c COOKIE] [-b] [-o] [--auto | --list | --test] [--verbose]
Let's rock on Beat Saber.
options:
-h, --help show this help message and exit
-p PLAYLIST playlist name
-d PATH, --dir PATH path where to save the song (playlist parent folder)
-s SONG, --song SONG song(s) name to execute
-f FILE, --file FILE text file with a songs list
-y SPOTIFY, --spotify SPOTIFY
spotify playlist link
--auto automatic download first matching song
--list choose a song from the matching list for every song
--test test automatic matching withuout downloading
--verbose log every run of the script
-c COOKIE, --cookie COOKIE
cookie for bookmarking.
-b bookmark all songs searched from BeastSaber.
-o bookmark songs without downloading.
Example: pysaber -f songs.txt -p BeastSaver --list
```
### But if you feel a bit lost,
you can always use the interactive mode just by pressing ENTERs!
```
❯ saberio
Type the song or the playlist path right here. Or Press ENTER to enter spotify playlist link. ❯
> Spotify playlist link:
> Choose mode: [auto|list|test|[auto] bookmark [and download]]. ❯
```
## In case you have no clue how to get the cookie
Simply put:
1. Login to [bsaber.com](https://bsaber.com/)
2. Press F12, navigate to the network selection.
3. Bookmark or unbookmark a song.
4. See the new request under the selection? Click that row.
5. Scroll down the new pop-up window and copy the value of cookie from the header. Check this picture...
 | /saberio-rewind-1.0.3.post1.tar.gz/saberio-rewind-1.0.3.post1/README.md | 0.637369 | 0.783802 | README.md | pypi |
import cv2
import os
import wget
import pandas as pd
from PIL import Image
from sabhi_utils.image_utils.utils import load_image
from pathlib import Path
def initialize_face_detector(detector_backend):
    """
    Download (if necessary) and load the weights for the requested face detector.

    Model files are cached under ~/.sabhi_utils/weights/ and only downloaded
    when missing.

    Args:
        detector_backend: name of the detector backend; only 'ssd' is supported

    Returns:
        a cv2.dnn face detection network loaded from the cached Caffe files

    Raises:
        ValueError: if detector_backend is not a supported backend
    """
    home = str(Path.home())
    output_dir = os.path.join(
        home,
        '.sabhi_utils/weights/'
    )
    os.makedirs(output_dir, exist_ok=True)

    if detector_backend != 'ssd':
        # bug fix: previously an unsupported backend fell through to
        # `return face_detector` and raised a NameError
        raise ValueError(f"Unsupported detector_backend: {detector_backend}")

    # check required ssd model exists in the home/.sabhi_utils/weights folder
    prototxt_path = os.path.join(output_dir, 'deploy.prototxt')
    weights_path = os.path.join(output_dir, 'res10_300x300_ssd_iter_140000.caffemodel')

    # model structure
    if not os.path.isfile(prototxt_path):
        print("deploy.prototxt will be downloaded...")
        url = "https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt"
        wget.download(url, prototxt_path)

    # pre-trained weights
    if not os.path.isfile(weights_path):
        print("res10_300x300_ssd_iter_140000.caffemodel will be downloaded...")
        url = "https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel"
        wget.download(url, weights_path)

    face_detector = cv2.dnn.readNetFromCaffe(prototxt_path, weights_path)
    return face_detector
def detect_face(
        img,
        detector_backend='ssd',
        grayscale=False,
        tolerance=.88,
        enforce_detection=True,
        debug=False
):
    """
    Run face detection on an image and return the detections above the tolerance.

    Args:
        img: anything accepted by load_image (path, url, base64 string or array)
        detector_backend: name of the detector backend; only 'ssd' is supported
        grayscale: unused; kept for backward compatibility
        tolerance: minimum confidence for a detection to be kept
        enforce_detection: unused; kept for backward compatibility
        debug: if True, display the resized image and wait for a key press

    Returns:
        a pandas DataFrame with one row per detection whose confidence is at
        least `tolerance`, with columns: img_id, is_face, confidence, left,
        top, right, bottom

    Raises:
        ValueError: if detector_backend is not a supported backend
    """
    if detector_backend != 'ssd':
        # bug fix: previously an unsupported backend fell through past the
        # ssd branch and raised a NameError on `detections_df`
        raise ValueError(f"Unsupported detector_backend: {detector_backend}")

    img = load_image(img)
    face_detector = initialize_face_detector(
        detector_backend=detector_backend
    )

    ssd_labels = ["img_id", "is_face", "confidence",
                  "left", "top", "right", "bottom"]
    target_size = (300, 300)

    base_img = img.copy()
    img = cv2.resize(base_img, target_size)
    if debug:
        cv2.imshow("img", img)
        cv2.waitKey(0)

    imageBlob = cv2.dnn.blobFromImage(image=img)
    face_detector.setInput(imageBlob)
    detections = face_detector.forward()

    # SSD output shape is (1, 1, N, 7); keep only rows above the tolerance
    detections_df = pd.DataFrame(detections[0][0], columns=ssd_labels)
    detections_df = detections_df.loc[
        detections_df['confidence'] >= tolerance
    ]
    return detections_df
def num_of_faces(image):
    """Return how many faces :func:`detect_face` finds in *image*."""
    detections = detect_face(image)
    return detections.shape[0]
class FaceRecognition:
    """Callable wrapper around :func:`detect_face` that summarises the
    detections of a single image into a flat dict."""

    def __init__(
        self,
        debug=False
    ):
        # Stored for later use; not read by __call__ in the visible code.
        self._debug = debug

    def __call__(
        self,
        image=None,
    ):
        """Detect faces in *image*.

        Returns a dict with the face count plus confidence/left/top of the
        detection at row label 0 when at least one face is found; implicitly
        returns None otherwise.
        """
        self._image = image
        self._columns = [
            "confidence",
            "left",
            "top"
        ]
        self._detected_faces_df = detect_face(image)
        self._num_faces = len(self._detected_faces_df)
        if self._num_faces > 0:
            # NOTE(review): .loc[0, ...] assumes row label 0 survived the
            # confidence filter in detect_face (i.e. SSD output is sorted by
            # confidence); otherwise this raises KeyError — TODO confirm.
            return {
                "num_of_faces": self._num_faces,
                self._columns[0]: self._detected_faces_df.loc[0, self._columns[0]],
                self._columns[1]: self._detected_faces_df.loc[0, self._columns[1]],
                self._columns[2]: self._detected_faces_df.loc[0, self._columns[2]]
            } | /sabhi_utils-2.0.8-py3-none-any.whl/sabhi_utils/face_utility.py | 0.545044 | 0.199444 | face_utility.py | pypi |
import cv2
import numpy as np
import imutils
from sabhi_utils.image_utils.utils import load_image
class TemplateMatching:
    """Multi-scale template matching: slides cv2.matchTemplate over several
    downscaled versions of the image and keeps the best-scoring location."""

    def __init__(
        self,
        method=cv2.TM_CCOEFF_NORMED,
        matching_threshold=0.75,
        # NOTE(review): this default array is created once at class-definition
        # time and shared by all instances (read-only here, so harmless).
        scales=np.linspace(0.2, 1.0, 20)[::-1],
        debug=False
    ):
        # method: cv2 template-matching mode.
        # matching_threshold: currently unused (see commented-out code below).
        # scales: resize factors tried from largest to smallest.
        self._method = method
        self._matching_threshold = matching_threshold
        self._scales = scales
        self._debug = debug

    def _multi_scale_match_template(self, found=None):
        """Return (score, location, scale_ratio) of the best match found
        across all scales, or *found*/None when no scale fits the template."""
        image = self._image.copy()
        for scale in self._scales:
            resized_image = imutils.resize(
                image,
                width=int(image.shape[1] * scale)
            )
            # Ratio to map coordinates back to the original image size.
            r = image.shape[1] / float(resized_image.shape[1])
            # Stop once the image is smaller than the template.
            if resized_image.shape[0] < self._template_height or resized_image.shape[1] < self._template_width:
                break
            result = cv2.matchTemplate(
                resized_image,
                self._template,
                method=self._method
            )
            #match_locations = np.where(
            #    result >= self._matching_threshold
            #)
            (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
            if self._debug:
                # draw a bounding box around the detected region
                clone = resized_image
                cv2.rectangle(clone, (maxLoc[0], maxLoc[1]),
                              (maxLoc[0] + self._template_width, maxLoc[1] + self._template_height), (0, 0, 255), 2)
                cv2.imshow("Visualize", clone)
                cv2.waitKey(0)
                #cv2.imshow("result", result)
                # cv2.waitKey(0)
            # Keep the best score seen so far across all scales.
            if found is None or maxVal > found[0]:
                found = (maxVal, maxLoc, r)
        return found

    def __call__(
        self,
        image,
        template,
        label="1"
    ):
        """Match *template* inside *image*.

        Args:
            image: image path/array accepted by ``load_image``.
            template: template image array (H x W [x C]).
            label: currently unused.

        Returns:
            tuple or None: (max_score, (x, y), scale_ratio) of the best match.
        """
        self._image = load_image(image)
        self._template = template
        self._template_height, self._template_width = self._template.shape[:2]
        result = self._multi_scale_match_template()
        return result | /sabhi_utils-2.0.8-py3-none-any.whl/sabhi_utils/image_utils/template_matching.py | 0.526343 | 0.199776 | template_matching.py | pypi |
import numpy as np
from sabhi_utils.image_utils.processors import Closer
from sabhi_utils.image_utils.processors import EdgeDetector
from sabhi_utils.image_utils.processors import Opener
from collections import defaultdict
import itertools
import cv2
import time
from sklearn.cluster import KMeans
class HoughLineCornerDetector:
def __init__(
self,
rho_acc=2,
theta_acc=360,
thresh=100,
output_process=True
):
self.rho_acc = rho_acc
self.theta_acc = theta_acc
self.thresh = thresh
self.output_process = output_process
self._preprocessor = [
Closer(output_process=output_process),
EdgeDetector(output_process=output_process),
#Opener(output_process=output_process)
]
def _get_hough_lines(self):
"""
Extract straight lines from image using Hough Transform.
Returns
----------
array containing rho and theta of lines (Hess Norm formulation)
"""
image = cv2
lines = cv2.HoughLines(
self._image, self.rho_acc, np.pi / self.theta_acc, self.thresh
)
return lines
def _draw_hough_lines(
self,
image,
lines,
file_prefix=str(time.time())
):
hough_line_output = image
n = self._n
for line in lines:
rho, theta = line[0]
a, b = np.cos(theta), np.sin(theta)
x0, y0 = a * rho, b * rho
x1 = int(x0 + n * (-b))
y1 = int(y0 + n * (a))
x2 = int(x0 - n * (-b))
y2 = int(y0 - n * (a))
cv2.line(hough_line_output, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imwrite("output/"+file_prefix+"hough_line.jpg", hough_line_output)
def _segment_by_angle_kmeans(self, k=2, **kwargs):
"""Groups lines based on angle with k-means.
Uses k-means on the coordinates of the angle on the unit circle
to segment `k` angles inside `lines`.
"""
lines = self._lines
# Define criteria = (type, max_iter, epsilon)
default_criteria_type = (
cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
)
criteria = kwargs.get("criteria", (default_criteria_type, 10, 1.0))
flags = kwargs.get("flags", cv2.KMEANS_RANDOM_CENTERS)
attempts = kwargs.get("attempts", 10)
# returns angles in [0, pi] in radians
angles = np.array([line[0][1] for line in lines])
# multiply the angles by two and find coordinates of that angle
pts = np.array(
[[np.cos(2 * angle), np.sin(2 * angle)] for angle in angles],
dtype=np.float32,
)
# run kmeans on the coords
labels, centers = cv2.kmeans(
pts, k, None, criteria, attempts, flags)[1:]
labels = labels.reshape(-1) # transpose to row vec
# segment lines based on their kmeans label
segmented = defaultdict(list)
for i, line in zip(range(len(lines)), lines):
segmented[labels[i]].append(line)
segmented = list(segmented.values())
return segmented
def _get_intersections(self, k=2):
"""Finds the intersections between groups of lines."""
segmented_lines = self._segmented_lines
intersections = []
group_lines = list(itertools.product(
segmented_lines[0], segmented_lines[1]))
for line_i, line_j in group_lines:
int_point = self._intersection(line_i, line_j)
intersections.append(int_point)
return intersections
def _find_quadrilaterals(self):
X = np.array([[point[0][0], point[0][1]]
for point in self._intersections])
kmeans = KMeans(
n_clusters=4,
init="k-means++",
max_iter=100,
n_init=10,
random_state=0
).fit(X)
if self.output_process:
self._draw_quadrilaterals(self._lines, kmeans)
return [[center.tolist()] for center in kmeans.cluster_centers_]
def _draw_quadrilaterals(
self,
lines,
kmeans,
file_prefix=str(time.time())
):
grouped_output = self._get_color_image()
n = self._n
for idx, line in enumerate(lines):
rho, theta = line[0]
a, b = np.cos(theta), np.sin(theta)
x0, y0 = a * rho, b * rho
x1 = int(x0 + n * (-b))
y1 = int(y0 + n * (a))
x2 = int(x0 - n * (-b))
y2 = int(y0 - n * (a))
cv2.line(grouped_output, (x1, y1), (x2, y2), (0, 0, 255), 2)
for point in kmeans.cluster_centers_:
x, y = point
cv2.circle(grouped_output, (int(x), int(y)), 5, (255, 255, 255), 5)
cv2.imwrite(
"output/"+str(file_prefix)+"grouped_quads.jpg",
grouped_output
)
def _get_angle_between_lines(self, line_1, line_2):
rho1, theta1 = line_1[0]
rho2, theta2 = line_2[0]
# x * cos(theta) + y * sin(theta) = rho
# y * sin(theta) = x * (- cos(theta)) + rho
# y = x * (-cos(theta) / sin(theta)) + rho
m1 = -(np.cos(theta1) / np.sin(theta1))
m2 = -(np.cos(theta2) / np.sin(theta2))
return abs(np.arctan(abs(m2 - m1) / (1 + m2 * m1))) * (180 / np.pi)
def _intersection(self, line1, line2):
"""Finds the intersection of two lines given in Hesse normal form.
Returns closest integer pixel locations.
See https://stackoverflow.com/a/383527/5087436
"""
rho1, theta1 = line1[0]
rho2, theta2 = line2[0]
A = np.array(
[[np.cos(theta1), np.sin(theta1)], [
np.cos(theta2), np.sin(theta2)]]
)
b = np.array([[rho1], [rho2]])
x0, y0 = np.linalg.solve(A, b)
x0, y0 = int(np.round(x0)), int(np.round(y0))
return [[x0, y0]]
def _draw_intersections(
self,
image,
intersections,
file_prefix=str(time.time())
):
intersection_point_output = image
n = self._n
for line in self._lines:
rho, theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + n * (-b))
y1 = int(y0 + n * (a))
x2 = int(x0 - n * (-b))
y2 = int(y0 - n * (a))
cv2.line(intersection_point_output,
(x1, y1), (x2, y2), (0, 0, 255), 2)
for point in intersections:
x, y = point[0]
cv2.circle(intersection_point_output,
(x, y), 5, (255, 255, 127), 5)
cv2.imwrite("output/"+file_prefix+"intersection_point_output.jpg",
intersection_point_output)
def _get_color_image(self):
return cv2.cvtColor(self._image.copy(), cv2.COLOR_GRAY2RGB)
def __call__(
self,
image,
debug=False
):
# Step 1: Process for edge detection
self._image = image
self._n = max(self._image.shape[0], self._image.shape[1])
for processor in self._preprocessor:
self._image = processor(self._image)
# Step 2: Get hough lines
self._lines = self._get_hough_lines()
self._segmented_lines = self._segment_by_angle_kmeans()
# Step 3: Get intersection points
self._intersections = self._get_intersections()
# Step 4: Get Quadrilaterals
self._quadrilaterals = self._find_quadrilaterals()
if self.output_process:
self._draw_intersections(
image,
self._intersections
)
self._draw_hough_lines(
image,
self._lines)
return self._quadrilaterals | /sabhi_utils-2.0.8-py3-none-any.whl/sabhi_utils/image_utils/hough_line_corner_detector.py | 0.802942 | 0.409634 | hough_line_corner_detector.py | pypi |
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import (calinski_harabasz_score, davies_bouldin_score,
pairwise_distances, silhouette_score)
def convertion_to_array(emb):
    """Convert an embeddings matrix to a numpy array.

    :param emb: embeddings matrix
    :type emb: numpy.array|scipy.sparse.csr.csr_matrix
    :return: converted embeddings matrix (dense arrays are returned as-is)
    :rtype: numpy.array
    :raises Exception: when the matrix type is not supported
    """
    if isinstance(emb, np.ndarray):
        return emb
    # Sparse-like inputs are expected to expose .toarray(); chain the original
    # error so the real cause is not lost when the conversion fails.
    try:
        return emb.toarray()
    except Exception as exc:
        raise Exception("O tipo da matriz de embeddings não é suportado.") from exc
class Evaluate:
def __init__(self, labels, emb):
"""
Construtor Evaluate.
:param labels: Os rótulos das amostras.
:type labels: list ou numpy.array
:param emb: As embeddings das amostras.
:type emb: numpy.array
"""
self.emb = emb
self.labels = labels
self.num_samples = len(self.labels)
self.groups_indexes = np.unique(self.labels)
self.n_clusters = len(self.groups_indexes)
self.idx_dict = []
self.centroides = []
self.inertias = []
self.cluster_samples = []
self.inner_distances = None
self.size_of_clusters = None
for g in self.groups_indexes:
d = {"grupo": g, "indexes": [id for id, v in enumerate(self.labels) if g == v]}
self.idx_dict.append(d)
def get_silhouette_score(self):
"""
Calcula o índice de silhueta para as amostras.
:return: O valor do índice de silhueta.
:rtype: float
"""
return silhouette_score(self.emb, self.labels)
def get_sizes_of_clusters(self):
"""
Calcula os tamanhos dos clusters.
:return: Uma lista com os tamanhos dos clusters.
:rtype: list
"""
if not self.size_of_clusters:
counter = Counter(self.labels)
size_of_clusters = [counter[i] for i in self.groups_indexes]
self.size_of_clusters = size_of_clusters
return self.size_of_clusters
def get_centroides(self):
"""
Calcula os centroides dos clusters.
:return: Os centroides dos clusters.
:rtype: numpy.array
"""
assert self.num_samples == self.emb.shape[0]
if len(self.centroides) == 0:
centroides = []
for d in self.idx_dict:
centroide = np.mean(self.emb[d["indexes"], :], axis=0)
centroides.append(centroide)
self.centroides = np.squeeze(np.asarray(centroides))
return self.centroides
def get_inertias(self):
"""
Calcula as inércias dos clusters.
:return: Uma lista com as inércias dos clusters.
:rtype: list
"""
if len(self.centroides) == 0:
self.get_centroides()
assert self.num_samples == self.emb.shape[0]
assert self.n_clusters == self.centroides.shape[0]
if not self.inertias:
inertias = []
for i, d in enumerate(self.idx_dict):
ssd = []
for j in range(self.emb[d["indexes"]].shape[0]):
diff = self.emb[d["indexes"]][j, :] - self.centroides[i]
ssd.append(np.inner(diff, diff))
inertias.append(np.sum(ssd))
self.inertias = inertias
return self.inertias
def get_inertia(self):
"""
Calcula a inércia total dos clusters.
:return: O valor da inércia total.
:rtype: float
"""
if not self.inertias:
self.get_inertias()
self.inertia = sum(self.inertias)
return self.inertia
# Distancia media dos elementos dos grupos
def get_inner_distances(self):
"""
Calcula as distâncias médias dentro dos clusters.
:return: Uma lista com as distâncias médias dentro dos clusters.
:rtype: list
"""
if not self.inner_distances:
inner_distances = []
for i, d in enumerate(self.idx_dict):
distances = pairwise_distances(self.emb[d["indexes"], :], metric="euclidean")
inner_distances.append(np.mean(distances))
self.inner_distances = inner_distances
return inner_distances
def get_cluster_distance_avg(self):
"""
Calcula a distância média dos elementos
até o centroide de cada cluster.
:return: O valor da distância média dos elementos até o centroide.
:rtype: float
"""
if len(self.centroides) == 0:
self.get_centroides()
distancias_pontos_centroides = pairwise_distances(self.emb, self.centroides)
distancia_media_grupo = []
for i in range(self.n_clusters):
group_indexes = np.where(self.labels == i)[0]
if len(group_indexes) > 0:
distancia_media_grupo.append(np.mean(distancias_pontos_centroides[group_indexes, i]))
else:
distancia_media_grupo.append(np.nan)
self.cluster_distance_avg = np.nanmean(distancia_media_grupo)
return self.cluster_distance_avg
def get_outer_distance(self):
"""
Calcula a distância média entre os centroides dos clusters.
:return: A matriz de distâncias entre os centroides.
:rtype: numpy.array
"""
if len(self.centroides) == 0:
self.get_centroides()
assert self.n_clusters == len(self.centroides)
outer_distance = pairwise_distances(self.centroides)
self.outer_distance = np.mean(outer_distance)
return outer_distance
def get_calinski_harabasz(self):
"""
Calcula o índice de Calinski-Harabasz para a avaliação dos clusters.
:return: O valor do índice de Calinski-Harabasz.
:rtype: float
"""
self.calinski_harabasz = calinski_harabasz_score(convertion_to_array(self.emb), self.labels)
return self.calinski_harabasz
def get_davies_bouldin(self):
"""
Calcula o índice de Davies-Bouldin para a avaliação dos clusters.
:return: O valor do índice de Davies-Bouldin.
:rtype: float
"""
self.davies_bouldin = davies_bouldin_score(convertion_to_array(self.emb), self.labels)
return self.davies_bouldin
def get_metrics(
self, metrics=["inertia", "silhouette_score", "calinski_harabasz", "davies_bouldin", "outer_distance"]
):
"""Retorna as métricas necessárias para avaliar o treinamento.
:param metrics: Métricas que deseja coletar juntos, defaults to ["inertia", "silhouette_score", "calinski_harabasz","davies_bouldin", "outer_distance"]
:type metrics: list, optional
:return: dicionário contrento nome da métrica e o seu valor
:rtype: dict
:raises Exception: Exeção disparada quando recebe uma métrica que não existe na classe
""" # noqa: E501
dict_metrics = {}
for metric in metrics:
get_metric = f"get_{metric}"
if hasattr(self, get_metric) and callable(getattr(self, get_metric)):
dict_metrics.update({metric: getattr(self, get_metric)()})
else:
raise Exception(f"Metric {metric} invalid or not implemented")
self.metrics = dict_metrics
return self.metrics
def get_inertias_chart(self):
"""
Plota o gráfico da inercia de cada cluster.
:return: O objeto matplotlib.pyplot com o gráfico do índice de silhueta.
:rtype: matplotlib.pyplot.Figure
"""
if not self.inertias:
self.get_inertias()
# Define o backend adequado para execução em ambiente concorrente
plt.switch_backend("Agg")
fig, ax = plt.subplots()
ax.grid(True)
num_clusters = np.arange(0, self.n_clusters)
ax.bar(num_clusters, self.inertias)
ax.set_title("Inercia dos Clusteres")
ax.set_xlabel("Índice do Cluster")
ax.set_ylabel("Inercia")
ax.set_xticks(num_clusters)
return fig
def get_inner_distances_chart(self):
"""
Plota o gráfico da distância média emtre elementos dos clusters.
:return: O objeto matplotlib.pyplot com o gráfico do índice de silhueta.
:rtype: matplotlib.pyplot.Figure
"""
if not self.inner_distances:
self.get_inner_distances()
# Define o backend adequado para execução em ambiente concorrente
plt.switch_backend("Agg")
num_clusters = np.arange(0, self.n_clusters)
fig, ax = plt.subplots()
ax.grid(True)
ax.bar(num_clusters, self.inner_distances)
ax.set_title("Distância média entre elementos dos clusters.")
ax.set_xlabel("Índice do Cluster")
ax.set_ylabel("Distância média entre elementos do cluster")
ax.set_xticks(num_clusters)
return fig
def get_charts(self, charts=["inertias", "inner_distances", "num_samples"]):
"""Retorna dicionario com os gráficos de avaliação individual do modelo
:param charts: Gráficos que deseja coletar juntos, defaults to ["inertias", "inner_distances", "num_samples"]
:return: Lista com gŕaficos para armazenar no mlflow
:rtype: list[dict(str,fig|str)]
"""
charts_return = list()
for chart in charts:
get_chart = f"get_{chart}_chart"
if hasattr(self, get_chart) and callable(getattr(self, get_chart)):
charts_return.append({"name": f"{chart}.png", "figure": getattr(self, get_chart)()})
else:
raise Exception(f"Chart {chart} invalid or not implemented")
return charts_return
def get_num_samples(self):
"""
Obtém o número de amostras de cada cluster.
:return: Lista com o número de amostras de cada cluster.
:rtype: list
"""
if not self.cluster_samples:
self.cluster_samples = [len(g["indexes"]) for g in self.idx_dict]
return self.cluster_samples
def get_num_samples_chart(self):
"""
Plota o boxplot do número de amostras de cada cluster.
:return: O objeto matplotlib.pyplot do boxplot.
:rtype: matplotlib.pyplot.Figure
"""
if not self.cluster_samples:
self.get_num_samples()
plt.switch_backend("Agg")
fig, ax = plt.subplots()
# num_clusters = np.arange(0, self.n_clusters)
ax.boxplot(self.cluster_samples)
# ax.set_xticklabels(num_clusters)
ax.set_title("Intervalo de número de amostras por cluster")
ax.set_xlabel("Quantidade de clusters")
ax.set_ylabel("Número de amostras por cluster")
return fig | /sabia-utils-0.2.0.tar.gz/sabia-utils-0.2.0/sabia_utils/evaluate.py | 0.834238 | 0.628806 | evaluate.py | pypi |
import matplotlib.pyplot as plt
import numpy as np
class EvaluateExperiments:
    """Compares a series of clustering experiments (one per cluster count)
    using the elbow method and silhouette scores."""

    def __init__(self, metrics):
        """
        EvaluateExperiments constructor.

        :param metrics: the per-experiment cluster-evaluation metrics; each
            entry must contain "inertia", "silhouette_score" and "num_clusters"
        :type metrics: list
        """
        self.metrics = metrics
        self.inertias = [metric["inertia"] for metric in self.metrics]
        self.silhouette_scores = [metric["silhouette_score"] for metric in self.metrics]
        self.num_clusters = [metric["num_clusters"] for metric in self.metrics]

    def get_elbow(self, line_coefficients=False):
        """
        Compute the elbow-method values used to pick the ideal number of clusters.

        :param line_coefficients: whether the line slope should also be
            returned, defaults to False
        :type line_coefficients: bool, optional
        :return: the elbow intercept values, or (slope, values) when
            line_coefficients is True
        :rtype: list or tuple
        """  # noqa: E501
        # Slope of the chord from the first to the last (k, inertia) point.
        slope = (self.inertias[-1] - self.inertias[0]) / (self.num_clusters[-1] - self.num_clusters[0])
        # Intercept of a line with that slope through each point; the elbow
        # is the point with the smallest intercept.
        b = [y - slope * x for x, y in zip(self.num_clusters, self.inertias)]
        if line_coefficients:
            return slope, b
        else:
            return b

    def get_elbow_chart(self):
        """
        Plot the elbow-method chart used to pick the ideal number of clusters.

        :return: the matplotlib figure with the elbow chart
        :rtype: matplotlib.pyplot.Figure
        """
        # Non-interactive backend, suitable for concurrent execution.
        plt.switch_backend("Agg")
        fig, ax = plt.subplots()
        ax.grid(True)
        a, b = self.get_elbow(line_coefficients=True)
        min_i = np.argmin(b)
        min_b = self.inertias[min_i]
        ax.plot(self.num_clusters, self.inertias, marker="o")
        # Mark the elbow point with a star.
        ax.plot([self.num_clusters[min_i]], [min_b], marker="*", ls="none", ms=20)
        ax.set_title("Método do cotovelo.")
        ax.set_xlabel("Quantidade de clusters")
        ax.set_ylabel("Inércia")
        ax.set_xticks(self.num_clusters)
        return fig

    def get_silhouette_chart(self):
        """
        Plot the silhouette-score chart used to pick the ideal number of
        clusters.

        :return: the matplotlib figure with the silhouette chart
        :rtype: matplotlib.pyplot.Figure
        """
        plt.switch_backend("Agg")  # backend suitable for concurrent execution
        fig, ax = plt.subplots()
        ax.grid(True)
        ax.plot(self.num_clusters, self.silhouette_scores, marker="o")
        ax.set_title("Índice de Silhueta.")
        ax.set_xlabel("Número de Clusters")
        ax.set_ylabel("Índice de Silhueta")
        ax.set_xticks(self.num_clusters)
        return fig

    def get_charts(self):
        """Return a list of dicts holding each chart and its file name.

        :return: list of charts to store in mlflow
        :rtype: list[dict(str,fig|str)]
        """
        return [
            {"figure": self.get_elbow_chart(), "name": "elbowChart.png"},
            {"figure": self.get_silhouette_chart(), "name": "silhouetteChart.png"},
        ]

    def get_best_k(self):
        """Return the optimal k (number of clusters) per the elbow method.

        :return: the k value
        :rtype: int
        """
        a, b = self.get_elbow(line_coefficients=True)
        # line eq: #y=ax+b
        # when x = 0, y=b
        # we are interested in the minimum b
        k = self.num_clusters[np.argmin(b)]
        return k

    def get_metrics(self, metrics=["best_k"]):
        """Return the metrics needed to evaluate a set of models.

        :param metrics: metric names to collect, defaults to ["best_k"]
        :type metrics: list, optional
        :return: dict mapping each metric name to its value
        :rtype: dict
        :raises Exception: raised when a requested metric does not exist on the class
        """  # noqa: E501
        dict_metrics = {}
        for metric in metrics:
            get_metric = f"get_{metric}"
            if hasattr(self, get_metric) and callable(getattr(self, get_metric)):
                dict_metrics.update({metric: getattr(self, get_metric)()})
            else:
                raise Exception(f"Metric {metric} invalid or not implemented")
        # NOTE(review): this overwrites the input metric list stored on
        # self.metrics by __init__ — TODO confirm that is intended.
        self.metrics = dict_metrics
        return self.metrics | /sabia-utils-0.2.0.tar.gz/sabia-utils-0.2.0/sabia_utils/evaluateExperiments.py | 0.79999 | 0.577674 | evaluateExperiments.py | pypi |
import datetime
import difflib
import pytz
class Sablier(object):
    """Date, time and timezone for the rest of us.

    Holds a date, a time and an optional pytz timezone; setters are chainable.
    NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    in Python 3 — presumably acceptable for this mutable class.
    """

    def __init__(self, date=None, time=datetime.time(0), timezone=None):
        self.date = date
        self.time = time
        # A timezone string is fuzzily resolved via disambiguate().
        self.timezone = pytz.timezone(disambiguate(timezone)) if timezone else None

    def On(self, date_or_year=None, month=None, day=None):
        """Chainable date setter: no args -> today; a date object -> as-is;
        otherwise (year, month, day) ints."""
        if date_or_year is None:
            self.date = datetime.date.today()
        elif isinstance(date_or_year, datetime.date):
            self.date = date_or_year
        else:
            if month is None:
                raise TypeError('month required (pos 2)')
            if day is None:
                raise TypeError('day required (pos 3)')
            self.date = datetime.date(date_or_year, month, day)
        return self

    def At(self, time_or_hour=None, minute=0, second=0):
        """Chainable time setter: no args -> now; a time object -> as-is;
        otherwise (hour, minute, second) ints."""
        if time_or_hour is None:
            self.time = datetime.datetime.now().time()
        elif isinstance(time_or_hour, datetime.time):
            self.time = time_or_hour
        else:
            self.time = datetime.time(time_or_hour, minute, second)
        return self

    def In(self, timezone):
        """Change timezone and return a new Sablier instance.

        NOTE(review): asymmetric by design — when no timezone is set yet this
        mutates and returns *self*; otherwise it converts and returns a new
        instance.
        """
        if self.timezone is None:
            self.timezone = pytz.timezone(disambiguate(timezone))
            return self
        else:
            dt = self.datetime_in(timezone)
            return Sablier(dt.date(), dt.time(), timezone)

    @property
    def datetime(self):
        """Localized datetime.

        Raises InvalidDate/InvalidTime when date or time is not set.
        NOTE(review): also assumes self.timezone is set; when it is None the
        .localize call raises AttributeError instead — TODO confirm.
        """
        try:
            year, month, day = self.date.year, self.date.month, self.date.day
        except AttributeError:
            raise InvalidDate()
        try:
            hour, minute, second = self.time.hour, self.time.minute, self.time.second
        except AttributeError:
            raise InvalidTime()
        return self.timezone.localize(datetime.datetime(year, month, day,
                                                        hour, minute, second))

    def datetime_in(self, timezone):
        """Change timezone and return datetime.datetime.

        Defaults the source timezone to UTC when none is set.
        """
        if self.timezone is None:
            self.timezone = pytz.utc
        timezone = disambiguate(timezone)
        return self.datetime.astimezone(pytz.timezone(timezone))

    def date_in(self, timezone):
        """Change timezone and return datetime.date"""
        return self.datetime_in(timezone).date()

    def time_in(self, timezone):
        """Change timezone and return datetime.time"""
        return self.datetime_in(timezone).time()

    def __repr__(self):
        zone = self.timezone.zone if self.timezone else 'UTC'
        return "Sablier(%r, %r, '%s')" % (self.date, self.time, zone)

    def __eq__(self, other):
        # Equal only when date, time AND timezone all match.
        return all((self.date == other.date,
                    self.time == other.time,
                    self.timezone == other.timezone))

    def __sub__(self, other):
        # Sablier - Sablier -> timedelta (other converted into our zone);
        # Sablier - timedelta -> new Sablier in the same zone.
        if isinstance(other, Sablier):
            return self.datetime - other.datetime_in(self.timezone.zone)
        elif isinstance(other, datetime.timedelta):
            dt = self.datetime - other
            return Sablier(dt.date(), dt.time(), self.timezone.zone)
        else:
            return NotImplemented

    def __add__(self, timedelta):
        if isinstance(timedelta, datetime.timedelta):
            dt = self.datetime + timedelta
            return Sablier(dt.date(), dt.time(), self.timezone.zone)
        else:
            return NotImplemented
def disambiguate(timezone):
    """Resolve *timezone* to a canonical pytz zone name.

    Exact names pass through; otherwise substring matches are tried, then
    difflib close matches. Raises AmbiguousTimezone when several candidates
    remain and UnknownTimezone when none do.
    """
    if timezone in pytz.all_timezones:
        return timezone
    candidates = [name for name in pytz.all_timezones if timezone in name]
    if not candidates:
        candidates = difflib.get_close_matches(timezone, pytz.all_timezones)
    if len(candidates) > 1:
        raise AmbiguousTimezone('%s: use one of %s' % (timezone, candidates))
    if not candidates:
        raise UnknownTimezone(timezone)
    return candidates[0]
def On(*args):
    """Date constructor: no args -> today; a date object -> as-is;
    otherwise (year, month, day) ints."""
    if not args:
        return Sablier(date=datetime.date.today())
    first = args[0]
    if isinstance(first, datetime.date):
        return Sablier(date=first)
    return Sablier(date=datetime.date(*args))
def At(*args):
    """Time constructor: no args -> now; a time object -> as-is;
    otherwise (hour, minute, second) ints."""
    if not args:
        return Sablier(time=datetime.datetime.now().time())
    first = args[0]
    if isinstance(first, datetime.time):
        return Sablier(time=first)
    return Sablier(time=datetime.time(*args))
def In(timezone):
    """Timezone constructor: a Sablier with only the timezone set."""
    return Sablier(timezone=timezone)
def Epoch(timestamp):
    """Epoch timestamp constructor.

    Interprets *timestamp* as seconds since the Unix epoch (UTC).
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12; build an
    # aware UTC datetime instead — .date() and .time() yield the same wall
    # values as the old naive result.
    dt = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
    return Sablier(date=dt.date(), time=dt.time(), timezone='UTC')
def Datetime(datetime):
    """Datetime constructor: split a datetime into its date and time parts."""
    # NOTE(review): the parameter shadows the `datetime` module. It works
    # because only .date()/.time() of the argument are used here, but renaming
    # it would break keyword callers, so it is left as-is.
    return On(datetime.date()).At(datetime.time())
class SablierError(Exception):
    """Generic sablier exception; base class for all errors below."""
    pass


class AmbiguousTimezone(SablierError):
    """Raised when a timezone string cannot be disambiguated."""
    pass


class InvalidDate(SablierError):
    """Raised when date is not or improperly set"""
    pass


class InvalidTime(SablierError):
    """Raised when time is not or improperly set"""
    pass


class UnknownTimezone(SablierError):
    """Raised when we can't fuzzy match the timezone"""
    pass | /sablier-0.2.2.tar.gz/sablier-0.2.2/sablier.py | 0.745676 | 0.41745 | sablier.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | /saboohi_dsnd-0.1.tar.gz/saboohi_dsnd-0.1/saboohi_dsnd/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi |
# SABRmetrics
<div>
<a href="https://github.com/JacobLee23/SABRmetrics/blob/master/LICENSE" target="_blank">
<img src="https://img.shields.io/github/license/JacobLee23/SABRmetrics" alt="LICENSE">
</a>
<img src="https://img.shields.io/pypi/pyversions/SABRmetrics" alt="PyPI - Python Version">
<a href="https://github.com/JacobLee23/SABRmetrics/blob/master/Pipfile" target="_blank">
<img src="https://img.shields.io/github/pipenv/locked/python-version/JacobLee23/SABRmetrics" alt="GitHub Pipenv locked Python version">
</a>
<a href="https://pypi.org/project/sabrmetrics/" target="_blank">
<img src="https://img.shields.io/pypi/v/sabrmetrics" alt="PyPI">
</a>
<a href="https://github.com/JacobLee23/SABRmetrics/releases/latest" target="_blank">
<img src="https://img.shields.io/github/v/release/JacobLee23/SABRmetrics" alt="GitHub release (latest SemVer)">
</a>
<a href="https://github.com/JacobLee23/SABRmetrics/tags" target="_blank">
<img src="https://img.shields.io/github/v/tag/JacobLee23/SABRmetrics" alt="GitHub tag (latest SemVer)">
</a>
</div>
<div>
<img src="https://img.shields.io/github/languages/code-size/JacobLee23/SABRmetrics" alt="GitHub code size in bytes">
<a href="https://github.com/JacobLee23/SABRmetrics/find/master" target="_blank">
<img src="https://img.shields.io/github/directory-file-count/JacobLee23/SABRmetrics" alt="GitHub repo file count">
</a>
<img src="https://img.shields.io/github/repo-size/JacobLee23/SABRmetrics" alt="GitHub repo size">
<img src="https://img.shields.io/github/commit-activity/m/JacobLee23/SABRmetrics" alt="Commit Activity (Month)">
<a href="https://github.com/JacobLee23/SABRmetrics/commits/master" target="_blank">
<img src="https://img.shields.io/github/last-commit/JacobLee23/SABRmetrics" alt="Last Commit">
</a>
</div>
<div>
<a href="https://github.com/JacobLee23/SABRmetrics/issues" target="_blank">
<img src="https://img.shields.io/github/issues-raw/JacobLee23/SABRmetrics" alt="GitHub issues">
</a>
<a href="https://github.com/JacobLee23/SABRmetrics/issues?q=is%3Aissue+is%3Aclosed" target="_blank">
<img src="https://img.shields.io/github/issues-closed-raw/JacobLee23/SABRmetrics" alt="GitHub closed issues">
</a>
<a href="https://github.com/JacobLee23/SABRmetrics/pulls" target="_blank">
<img src="https://img.shields.io/github/issues-pr-raw/JacobLee23/SABRmetrics" alt="GitHub pull requests">
</a>
<a href="https://github.com/JacobLee23/SABRmetrics/pulls?q=is%3Apr+is%3Aclosed" target="_blank">
<img src="https://img.shields.io/github/issues-pr-closed-raw/JacobLee23/SABRmetrics" alt="GitHub closed pull requests">
</a>
</div>
***
## Background
An open-source library of web-scraping software for popular SABRmetrics websites.
> Sabermetrics (or originally as SABRmetrics) is the empirical analysis of baseball, especially baseball statistics that measure in-game activity.
-- [Wikipedia](https://en.wikipedia.org/wiki/Sabermetrics)
> sabermetrics, the statistical analysis of baseball data. Sabermetrics aims to quantify baseball players’ performances based on objective statistical measurements, especially in opposition to many of the established statistics (such as, for example, runs batted in and pitching wins) that give less accurate approximations of individual efficacy.
-- [Britannica](https://www.britannica.com/sports/sabermetrics)
***
## Primary Features
- Easy scraping of various Internet Player ID databases
## Installation
From PyPI:
```cmd
python -m pip install sabrmetrics
```
## Requirements
This project requires Python 3.6+.
*Note: Some of the packages listed in the project Pipfile under `dev-packages` are not compatible with Python 3.6.
This includes `pytest`, so project tests cannot be run using Python 3.6.
However, all primary dependencies (under `default`) are compatible with Python 3.6, so Python 3.6 supports most normal functionality.*
## Dependencies
- [`beautifulsoup4`](https://pypi.org/project/beautifulsoup4/) ([Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/))
- [`lxml`](https://pypi.org/project/lxml/) ([Documentation](https://lxml.de/))
- [`numpy`](https://pypi.org/project/numpy/) ([Documentation](https://numpy.org/doc/))
- [`pandas`](https://pypi.org/project/pandas/) ([Documentation](https://pandas.pydata.org/pandas-docs/stable/))
- [`python-dateutil`](https://pypi.org/project/python-dateutil/) ([Documentation](https://dateutil.readthedocs.io/))
- [`requests`](https://pypi.org/project/requests/) ([Documentation](https://requests.readthedocs.io/))
This project uses [`pipenv`](https://pypi.org/project/pipenv/) ([Documentation](https://pipenv.pypa.io/en/latest/)) for virtual environment dependency management.
See the [Pipfile](https://github.com/JacobLee23/SABRmetrics/blob/master/Pipfile) to see a full list of package dependencies, including development dependencies.
## Testing
The tests for this project are written using [`pytest`](https://pypi.org/project/pytest) ([Documentation](https://docs.pytest.org/)).
To run the project tests, run:
```cmd
pytest sabrmetrics/tests/
```
The `pipenv` command script shorthand defined in the project [Pipfile][Pipfile] can also be used to run the project tests; run:
```cmd
pipenv pytest
```
Another `pipenv` command script shorthand, defined in the project [Pipfile][Pipfile], can be used to output an HTML test report, using [`pytest-html`](https://pypi.org/project/pytest-html) ([Documentation](https://pytest-html.readthedocs.io/)); run:
```cmd
pipenv pytest-html
```
## License
This project is licensed under the [MIT License][LICENSE].
## Documentation
[][Documentation]
The documentation for this project is hosted by [Read the Docs](https://readthedocs.org/): [Official Documentation][Documentation]
[Documentation]: https://sabrmetrics.readthedocs.io/en/latest/
[LICENSE]: https://github.com/JacobLee23/SABRmetrics/blob/master/LICENSE
[Pipfile]: https://github.com/JacobLee23/SABRmetrics/blob/master/Pipfile
| /sabrmetrics-0.5.3.tar.gz/sabrmetrics-0.5.3/README.md | 0.41478 | 0.914977 | README.md | pypi |
# SACAD
## Smart Automatic Cover Art Downloader
[](https://pypi.python.org/pypi/sacad/)
[](https://aur.archlinux.org/packages/sacad/)
[](https://github.com/desbma/sacad/actions)
[](https://coveralls.io/github/desbma/sacad?branch=master)
[](https://github.com/desbma/sacad)
[](https://pypi.python.org/pypi/sacad/)
[](https://github.com/desbma/sacad/blob/master/LICENSE)
SACAD is a multi platform command line tool to download album covers without manual intervention, ideal for integration in scripts, audio players, etc.
SACAD also provides a second command line tool, `sacad_r`, to scan a music library, read metadata from audio tags, and download missing covers automatically, optionally embedding the image into the audio files.
## Features
- Can target specific image size, and find results for high resolution covers
- Support JPEG and PNG formats
- Customizable output: save image along with the audio files / in a different directory named by artist/album / embed cover in audio files...
- Currently support the following cover sources:
- Amazon CD (.com, .ca, .cn, .fr, .de, .co.jp and .co.uk variants)
- Amazon digital music
- ~~CoverLib~~ (site is dead)
- Deezer
- Discogs
- ~~Google Images~~ (removed, too unreliable)
- Last.fm
  - iTunes
- Smart sorting algorithm to select THE best cover for a given query, using several factors: source reliability, image format, image size, image similarity with reference cover, etc.
- Automatically crunch images with optipng, oxipng or jpegoptim (can save 30% of filesize without any loss of quality, great for portable players)
- Cache search results locally for faster future search
- Do everything to avoid getting blocked by the sources: hide user-agent and automatically take care of rate limiting
- Automatically convert/resize image if needed
- Multiplatform (Windows/Mac/Linux)
SACAD is designed to be robust and be executed in batch of thousands of queries:
- HTML parsing is done without regex but with the LXML library, which is faster, and more robust to page changes
- When the size of an image reported by a source is not reliable (ie. Google Images), automatically download the first KB of the file to get its real size from the file header
- Process several queries simultaneously (using [asyncio](https://docs.python.org/3/library/asyncio.html)), to speed up processing
- Automatically reuse TCP connections (HTTP Keep-Alive), for better network performance
- Automatically retry failed HTTP requests
- Music library scan supports all common audio formats (MP3, AAC, Vorbis, FLAC..)
- Cover sources page or API changes are quickly detected, thanks to high test coverage, and SACAD is quickly updated accordingly
## Installation
SACAD requires [Python](https://www.python.org/downloads/) >= 3.7.
### Standalone Windows executable
Windows users can download a [standalone binary](https://github.com/desbma/sacad/releases/latest) which does not require Python.
### Arch Linux
Arch Linux users can install the [sacad](https://aur.archlinux.org/packages/sacad/) AUR package.
### From PyPI (with PIP)
1. If you don't already have it, [install pip](https://pip.pypa.io/en/stable/installing/) for Python 3
2. Install SACAD: `pip3 install sacad`
### From source
1. If you don't already have it, [install setuptools](https://pypi.python.org/pypi/setuptools#installation-instructions) for Python 3
2. Clone this repository: `git clone https://github.com/desbma/sacad`
3. Install SACAD: `python3 setup.py install`
#### Optional
Additionally, if you want to benefit from image crunching (lossless recompression to save additional space):
- Install [oxipng](https://github.com/shssoichiro/oxipng) or [optipng](http://optipng.sourceforge.net/)
- Install [jpegoptim](http://freecode.com/projects/jpegoptim)
On Ubuntu and other Debian derivatives, you can install them with `sudo apt-get install optipng jpegoptim`.
Note that depending of the speed of your CPU, crunching may significantly slow down processing as it is very CPU intensive (especially with optipng).
## Command line usage
Two tools are provided: `sacad` to search and download one cover, and `sacad_r` to scan a music library and download all missing covers.
Run `sacad -h` / `sacad_r -h` to get full command line reference.
### Examples
To download the cover of _Master of Puppets_ from _Metallica_, to the file `AlbumArt.jpg`, targetting ~ 600x600 pixel resolution: `sacad "metallica" "master of puppets" 600 AlbumArt.jpg`.
To download covers for your library with the same parameters as previous example: `sacad_r library_directory 600 AlbumArt.jpg`.
## Limitations
- Only supports front covers
## Adding cover sources
Adding a new cover source is very easy if you are a Python developer, you need to inherit the `CoverSource` class and implement the following methods:
- `getSearchUrl(self, album, artist)`
- `parseResults(self, api_data)`
- `updateHttpHeaders(self, headers)` (optional)
See comments in the code for more information.
## License
[Mozilla Public License Version 2.0](https://www.mozilla.org/MPL/2.0/)
| /sacad-2.7.5.tar.gz/sacad-2.7.5/README.md | 0.903586 | 0.749592 | README.md | pypi |
from serial import Serial
from thermocouples_reference import thermocouples
class InvalidChannel(Exception):
    """Raised when a channel name is not a valid SACADA channel."""
class InvalidOption(Exception):
    """Raised when an option value is not supported by the board."""
class SACADA(object):
    """SACADA boards Python interface.

    Right now it just implements support for SACADA Mini using SCPI
    over USB-CDC. The plan is to provide a generic base class for use
    with all SACADA boards.
    """

    # Valid analog input / output channel names on the SACADA Mini
    IN_CHANNELS = ["A0", "A1", "A2", "A3", "A4", "INSTR", "TC"]
    OUT_CHANNELS = ["CH1", "CH2"]

    def __init__(self, location):
        self.open(location)
        # Thermocouple voltage offset subtracted in readTemperature();
        # recalibrated by calling zero()
        self._zero = 0

    def open(self, location):
        """Open the serial connection to the board at ``location``."""
        self._serial = Serial(location, timeout=1) # We use USB-CDC so baud rate doesn't matter

    def close(self):
        """Close the serial connection."""
        self._serial.close()

    def identify(self):
        """Return the board identification string (SCPI ``*IDN?``)."""
        return self.sendSCPICommand("*IDN?")

    def sendSCPICommand(self, command):
        """Send a raw SCPI command and return the board's reply."""
        self._write(command + "\r\n") # SACADA Mini requires \r\n at the end of commands
        return self._read()

    def readVoltage(self, channel):
        """Return the DC voltage measured on an input channel as a float.

        Raises:
            InvalidChannel: if ``channel`` is not one of IN_CHANNELS.
        """
        if channel not in self.IN_CHANNELS:
            raise InvalidChannel("{} is not a valid input channel".format(channel))
        return float(self.sendSCPICommand("MEAS:VOLT:DC? {}".format(channel)))

    def setVoltage(self, channel, voltage):
        """Set the DC voltage on an output channel; return the board reply.

        Raises:
            InvalidChannel: if ``channel`` is not one of OUT_CHANNELS.
        """
        if channel not in self.OUT_CHANNELS:
            raise InvalidChannel("{} is not a valid output channel".format(channel))
        return float(self.sendSCPICommand("SET:VOLT:DC {} {}".format(channel, voltage)))

    def readTemperature(self, tref, _type='R'):
        """Return the thermocouple temperature.

        Args:
            tref: cold-junction reference temperature.
            _type: thermocouple type letter (default ``'R'``).
        """
        voltage = self.readVoltage("TC")
        # Convert V to mV and invert the thermocouple characteristic
        return thermocouples[_type].inverse_CmV((voltage - self._zero)*1000, Tref=tref)

    def zero(self, samples=5):
        """Calibrate the thermocouple zero offset.

        Averages ``samples`` readings of the TC channel (5 is arbitrary)
        and stores the result as the offset used by readTemperature().
        """
        average = 0
        for _ in range(samples):
            average += self.readVoltage("TC")
        self._zero = average/samples
        return self._zero

    def _write(self, m):
        self._serial.write(bytes(m, "utf-8"))

    def _read(self):
        return self._serial.readline().strip()
# Creating a SACC file with the library
In this example we will make a Sacc data file using simulated data from CCL, the Core Cosmology Library.
```
pylab inline
import sacc
import pyccl as ccl
import datetime
import time
```
# Getting input data
We will make some fake window ranges and data vectors from theory predictions using CCL. We will also need fake N(z) distributions in that case.
We will generate 4 bins of galaxy density angular power spectrum (C_ell) measurements.
```
galaxy_density_cl = sacc.standard_types.galaxy_density_cl
nbin = 4
```
We will generate 20 ell bins from ell=5 to ell=1000
```
ell_edges = np.linspace(5, 1000, 20).astype(int)
# Generate the ranges (ell_min, ell_max)
window_ranges = list(zip(ell_edges[:-1], ell_edges[1:]))
effective_ell = np.array([mean(x) for x in window_ranges])
ell_max = window_ranges[-1][-1]+1
print("Ell window ranges:")
print(window_ranges)
```
Construct the mock N(z) values and use CCL to calculate the C_ell values in band powers corresponding to it.
We use a constant bias within each bin
```
cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.72, n_s=0.96, A_s=2.1e-9)
N_z = []
spectra = []
errors = []
# Z sample values
z = np.arange(0., 3.0, 0.01)
for i in range(nbin):
# Construct n(z)
mean_z = 0.3*(1+i)
sigma_z = 0.05
n_z = np.exp(-0.5*(z - mean_z)**2 / sigma_z**2)
N_z.append(n_z)
# Use a constant bias in each bin and make a tracer
bias = np.ones_like(z) * 1.5*(mean_z/0.3)**0.5
T = ccl.NumberCountsTracer(cosmo, False, (z,N_z[i]), (z, bias))
# Calculate C_ell over the full ell range (for each integer value)
ell = np.arange(ell_max+1)
cl = ccl.angular_cl(cosmo, T, T, ell)
# Apply window functions to get band powers
cb = []
for m,M in window_ranges:
cb.append(np.mean(cl[m:M]))
spectra.append(cb)
# Plot each tomographic bin
errors.append(np.ones_like(cb)*1e-6)
line, = loglog(ell, cl)
errorbar(effective_ell, cb, yerr=errors[-1], fmt='.', color=line.get_color())
xlim(10,ell_max)
xlabel("ell")
ylabel("C_ell")
variances = np.concatenate(errors)**2
```
# Constructing the data set object
We now construct the Sacc data set object, using all the different ingredients.
We initially start with a blank data set object, and gradually fill it in.
```
S = sacc.Sacc()
```
Save some metadata. This is all stored in FITS headers, so can only be simple scalar values
```
S.metadata['nbin_lens'] = 4
S.metadata['nbin_source'] = 0
S.metadata['creator'] = 'Beloved User'
S.metadata['creation'] = datetime.datetime.now().isoformat()
S.metadata['info'] = 'Theory predictions using CCL'
```
Now we create some window objects representing the window functions.
As an example here we will also create some window functions for mock observations of a pretend new data type.
```
# In this case we use the same top-hat window for each tomographic bin.
# In reality we would probably use a different window for each
# pair of bins, empirically determined.
windows = [sacc.TopHatWindow(m,M) for m,M in window_ranges]
# Imaginary new statistic's window that I just made up
windows_1pt = []
for i in range(5):
v = np.arange(i*10., (i+1)*10.)
w = np.random.uniform(size=10)**2
windows_1pt.append(sacc.Window(v,w))
```
Now we add the tracers and data points
```
for i in range(nbin):
# Add the appropriate tracer
S.add_tracer('NZ', f'bin_{i}', z, N_z[i])
# Get the binned spectrum values
cb = spectra[i]
# In this case we are just doing LSS, so
# there is just a single bin, repeated.
bin_name = f'bin_{i}'
# Add the values
S.add_ell_cl(galaxy_density_cl, bin_name, bin_name, effective_ell, cb, window=windows)
```
We an specify Sacc covariances in several ways. If we supply a 1D vector it will be interpreted as the variances of a diagonal covariances
```
S.add_covariance(variances)
```
We can tag data points with additional information if we want
```
# Just for fun we add arbitrary tags to a few data points
S.data[4].tags['flag'] = 42
S.data[10].tags['flag'] = 42
# An example cut
cut = S.indices("galaxy_density_cl", ell__gt=900)
print(f"Removing {len(cut)} data points")
S.remove_indices(cut)
# Could also write this to do the cut directly.
# This won't remove anything because we just did the same cut above
S.remove_selection('galaxy_density_cl', ell__gt=900)
```
# I/O
Let's save to a FITS file now and reload.
```
S.save_fits('tmp.fits', overwrite=True)
S2 = sacc.Sacc.load_fits('tmp.fits')
```
# Plotting
Let's plot the original data file and the re-loaded one, to check they agree.
```
# First the original data set
for b1, b2 in S.get_tracer_combinations('galaxy_density_cl'):
# We can use this convenience function to pull out the ell values we want
ell, cl, covmat = S.get_ell_cl('galaxy_density_cl', b1, b2, return_cov=True)
errorbar(ell, cl, yerr=covmat.diagonal()**0.5, fmt='.', label=f'{b1}-{b2}')
xscale('log')
yscale('log')
title("Galaxy C_ell")
legend()
# Now another plot of the second data set that we saved and loaded
for b1, b2 in S2.get_tracer_combinations('galaxy_density_cl'):
ell, cl, covmat = S2.get_ell_cl('galaxy_density_cl', b1, b2, return_cov=True)
errorbar(ell, cl, yerr=covmat.diagonal()**0.5, fmt='.', label=f'{b1}-{b2}')
xscale('log')
yscale('log')
title("Galaxy C_ell")
legend()
```
| /sacc-0.12.tar.gz/sacc-0.12/examples/Create_Sacc.ipynb | 0.672117 | 0.934873 | Create_Sacc.ipynb | pypi |
# SACC file for CMB and LSS data
This example shows how to use the different functionality in SACC to write data from LSS-like and CMB-like experiments.
```
import sacc
import pyccl as ccl
import numpy as np
import matplotlib.pyplot as plt
```
## Generate the data
We will first use CCL to generate some data. This will include all the auto- and cross-correlations between a redshift bin with galaxy clustering and cosmic shear, one CMB lensing map and I/Q/U maps in a given frequency channel.
```
# Setup (cosmology, number of bins, number of bands, ell range etc.)
d_ell = 10
n_ell = 100
ells = (np.arange(100) + 0.5) * d_ell
n_ell = len(ells)
# density, shear_E, shear_B, I, Q, U, kappa
n_maps = 3 + 3 + 1
# Cosmology
cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.72, n_s=0.96, A_s=2.1e-9)
```
### LSS power spectra
First we create the galaxy clustering (GC), weak lensing (WL) and CMB lensing tracers with CCL for which we will compute power spectra
```
# CCL tracers
z = np.linspace(0., 2., 1000)
nz = z**2 * np.exp(-(z / 0.25)**1.5)
bz = 1 + z
ndens = 10. # 10 gals per amin^2
# 3 tracers
gc = ccl.NumberCountsTracer(cosmo, False, (z,nz), (z, bz))
sh = ccl.WeakLensingTracer(cosmo, (z,nz))
ck = ccl.CMBLensingTracer(cosmo, 1100.)
# Noise power spectra
nl_gc = np.ones(n_ell) / (ndens * (60 * 180 / np.pi)**2)
nl_sh = np.ones(n_ell) * 0.28**2 / (ndens * (60 * 180 / np.pi)**2)
# Plot N(z)
plt.figure()
plt.plot(z, nz)
plt.show()
```
### Frequency maps
Now we create some information for the frequency map. Let's put it at some low frequency so that it is dominated by synchrotron, and we can ignore its cross-correlation with the LSS tracers.
```
# Frequency bandpass
nu = np.linspace(1, 40, 2000)
bpass = np.exp(-((nu - 20.) / 10.)**8)
plt.figure()
plt.plot(nu,bpass)
# Beam
fwhm = 60. # 60 arcmin (1 degree)
sigma = (fwhm / 2.355) * np.pi / 180 / 60
ell_beam = np.arange(3000)
beam = np.exp(-ell_beam * (ell_beam + 1) * sigma**2)
plt.figure()
plt.loglog(ell_beam, beam)
plt.ylim([1E-3,1.1])
# Noise power spectrum
sigma_T = 10. # 10. uK arcmin
nl_tt = np.ones(n_ell) * (sigma_T * np.pi / 180 / 60) **2
nl_pp = 2 * nl_tt
# Signal power spectrum
cl_syn_tt = 0.01 * (ells / 80.)**(-2.4)
cl_syn_ee = 0.1 * cl_syn_tt
cl_syn_bb = 0.5 * cl_syn_ee
cl_syn_eb = 0.5 * cl_syn_bb
plt.figure()
plt.plot(ells, cl_syn_tt)
plt.plot(ells, cl_syn_ee)
plt.plot(ells, cl_syn_bb)
plt.plot(ells, nl_tt)
plt.plot(ells, nl_pp)
plt.loglog()
plt.show()
```
### Power spectra
Now let us generate all non-zero power spectra.
```
# Compute power spectra
# We will assume that the cross-correlation between clustering, lensing,
# and CMB lensing with the frequency maps is zero.
cls = np.zeros([n_maps, n_maps, n_ell])
plt.figure()
# GC - GC
cls[0, 0, :] = ccl.angular_cl(cosmo, gc, gc, ells) + nl_gc
plt.plot(ells, cls[0, 0, :], label='GG')
# GC - WL (E-only, B is zero)
cls[0, 1, :] = ccl.angular_cl(cosmo, gc, sh, ells)
cls[1, 0, :] = cls[0, 1, :]
plt.plot(ells, cls[0, 1, :], label='GL')
# GC - CMBK
cls[0, 3, :] = ccl.angular_cl(cosmo, gc, ck, ells)
cls[3, 0, :] = cls[0, 3, :]
plt.plot(ells, cls[0, 3, :], label='GK')
# WL - WL
# EE
cls[1, 1, :] = ccl.angular_cl(cosmo, sh, sh, ells) + nl_sh
# BB
cls[2, 2, :] = nl_sh
plt.plot(ells, cls[1, 1, :], label='LL')
# WL - CMBK (E-only, B is zero)
cls[1, 3, :] = ccl.angular_cl(cosmo, sh, ck, ells)
cls[3, 1, :] = cls[1, 3, :]
plt.plot(ells, cls[1, 3, :], label='LK')
# CMBK - CMBK
cls[3, 3, :] = ccl.angular_cl(cosmo, ck, ck, ells)
plt.plot(ells, cls[3, 3, :], label='KK')
# T - T
cls[4, 4, :] = cl_syn_tt
# E - E
cls[5, 5, :] = cl_syn_ee
# E - B
cls[5, 6, :] = cl_syn_eb
cls[6, 5, :] = cls[5, 6, :]
# B - B
cls[6, 6, :] = cl_syn_bb
plt.loglog()
plt.legend(loc='lower left', ncol=2)
plt.show()
```
### Bandpower window functions
For simplicity let's just assume top-hat windows
```
n_ell_large = 3001
ells_large = np.arange(n_ell_large)
window_single = np.zeros([n_ell, n_ell_large])
for i in range(n_ell):
window_single[i, i * d_ell : (i + 1) * d_ell] = 1.
plt.figure()
for w in window_single:
plt.plot(ells_large, w)
plt.xlim([200,300])
plt.show()
```
### Covariance
Finally, let's create a covariance matrix
```
fsky = 0.1
n_cross = (n_maps * (n_maps + 1)) // 2
covar = np.zeros([n_cross, n_ell, n_cross, n_ell])
id_i = 0
for i1 in range(n_maps):
for i2 in range(i1, n_maps):
id_j = 0
for j1 in range(n_maps):
for j2 in range(j1, n_maps):
cl_i1j1 = cls[i1, j1, :]
cl_i1j2 = cls[i1, j2, :]
cl_i2j1 = cls[i2, j1, :]
cl_i2j2 = cls[i2, j2, :]
# Knox formula
cov = (cl_i1j1 * cl_i2j2 + cl_i1j2 * cl_i2j1) / (d_ell * fsky * (2 * ells + 1))
covar[id_i, :, id_j, :] = np.diag(cov)
id_j += 1
id_i += 1
covar = covar.reshape([n_cross * n_ell, n_cross * n_ell])
```
## Create SACC file
We start by creating an empty `Sacc` object.
```
s = sacc.Sacc()
```
### Tracers
Now we add all maps as individual tracers.
The GC and WL maps will be `NZ` tracers, the CMBK tracer will be a `Map` tracer, and the I/Q/U maps will be two `NuMap` tracers (one for temperature, another one for polarization).
```
# GC
s.add_tracer('NZ', 'gc', # Name
quantity='galaxy_density', # Quantity
spin=0, # Spin
z=z, # z
nz=nz) # nz
# WL
s.add_tracer('NZ', 'wl', # Name
quantity='galaxy_shear', # Quantity
spin=2, # Spin
z=z, # z
nz=nz, # nz
extra_columns={'error': 0.1*nz}, # You can include extra columns for the N(z)
sigma_g=0.28) # You can add any extra information as **kwargs
# CMBK
s.add_tracer('Map', 'ck', # Name
quantity='cmb_convergence', # Quantity
spin=0, # Spin
ell=ell_beam, beam=beam) # Beam
# T
s.add_tracer('NuMap', 'B20_T', # Name
quantity='cmb_temperature', # Quantity
spin=0, # Spin
nu=nu, bandpass=bpass, # Bandpass
bandpass_extra={'error': 0.01 * bpass}, # You can add some extra bandpass data.
ell=ell_beam, beam=beam, # Beam
beam_extra={'error': 0.01 * beam},
nu_unit='GHz', # Frequency units
map_unit='uK_RJ', # Map units
)
# Q/U
s.add_tracer('NuMap', 'B20_P', # Name
quantity='cmb_polarization', # Quantity
spin=2, # Spin
nu=nu, bandpass=bpass, # Bandpass
bandpass_extra={'error': 0.01 * bpass}, # You can add some extra bandpass data.
ell=ell_beam, beam=beam, # Beam
beam_extra={'error': 0.01 * beam},
nu_unit='GHz', # Frequency units
map_unit='uK_RJ', # Map units
)
```
### Power spectra
Now we add all power spectra one-by-one
```
# Create a SACC bandpower window object
wins = sacc.BandpowerWindow(ells_large, window_single.T)
# GC-GC
s.add_ell_cl('cl_00', # Data type
'gc', # 1st tracer's name
'gc', # 2nd tracer's name
ells, # Effective multipole
cls[0, 0, :], # Power spectrum values
window=wins, # Bandpower windows
)
# GC-WL
s.add_ell_cl('cl_0e', 'gc', 'wl', ells, cls[0, 1, :], window=wins)
s.add_ell_cl('cl_0b', 'gc', 'wl', ells, cls[0, 2, :], window=wins)
# GC-CMBK
s.add_ell_cl('cl_00', 'gc', 'ck', ells, cls[0, 3, :], window=wins)
# GC-T
s.add_ell_cl('cl_00', 'gc', 'B20_T', ells, cls[0, 4, :], window=wins)
# GC-P
s.add_ell_cl('cl_0e', 'gc', 'B20_P', ells, cls[0, 5, :], window=wins)
s.add_ell_cl('cl_0b', 'gc', 'B20_P', ells, cls[0, 6, :], window=wins)
# WL-WL
s.add_ell_cl('cl_ee', 'wl', 'wl', ells, cls[1, 1, :], window=wins)
s.add_ell_cl('cl_eb', 'wl', 'wl', ells, cls[1, 2, :], window=wins)
s.add_ell_cl('cl_bb', 'wl', 'wl', ells, cls[2, 2, :], window=wins)
# WL-CMBK
s.add_ell_cl('cl_0e', 'wl', 'ck', ells, cls[1, 3, :], window=wins)
s.add_ell_cl('cl_0b', 'wl', 'ck', ells, cls[2, 3, :], window=wins)
# WL-T
s.add_ell_cl('cl_0e', 'wl', 'B20_T', ells, cls[1, 4, :], window=wins)
s.add_ell_cl('cl_0b', 'wl', 'B20_T', ells, cls[2, 4, :], window=wins)
# WL-E/B
s.add_ell_cl('cl_ee', 'wl', 'B20_P', ells, cls[1, 5, :], window=wins)
s.add_ell_cl('cl_eb', 'wl', 'B20_P', ells, cls[1, 6, :], window=wins)
s.add_ell_cl('cl_be', 'wl', 'B20_P', ells, cls[2, 5, :], window=wins)
s.add_ell_cl('cl_bb', 'wl', 'B20_P', ells, cls[2, 6, :], window=wins)
# CMBK-CMBK
s.add_ell_cl('cl_00', 'ck', 'ck', ells, cls[3, 3, :], window=wins)
# CMBK-T
s.add_ell_cl('cl_00', 'ck', 'B20_T', ells, cls[3, 4, :], window=wins)
# CMBK-P
s.add_ell_cl('cl_0e', 'ck', 'B20_P', ells, cls[3, 5, :], window=wins)
s.add_ell_cl('cl_0b', 'ck', 'B20_P', ells, cls[3, 6, :], window=wins)
# T-T
s.add_ell_cl('cl_00', 'B20_T', 'B20_T', ells, cls[4, 4, :], window=wins)
# T-P
s.add_ell_cl('cl_0e', 'B20_T', 'B20_P', ells, cls[4, 5, :], window=wins)
s.add_ell_cl('cl_0b', 'B20_T', 'B20_P', ells, cls[4, 6, :], window=wins)
# P-P
s.add_ell_cl('cl_ee', 'B20_P', 'B20_P', ells, cls[5, 5, :], window=wins)
s.add_ell_cl('cl_eb', 'B20_P', 'B20_P', ells, cls[5, 6, :], window=wins)
s.add_ell_cl('cl_bb', 'B20_P', 'B20_P', ells, cls[6, 6, :], window=wins)
```
### Covariance
Finally, add the covariance
```
s.add_covariance(covar)
```
### Writing
Finally, write it to file!
```
s.save_fits("cmblss.fits", overwrite=True)
```
| /sacc-0.12.tar.gz/sacc-0.12/examples/CMB_LSS_write.ipynb | 0.583085 | 0.966379 | CMB_LSS_write.ipynb | pypi |
# Reading a SACC file with CMB and LSS data
This example shows how to read LSS and CMB-like data from a SACC file. You should run the `CMB_LSS_write` notebook before this one in order to have some data to read!
```
import sacc
import numpy as np
import matplotlib.pyplot as plt
```
## Reading the data
This is as simple as:
```
s = sacc.Sacc.load_fits('cmblss.fits')
```
## Inspecting the contents
Let's now understand the contents of this file.
### Tracers
First, let's see what quantities have been correlated
```
for n, t in s.tracers.items():
print(t.name, t.quantity, type(t))
```
OK, so there is a CMB lensing tracer (as a `Map` tracer), a galaxy clustering tracer and a cosmic shear tracer (as `NZ` tracers), and the temperature and polarization of a given frequency map (as two `NuMap` tracers).
Let's look at the redshift distributions, bandpasses and beams.
```
# N(z)
plt.figure()
plt.plot(s.tracers['gc'].z, s.tracers['gc'].nz, 'r-', label='GC')
plt.plot(s.tracers['wl'].z, s.tracers['wl'].nz, 'k--', label='WL')
plt.xlabel('$z$', fontsize=16)
plt.ylabel('$N(z)$', fontsize=16)
plt.legend(loc='upper right')
# Bandpass
plt.figure()
plt.plot(s.tracers['B20_T'].nu, s.tracers['B20_T'].bandpass, 'r-', label='T')
plt.plot(s.tracers['B20_P'].nu, s.tracers['B20_P'].bandpass, 'k--', label='Q/U')
plt.xlabel(r'$\nu$', fontsize=16)
plt.ylabel(r'$\tau_\nu$', fontsize=16)
plt.legend(loc='upper right')
# Beam
plt.figure()
plt.plot(s.tracers['B20_T'].ell, s.tracers['B20_T'].beam)
plt.xscale('log')
plt.xlabel(r'$\ell$', fontsize=16)
plt.ylabel(r'$B_\ell$', fontsize=16)
plt.show()
```
### Power spectra
Let's find out which two-point functions are included
```
# Type of power spectra
data_types = np.unique([d.data_type for d in s.data])
print("Data types: ", data_types)
# Tracer combinations
print("Tracer combinations: ", s.get_tracer_combinations())
# Data size
print("Size: ", s.mean.size)
```
## Cutting the data
Let's remove anything that we don't care about.
First let's say we're not interested in any B-mode data
```
print(s.mean.size)
s.remove_selection(data_type='cl_eb')
s.remove_selection(data_type='cl_be')
s.remove_selection(data_type='cl_bb')
s.remove_selection(data_type='cl_0b')
print(s.mean.size)
```
Now let's ignore all power spectra involving the `B20_P` tracer, and also all cross-correlations with `B20_T`.
```
s.get_tracer_combinations()
for t in s.tracers:
s.remove_selection(tracers=('B20_P', t))
s.remove_selection(tracers=(t, 'B20_P'))
if t!='B20_T':
s.remove_selection(tracers=('B20_T', t))
s.remove_selection(tracers=(t, 'B20_T'))
s.get_tracer_combinations()
```
Finally, let's impose some scale cuts.
Since small-scale clustering is tricky, let's remove all ells > 100 for any power spectrum involving `gc`.
```
print(s.mean.size)
for t in s.tracers:
s.remove_selection(tracers=('gc', t), ell__gt=100)
s.remove_selection(tracers=(t, 'gc'), ell__gt=100)
print(s.mean.size)
```
The full covariance matrix and data vector can be obtained through `Sacc.covariance.covmat` and `Sacc.mean` respectively.
```
print(s.covariance.covmat.shape,s.mean.shape)
```
## Bandpower window functions
Let's now access bandpower window functions. To do this, we first need to get the indices corresponding to a given power spectrum and then use `get_bandpower_windows`.
```
# Get spectra
l, cl_gg, cov_gg, ind_gg = s.get_ell_cl('cl_00', 'gc', 'gc', return_cov=True, return_ind=True)
# Query windows from indices
bpw = s.get_bandpower_windows(ind_gg)
# Let's plot them!
plt.figure()
for w in bpw.weight.T:
plt.plot(bpw.values, w)
plt.xlim([0, 120]);
```
## Plotting the data
We can now do stuff with the contents of the SACC file
Let's plot all power spectra with their error bars.
```
plt.figure()
for t1, t2 in s.get_tracer_combinations():
l, cl, cov = s.get_ell_cl(None, t1, t2, return_cov=True)
err = np.sqrt(np.diag(cov))
plt.errorbar(l, cl, err, label='%s - %s' % (t1, t2))
plt.loglog()
plt.legend(ncol=2)
plt.xlabel(r'$\ell$',fontsize=16)
plt.ylabel(r'$C_\ell$',fontsize=16)
plt.show()
```
| /sacc-0.12.tar.gz/sacc-0.12/examples/CMB_LSS_read.ipynb | 0.463201 | 0.975693 | CMB_LSS_read.ipynb | pypi |
# SACC with clusters
The default SACC scripts in the directory above show how one can make a SACC object for a 3x2 point analysis. The constructor for a SACC object has additional fields for handling clusters. This notebook details how one can use those fields to create/load/split a SACC that has cluster information.
Note: this notebook is for *stacks* of clusters. Individual cluster measurements are not yet supported by SACC.
```
import sacc
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Cluster stack details
Stacks of clusters are different from regular tracers, since they are binned not only in redshift but also by richness. In this example, we have 20 cluster stacks: 5 bins in richness and 4 tomographic bins. Since this is a tomographic analysis, each cluster stack can be associated with some number of source bins. This association is handled in later cells.
The following two cells create two sets of tracers:
1. cluster stack tracers, that hold tomographic and mass-proxy (aka richness) bin edges
2. source galaxy tracers, that are associated with individual $\gamma_T$ weak lensing profiles for each stack-source tracer pair
We could also create a new type of tracer for cluster stacks - that would make more sense in general
```
s = sacc.Sacc()
richness_bin_edges = [20, 30, 50, 80, 120, 180]
source_zbin_centers = [0.5, 0.7, 0.9, 1.1]
cluster_zbin_centers = [0.3, 0.5, 0.7, 0.9]
nbin_richness = len(richness_bin_edges) - 1
nbin_source = len(source_zbin_centers)
nbin_cluster = len(cluster_zbin_centers)
# First we build the cluster stack tracers.
# Here we will store the mass information in metadata,
# but for more complicated data we should write a new
# subclass of Tracer to store this information.
for i, z_mid in enumerate(cluster_zbin_centers):
z = np.arange(z_mid-0.1, z_mid+0.1, 0.001)
Nz = np.exp(-(z-z_mid)**2 / (2*0.03**2))
for j in range(nbin_richness):
l_min = richness_bin_edges[j]
l_max = richness_bin_edges[j+1]
name = f'clusters_{i}_{j}'
metadata = {'Mproxy_name': 'richness',
'Mproxy_min': l_min, 'Mproxy_max': l_max,
'source_name':'lsst_sources'
}
s.add_tracer('NZ', name, z, Nz, metadata=metadata)
# Now we move on to the more standard galaxy tracers -
# tomographic LSST source galaxies with 4 redshift bins
for i,z_mid in enumerate(source_zbin_centers):
# Basic n(z) information
z = np.arange(z_mid-0.1, z_mid+0.1, 0.001)
Nz = np.exp(-(z-z_mid)**2 / (2*0.025**2))
# Some random shapes of Nz to marginalise over
# We save these as extra columns
DNz=np.zeros((len(Nz),2))
DNz[:,0]=(z-z_mid)**2*0.01
DNz[:,0]-=DNz[:,0].mean()
DNz[:,1]=(z-z_mid)**3*0.01
DNz[:,1]-=DNz[:,1].mean()
extra_columns = {'DNz_0': DNz[:,0], 'DNz_1': DNz[:,1]}
s.add_tracer("NZ", f"lsst_sources_{i}", z, Nz, extra_columns=extra_columns)
```
### Data vectors and binning
The SACC holds data vectors and binning information. In this example, we have binning for cluster number counts as well as binning for cluster-source lensing profiles. Both are created in the following cell, as well as the binning information.
```
# Here we have 10 radial bins for cluster weak lensing
# Note that the "radial bins" can be actual distances or angles on the sky
radii = np.logspace(np.log10(0.5), np.log10(3), 10)
# One of our identifiers is a standard type name that is predefined
cluster_count = sacc.standard_types.cluster_mass_count_wl
# Our other one is manually defined, because it's one of those measurements
# where people pretend to know exactly how physical scale corresponds to angle.
# So we define our own tag for it.
cluster_lensing = "clusterGalaxy_densityShear_xi_tComoving"
# There is a standard format for these names. We check that we fit it
# by running the parser on it
type_details = sacc.parse_data_type_name(cluster_lensing)
print(type_details.sources, type_details.properties)
for i in range(nbin_cluster):
for j in range(nbin_richness):
# Cluster number counts data
tracer1 = f'clusters_{i}_{j}'
# random data values. For now!
mass= i*1e14
richness = 5*j
value = int(np.random.normal((i+10)*100, 100))
s.add_data_point(cluster_count, (tracer1,), value, err=100.)
# And now the cluster lensing data points
for k in range(nbin_source):
tracer2 = f"lsst_sources_{k}"
# Separate random data values for each point
for r in radii:
value = np.random.uniform(0., 10.)
s.add_data_point(cluster_lensing, (tracer1, tracer2), value, radius=r, err=2.0)
```
### Covariance matrices
Finally, the SACC object holds a covariance matrix between all of the data we use.
```
n = len(s)
C = np.zeros((n,n))
for i in range(n):
di = s.data[i]
for j in range(n):
dj = s.data[j]
if i==j and di.data_type == cluster_count:
C[i,i] = di['err']**2
elif di.data_type == cluster_lensing:
C[i,j] = 0.1 * di['err'] * dj['err']
if i==j:
C[i,j] *= 10.
C[j,i] = C[i,j]
s.add_covariance(C)
# This shuffles the data and covariance order so that it is
# organized with all the data points of the same type collected
# together
s.to_canonical_order()
# Add some meta data
s.metadata['Creator'] = 'McGyver'
s.metadata['Project'] = 'Victory'
s.save_fits("clusters.sacc", overwrite=True)
```
## Loading and splitting
A SACC object with cluster information can be loaded and split, just like the example SACC in the 3x2pt analysis.
```
s2 = sacc.Sacc.load_fits("./clusters.sacc")
for d in s2.data[:10]:
print(d)
# Printing a summary
for dt in s2.get_data_types():
ind = s2.indices(dt)
n = len(ind)
print(f"{dt}: {n} data points")
for tracers in s2.get_tracer_combinations(dt):
ind = s2.indices(dt, tracers)
n = len(ind)
tracers = '-'.join(tracers)
print(f" {tracers}: {n} data points")
# Splitting the data into two parts
s3 = s2.copy()
s4 = s2.copy()
s3.keep_selection(cluster_count)
s4.keep_selection(cluster_lensing)
print(len(s3), len(s4))
```
| /sacc-0.12.tar.gz/sacc-0.12/examples/SACC_for_clusters.ipynb | 0.513912 | 0.92421 | SACC_for_clusters.ipynb | pypi |
```
import sacc
import numpy as np
import matplotlib.pyplot as plt
```
# Read SACC files
This notebook illustrates how to read and interpret data from a SACC file.
We will read the BK15 data we wrote into SACC format in SACC_write.ipynb (so run that notebook first!).
Reading the file is as simple as:
```
# First, read data
s = sacc.Sacc.load_fits("BK15.fits")
```
Here are some examples of how to cut the data. There are lots of different slice-and-dice functions in sacc, from very convenient to very generic. Have a look at the documentation of `sacc/sacc.py`.
### E.g. 1: keep only the BB correlations
This will drop all other elements of the data vector and the corresponding elements of the covariance matrix.
```
s.keep_selection('cl_bb')
```
### E.g. 2: removing things
For instance, let's remove all multipoles larger than 400 (in this case this won't remove anything, since all power spectra are only given for ell<~300).
Note that you can use the syntax `tag__gt=X` or `tag__lt=Y` to impose scale cuts on a given scale-like tag (we're using it for `tag==ell`, but it could be something else).
```
s.remove_selection(ell__gt=400)
```
### Accessing the data vector and covariance
At this point, we can access the full data vector or covariance matrix as:
```
data_vector = s.mean
covariance = s.covariance.covmat
print(data_vector.shape)
print(covariance.shape)
```
### Accessing the different power spectra
Now let's do some plotting.
We will use `get_tracer_combinations` to loop through all cross-correlations.
We will then use `get_ell_cl` to read off the corresponding power spectra.
```
for b1, b2 in s.get_tracer_combinations():
l, cl, cov = s.get_ell_cl('cl_bb', b1, b2, return_cov=True)
# Let's plot only the auto-correlations. Otherwise it gets unwieldy
if b1==b2:
plt.figure()
plt.title(b1+" x "+b2,fontsize=14)
plt.errorbar(l, cl, yerr=np.sqrt(np.diag(cov)), fmt='r.')
plt.xlabel('$\\ell$',fontsize=14)
plt.ylabel('$D_\\ell$',fontsize=14)
plt.show()
```
### Accessing tracer information
You can also access the information describing each of the different tracers. In this case, let's plot the bandpasses.
```
plt.figure()
# Loop through tracers
for tn,t in s.tracers.items():
plt.plot(t.nu,t.bandpass/np.amax(t.bandpass),label=tn)
plt.xscale('log')
plt.legend(loc='upper right')
plt.ylim([0,1.1])
plt.xlim([10,2000])
plt.xlabel('$\\nu\\,\\,[{\\rm GHz}]$',fontsize=14)
plt.ylabel('Transmission',fontsize=14)
plt.show()
```
| /sacc-0.12.tar.gz/sacc-0.12/examples/SACC_read.ipynb | 0.406744 | 0.974749 | SACC_read.ipynb | pypi |
================
saccademodel-py
================
A least-squares optimal offline method to find saccadic reaction time and saccade duration from tracked gaze points.
You have tracked the gaze points of the following event sequence:
1. A person looks at point (A). An image appears at (B).
2. The person reacts to the image and starts to move their gaze toward (B). The *saccade* starts.
3. The gaze arrives to (B). The saccade ends.
4. The person looks at point (B).
Now you want to determine:
a. The time between events 1 and 2, called the *saccadic reaction time* or *SRT*.
b. The time between events 2 and 3, called the *saccade duration* or *SD*.
The *saccademodel* algorithm computes the times for you by fitting an ideal gaze path to the data. The ideal gaze path has the following structure. From t=0 to t=saccade_start the ideal gaze is exactly at point (A). From t=saccade_start to t=saccade_end the ideal gaze moves from (A) to (B) with constant velocity. From t=saccade_end to t=n the gaze remains at (B). The algorithm finds such times *saccade_start* and *saccade_end* that **minimize the mean squared error** between the ideal gaze path and the given tracked gaze points. In other words, the algorithm splits the data to three segments: source fixation, saccade, and target fixation.
As the **greatest advantage**, when compared to velocity-based saccade recognition methods, data does not need to be filtered beforehand because the squared error approach does that by itself. Even though filtering would yield smaller total squared error, it does not affect the estimates of *saccade_start* and *saccade_end*. However, if the noise in the data is nonstationary, some special noise filtering methods might be needed.
As the **greatest disadvantage**, the *saccademodel* algorithm is suitable only for offline analysis and therefore cannot be used in realtime setups.
1. Install
==========
With `pip
<https://pypi.python.org/pypi/saccademodel>`_::
$ pip install saccademodel
2. Usage
========
The data structure **pointlist** is used throughout. It is a list of points, where each point is a list [x, y].
The usage is simple::
>>> import saccademodel
>>> rawdata = [
[130.012, 404.231],
[129.234, 403.478],
[None, None],
[133.983, 450.044],
...
]
>>> results = saccademodel.fit(rawdata)
>>> print(results)
{
'source_points': [[344.682, 200.115], ...],
'saccade_points': [[324.233, 202.393], ...],
'target_points': [[556.423, 112.607], ...],
'mean_squared_error': 0.000166802
}
Note that the lengths of the returned lists can be used to determine saccadic reaction time and duration. For example, given the points from the appearance of stimulus, the saccadic reaction time is captured in the length of ``source_points`` and the saccade duration in ``saccade_points``. If the frame rate is known, you can convert the lengths to seconds by::
>>> framerate = 300.0 # samples per second
>>> saccadic_reaction_time = len(results['source_points']) / framerate
>>> saccade_duration = len(results['saccade_points']) / framerate
3. API
======
3.1. saccademodel.fit(gazepointlist)
------------------------------------
Parameter:
- gazepointlist: a list of [x, y] points i.e. a list of lists.
Return dict with following keys:
- source_points: the points before the saccade
- saccade_points: the points in the saccade
- target_points: the points after the saccade.
- mean_squared_error: the average squared error from the model for a point.
3.2. saccademodel.version
-------------------------
The current version string::
>>> saccademodel.version
'1.2.3'
4. For developers
=================
4.1. Virtualenv
---------------
Use virtualenv::
$ virtualenv -p python3.5 saccademodel-py
$ cd saccademodel-py
$ source bin/activate
...
$ deactivate
4.2. Jupyter Notebook
---------------------
Usage::
$ cd explore
$ jupyter notebook
Install requirements::
$ pip install --editable .[notebook]
4.3. Testing
------------
Follow `instructions to install pyenv
<http://sqa.stackexchange.com/a/15257/14918>`_ and then either run quick tests::
$ python3.5 setup.py test
or comprehensive tests for multiple Python versions in ``tox.ini``::
$ pyenv local 2.6.9 2.7.10 3.1.5 3.2.6 3.3.6 3.4.3 3.5.0
$ eval "$(pyenv init -)"
$ pyenv rehash
$ tox
Install new pyenv environments by::
$ pyenv install 3.4.5
Validate README.rst at `http://rst.ninjs.org/
<http://rst.ninjs.org/>`_
4.4. Publishing to PyPI
-----------------------
Follow `python packaging instructions
<https://python-packaging-user-guide.readthedocs.org/en/latest/distributing/>`_:
1. Create an unpacked sdist: ``$ python setup.py sdist``
2. Create a universal wheel: ``$ python setup.py bdist_wheel --universal``
3. Go to `PyPI and register the project by filling the package form
<https://pypi.python.org/pypi?%3Aaction=submit_form>`_ by uploading
``saccademodel.egg-info/PKG_INFO`` file.
4. Upload the package with twine:
1. Sign the dist: ``$ gpg --detach-sign -a dist/saccademodel-1.2.3*``
2. Upload: ``twine upload dist/saccademodel-1.2.3*`` (will ask your PyPI password)
5. Package published!
Updating the package takes same steps except the 3rd.
5. Versioning
=============
`Semantic Versioning 2.0.0
<http://semver.org/>`_
6. License
==========
`MIT License
<http://github.com/axelpale/nudged-py/blob/master/LICENSE>`_
| /saccademodel-0.1.0.tar.gz/saccademodel-0.1.0/README.rst | 0.89358 | 0.717198 | README.rst | pypi |
import numpy as np
import os
import joblib
import logging
class Perceptron:
    """A single-layer perceptron for binary classification of 2-D inputs.

    Weights are initialised to small random values. Inputs are augmented
    with a constant -1 column so the third weight acts as the bias.

    :param eta: learning rate; required only for training.
    :param epochs: number of passes over the training data; required only for training.
    """

    def __init__(self, eta: float = None, epochs: int = None):
        # 3 weights = 2 input features + 1 bias term
        self.weights = np.random.random(3) * 1e-4  # small random weights
        training = (eta is not None) and (epochs is not None)
        if training:
            logging.info(f"initial weights before training \n{self.weights}")
        self.eta = eta
        self.epochs = epochs

    def _z_outcome(self, inputs, weights):
        """Weighted sum (pre-activation) of the inputs."""
        return np.dot(inputs, weights)

    def activation_function(self, z):
        """Step activation: 1 where z > 0, else 0."""
        return np.where(z > 0, 1, 0)

    def fit(self, X, y):
        """Train the perceptron with batch weight updates.

        :param X: feature matrix of shape (n_samples, 2).
        :param y: 0/1 target labels of length n_samples.
        """
        self.X = X
        self.y = y

        # Append a constant -1 column so the bias is learned as a weight.
        X_with_bias = np.c_[self.X, -np.ones((len(self.X), 1))]
        logging.info(f"X with bias : \n {X_with_bias}")

        for epoch in range(self.epochs):
            logging.info("- -" * 10)
            logging.info(f"for epoch >> {epoch}")
            logging.info("- -" * 10)

            z = self._z_outcome(X_with_bias, self.weights)
            y_hat = self.activation_function(z)
            logging.info(f"prediction value after forward pass : \n {y_hat}")

            self.error = self.y - y_hat
            logging.info(f"error is \n{self.error}")

            # Batch perceptron update rule: w <- w + eta * X^T . error
            self.weights = self.weights + self.eta * np.dot(X_with_bias.T, self.error)
            logging.info(f"updated weights after epoch {epoch}/{self.epochs} : \n{self.weights}")
            logging.info("##" * 10)

    def predict(self, X):
        """Predict 0/1 labels for the given feature matrix."""
        X_with_bias = np.c_[X, -np.ones((len(X), 1))]
        z = self._z_outcome(X_with_bias, self.weights)
        return self.activation_function(z)

    def total_loss(self):
        """Sum of the residual errors from the last training epoch.

        Must be called after :meth:`fit` (which sets ``self.error``).
        """
        total_loss = np.sum(self.error)
        logging.info(f" total loss : {total_loss} \n")
        return total_loss

    def create_dir_return_path(self, model_dir, filename):
        """Create ``model_dir`` if needed and return the joined file path."""
        os.makedirs(model_dir, exist_ok=True)
        return os.path.join(model_dir, filename)

    def save(self, filename, model_dir=None):
        """Persist the model with joblib.

        :param filename: file name for the serialized model.
        :param model_dir: target directory; defaults to ``"model"``.
        """
        # Previously two duplicated branches; collapsed into a single path.
        model_file_path = self.create_dir_return_path(model_dir or "model", filename)
        joblib.dump(self, model_file_path)
        logging.info(f"model is saved at {model_file_path}")

    def load(self, filepath):
        """Load a previously saved model from ``filepath``."""
        return joblib.load(filepath)
import re
from functools import lru_cache
from .tokenizer_base import BaseTokenizer
def _normalize_general_and_western(sent: str) -> str:
# language-independent (general) part
# strip end-of-line hyphenation and join lines
sent = re.sub(r"\n-", "", sent)
# join lines
sent = re.sub(r"\n", " ", sent)
# handle XML escaped symbols
sent = re.sub(r""", "\"", sent)
sent = re.sub(r"&", "&", sent)
sent = re.sub(r"<", "<", sent)
sent = re.sub(r">", ">", sent)
# language-dependent (Western) part
sent = f" {sent} "
# tokenize punctuation
sent = re.sub(r"([{-~[-` -&(-+:-@/])", r" \1 ", sent)
# handle possesives
sent = re.sub(r"'s ", r" 's ", sent)
sent = re.sub(r"'s$", r" 's", sent)
# tokenize period and comma unless preceded by a digit
sent = re.sub(r"([^0-9])([\.,])", r"\1 \2 ", sent)
# tokenize period and comma unless followed by a digit
sent = re.sub(r"([\.,])([^0-9])", r" \1 \2", sent)
# tokenize dash when preceded by a digit
sent = re.sub(r"([0-9])(-)", r"\1 \2 ", sent)
return sent
def _normalize_asian(sent: str) -> str:
    """Tokenize CJK text for TER computation.

    Splits Chinese characters and Japanese kanji down to the character
    level and space-separates Asian punctuation, mirroring Tercom's
    Normalizer.java.

    :param sent: The input sentence.
    :return: The sentence with CJK characters and punctuation space-separated.
    """
    # Split Chinese chars and Japanese kanji down to character level

    #  4E00—9FFF CJK Unified Ideographs
    #  3400—4DBF CJK Unified Ideographs Extension A
    sent = re.sub(r"([\u4e00-\u9fff\u3400-\u4dbf])", r" \1 ", sent)

    #  31C0—31EF CJK Strokes
    #  2E80—2EFF CJK Radicals Supplement
    sent = re.sub(r"([\u31c0-\u31ef\u2e80-\u2eff])", r" \1 ", sent)

    #  3300—33FF CJK Compatibility
    #  F900—FAFF CJK Compatibility Ideographs
    #  FE30—FE4F CJK Compatibility Forms
    sent = re.sub(
        r"([\u3300-\u33ff\uf900-\ufaff\ufe30-\ufe4f])", r" \1 ", sent)

    #  3200—32FF Enclosed CJK Letters and Months
    # NOTE(review): the range below runs to \u3f22, past the 32FF block end
    # stated above — kept byte-identical; confirm against Tercom's
    # Normalizer.java before changing.
    sent = re.sub(r"([\u3200-\u3f22])", r" \1 ", sent)

    # Split Hiragana, Katakana, and KatakanaPhoneticExtensions
    # only when adjacent to something else
    #  3040—309F Hiragana
    #  30A0—30FF Katakana
    #  31F0—31FF Katakana Phonetic Extensions
    # NOTE(review): the `^` anchors inside the alternations below look
    # suspicious (mid-pattern start-of-string anchors rarely match);
    # kept byte-identical — verify against the upstream implementation.
    sent = re.sub(
        r"(^|^[\u3040-\u309f])([\u3040-\u309f]+)(?=$|^[\u3040-\u309f])",
        r"\1 \2 ", sent)
    sent = re.sub(
        r"(^|^[\u30a0-\u30ff])([\u30a0-\u30ff]+)(?=$|^[\u30a0-\u30ff])",
        r"\1 \2 ", sent)
    sent = re.sub(
        r"(^|^[\u31f0-\u31ff])([\u31f0-\u31ff]+)(?=$|^[\u31f0-\u31ff])",
        r"\1 \2 ", sent)

    # Space-separate Asian and full-width punctuation
    sent = re.sub(TercomTokenizer.ASIAN_PUNCT, r" \1 ", sent)
    sent = re.sub(TercomTokenizer.FULL_WIDTH_PUNCT, r" \1 ", sent)

    return sent
def _remove_punct(sent: str) -> str:
return re.sub(r"[\.,\?:;!\"\(\)]", "", sent)
def _remove_asian_punct(sent: str) -> str:
    """Delete Asian and full-width punctuation characters from *sent*."""
    for pattern in (TercomTokenizer.ASIAN_PUNCT, TercomTokenizer.FULL_WIDTH_PUNCT):
        sent = re.sub(pattern, r"", sent)
    return sent
class TercomTokenizer(BaseTokenizer):
    """Re-implementation of Tercom Tokenizer in Python 3.

    See src/ter/core/Normalizer.java in https://github.com/jhclark/tercom

    Note that Python doesn't support named Unicode blocks so the mapping for
    relevant blocks was taken from here:

    https://unicode-table.com/en/blocks/
    """
    ASIAN_PUNCT = r"([\u3001\u3002\u3008-\u3011\u3014-\u301f\uff61-\uff65\u30fb])"
    FULL_WIDTH_PUNCT = r"([\uff0e\uff0c\uff1f\uff1a\uff1b\uff01\uff02\uff08\uff09])"

    def __init__(self,
                 normalized: bool = False,
                 no_punct: bool = False,
                 asian_support: bool = False,
                 case_sensitive: bool = False):
        """Initialize the tokenizer.

        :param normalized: Enable character normalization.
        :param no_punct: Remove punctuation.
        :param asian_support: Enable special treatment of Asian characters.
        :param case_sensitive: Enable case sensitivity.
        """
        self._normalized = normalized
        self._no_punct = no_punct
        self._asian_support = asian_support
        self._case_sensitive = case_sensitive

    @lru_cache(maxsize=None)
    # Although the cache is shared across different instances, same sentence
    # queries do not return invalid returns across different instances since
    # `self` becomes part of the query as well.
    def __call__(self, sent: str) -> str:
        if not sent:
            return ""

        text = sent if self._case_sensitive else sent.lower()

        if self._normalized:
            text = _normalize_general_and_western(text)
            if self._asian_support:
                text = _normalize_asian(text)

        if self._no_punct:
            text = _remove_punct(text)
            if self._asian_support:
                text = _remove_asian_punct(text)

        # Collapse runs of whitespace into single spaces
        return ' '.join(text.split())

    def signature(self):
        return 'tercom'
from typing import List, Sequence, Optional, Dict
from collections import Counter
from ..utils import sum_of_lists
from .base import Score, Signature, Metric
from .helpers import extract_all_char_ngrams, extract_word_ngrams
class CHRFSignature(Signature):
    """A convenience class to represent the reproducibility signature for chrF.

    :param args: key-value dictionary passed from the actual metric instance.
    """
    def __init__(self, args: dict):
        """`CHRFSignature` initializer."""
        super().__init__(args)

        # Short keys used in the compact signature form
        abbreviations = {
            'case': 'c',
            'eff': 'e',
            'nc': 'nc',
            'nw': 'nw',
            'space': 's',
        }
        self._abbr.update(abbreviations)

        self.info.update({
            'case': 'lc' if args['lowercase'] else 'mixed',
            'eff': 'no' if args['eps_smoothing'] else 'yes',
            'nc': args['char_order'],
            'nw': args['word_order'],
            'space': 'yes' if args['whitespace'] else 'no',
        })
class CHRFScore(Score):
    """A convenience class to represent chrF scores.

    :param score: The chrF (chrF++) score.
    :param char_order: The character n-gram order.
    :param word_order: The word n-gram order. If equals to 2, the metric is referred to as chrF++.
    :param beta: Determine the importance of recall w.r.t precision.
    """
    def __init__(self, score: float, char_order: int, word_order: int, beta: int):
        """`CHRFScore` initializer."""
        self.beta = beta
        self.char_order = char_order
        self.word_order = word_order

        # chrF+/chrF++ variants append one '+' per word n-gram order
        plus_suffix = '+' * self.word_order
        super().__init__(f'chrF{self.beta}{plus_suffix}', score)
class CHRF(Metric):
    """Computes the chrF(++) metric given hypotheses and references.

    :param char_order: Character n-gram order.
    :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++.
    :param beta: Determine the importance of recall w.r.t precision.
    :param lowercase: Enable case-insensitivity.
    :param whitespace: If `True`, include whitespaces when extracting character n-grams.
    :param eps_smoothing: If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. Otherwise,
    it takes into account effective match order similar to sacreBLEU < 2.0.0.
    :param references: A sequence of reference documents with document being
    defined as a sequence of reference strings. If given, the reference n-grams
    will be pre-computed and cached for faster re-computation across many systems.
    """

    # Maximum character n-gram order to take into account
    CHAR_ORDER = 6

    # chrF+ additionally takes into account some of the word n-grams
    WORD_ORDER = 0

    # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341)
    BETA = 2

    # Cache string.punctuation for chrF+' punctuation stripper
    _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')

    _SIGNATURE_TYPE = CHRFSignature

    def __init__(self, char_order: int = CHAR_ORDER,
                 word_order: int = WORD_ORDER,
                 beta: int = BETA,
                 lowercase: bool = False,
                 whitespace: bool = False,
                 eps_smoothing: bool = False,
                 references: Optional[Sequence[Sequence[str]]] = None):
        """`CHRF` initializer."""
        super().__init__()

        self.beta = beta
        self.char_order = char_order
        self.word_order = word_order
        # Total number of n-gram orders iterated during scoring
        # (char orders first, then word orders).
        self.order = self.char_order + self.word_order
        self.lowercase = lowercase
        self.whitespace = whitespace
        self.eps_smoothing = eps_smoothing

        if references is not None:
            # Pre-compute reference ngrams
            self._ref_cache = self._cache_references(references)

    @staticmethod
    def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]:
        """Computes the match statistics between hypothesis and reference n-grams.

        :param hyp_ngrams: A `Counter` holding hypothesis n-grams.
        :param ref_ngrams: A `Counter` holding reference n-grams.
        :return: A list of three numbers denoting hypothesis n-gram count,
            reference n-gram count and the intersection count.
        """
        # Counter's internal intersection is not that fast, count manually
        match_count, hyp_count = 0, 0
        for ng, count in hyp_ngrams.items():
            hyp_count += count
            if ng in ref_ngrams:
                # Clipped match count: an n-gram can only match as many
                # times as it occurs in the reference.
                match_count += min(count, ref_ngrams[ng])

        return [
            # Don't count hits if no reference exists for that n-gram
            hyp_count if ref_ngrams else 0,
            sum(ref_ngrams.values()),
            match_count,
        ]

    def _remove_punctuation(self, sent: str) -> List[str]:
        """Separates out punctuations from beginning and end of words for chrF.
        Adapted from https://github.com/m-popovic/chrF

        :param sent: A string.
        :return: A list of words.
        """
        tokenized = []
        for w in sent.split():
            if len(w) == 1:
                tokenized.append(w)
            else:
                # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124)
                if w[-1] in self._PUNCTS:
                    tokenized += [w[:-1], w[-1]]
                elif w[0] in self._PUNCTS:
                    tokenized += [w[0], w[1:]]
                else:
                    tokenized.append(w)
        return tokenized

    def _preprocess_segment(self, sent: str) -> str:
        """Given a sentence, apply optional lowercasing.

        :param sent: The input sentence string.
        :return: The pre-processed output string.
        """
        return sent.lower() if self.lowercase else sent

    def _compute_f_score(self, statistics: List[int]) -> float:
        """Compute the chrF score given the n-gram match statistics.

        :param statistics: A flattened list of 3 * (`char_order` + `word_order`)
            elements giving the [hyp, ref, match] counts for each order.
        :return: The final f_beta score between [0, 100].
        """
        eps = 1e-16
        score = 0.0
        effective_order = 0
        factor = self.beta ** 2
        avg_prec, avg_rec = 0.0, 0.0

        # Both smoothing variants are accumulated in one pass; which one is
        # returned depends on `self.eps_smoothing`.
        for i in range(self.order):
            n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3]

            # chrF++.py style EPS smoothing (also used by Moses and NLTK)
            prec = n_match / n_hyp if n_hyp > 0 else eps
            rec = n_match / n_ref if n_ref > 0 else eps
            denom = factor * prec + rec
            score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps

            # sacreBLEU <2.0.0 style effective order smoothing
            if n_hyp > 0 and n_ref > 0:
                avg_prec += prec
                avg_rec += rec
                effective_order += 1

        if self.eps_smoothing:
            # Average the per-order F scores over all n-gram orders
            return 100 * score / self.order

        if effective_order == 0:
            avg_prec = avg_rec = 0.0
        else:
            avg_prec /= effective_order
            avg_rec /= effective_order

        if avg_prec + avg_rec:
            # F_beta over the averaged precision/recall
            score = (1 + factor) * avg_prec * avg_rec
            score /= ((factor * avg_prec) + avg_rec)
            return 100 * score
        else:
            return 0.0

    def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore:
        """Computes the final score from already aggregated statistics.

        :param stats: A list or numpy array of segment-level statistics.
        :return: A `CHRFScore` object.
        """
        return CHRFScore(
            self._compute_f_score(stats), self.char_order,
            self.word_order, self.beta)

    def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore:
        """Computes the final score given the pre-computed corpus statistics.

        :param stats: A list of segment-level statistics
        :return: A `CHRFScore` object.
        """
        # Corpus-level chrF sums segment statistics before computing F
        return self._compute_score_from_stats(sum_of_lists(stats))

    def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]:
        """Given a list of reference segments, extract the character and word n-grams.

        :param refs: A sequence of reference segments.
        :return: A list where each element contains n-grams per reference segment.
        """
        ngrams = []

        for ref in refs:
            # extract character n-grams
            stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace)

            # Check chrF+ mode
            if self.word_order > 0:
                ref_words = self._remove_punctuation(ref)

                for n in range(self.word_order):
                    stats.append(extract_word_ngrams(ref_words, n + 1))

            ngrams.append(stats)

        return {'ref_ngrams': ngrams}

    def _compute_segment_statistics(
            self, hypothesis: str, ref_kwargs: Dict) -> List[int]:
        """Given a (pre-processed) hypothesis sentence and already computed
        reference n-grams, returns the best match statistics across the
        references.

        :param hypothesis: Hypothesis sentence.
        :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list
        where each sublist contains n-gram counters for a particular reference sentence.
        :return: A list of integers where each triplet denotes [hyp, ref, match]
            statistics.
        """
        best_stats = []
        best_f_score = -1.0

        # extract character n-grams
        all_hyp_ngrams = extract_all_char_ngrams(
            hypothesis, self.char_order, self.whitespace)

        # Check chrF+ mode to see if we'll add word n-grams as well
        if self.word_order > 0:
            # Primitive tokenization: separate out punctuations
            hwords = self._remove_punctuation(hypothesis)
            _range = range(1, self.word_order + 1)
            all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range])

        # Iterate over multiple references, pick the one with best F score
        for _ref_ngrams in ref_kwargs['ref_ngrams']:
            stats = []
            # Traverse all orders
            for h, r in zip(all_hyp_ngrams, _ref_ngrams):
                stats.extend(self._get_match_statistics(h, r))
            f_score = self._compute_f_score(stats)

            if f_score > best_f_score:
                best_f_score = f_score
                best_stats = stats

        return best_stats
from collections import Counter
from typing import List, Tuple
def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]:
    """Extracts all ngrams (min_order <= n <= max_order) from a sentence.

    :param line: A string sentence.
    :param min_order: Minimum n-gram order.
    :param max_order: Maximum n-gram order.
    :return: a Counter object with n-grams counts and the sequence length.
    """
    tokens = line.split()

    # Collect every n-gram of every requested order as a tuple of tokens
    counts = Counter(
        tuple(tokens[start: start + order])
        for order in range(min_order, max_order + 1)
        for start in range(len(tokens) - order + 1)
    )

    return counts, len(tokens)
def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
    """Extracts n-grams with order `n` from a list of tokens.

    :param tokens: A list of tokens.
    :param n: The order of n-grams.
    :return: a Counter object with n-grams counts.
    """
    # N-grams are keyed by their space-joined string form
    counts = Counter()
    for start in range(len(tokens) - n + 1):
        counts[' '.join(tokens[start:start + n])] += 1
    return counts
def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter:
    """Yields counts of character n-grams from a sentence.

    :param line: A segment containing a sequence of words.
    :param n: The order of the n-grams.
    :param include_whitespace: If given, will not strip whitespaces from the line.
    :return: a dictionary containing ngrams and counts
    """
    # Strip all whitespace unless explicitly kept
    text = line if include_whitespace else ''.join(line.split())
    return Counter(text[i:i + n] for i in range(len(text) - n + 1))
def extract_all_char_ngrams(
        line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]:
    """Extracts all character n-grams at once for convenience.

    :param line: A segment containing a sequence of words.
    :param max_order: The maximum order of the n-grams.
    :param include_whitespace: If given, will not strip whitespaces from the line.
    :return: a list of Counter objects containing ngrams and counts.
    """
    # Strip all whitespace unless explicitly kept
    text = line if include_whitespace else ''.join(line.split())

    # One Counter per order, from unigrams up to max_order
    return [
        Counter(text[i:i + n] for i in range(len(text) - n + 1))
        for n in range(1, max_order + 1)
    ]
import math
import logging
from importlib import import_module
from typing import List, Sequence, Optional, Dict, Any
from ..utils import my_log, sum_of_lists
from .base import Score, Signature, Metric
from .helpers import extract_all_word_ngrams
# Module-level logger for the sacrebleu package
sacrelogger = logging.getLogger('sacrebleu')

# The default for the maximum n-gram order when computing precisions
MAX_NGRAM_ORDER = 4

# Maps tokenizer short names to "<module>.<class>" under sacrebleu.tokenizers;
# entries are resolved lazily by `_get_tokenizer()` to keep import time low.
_TOKENIZERS = {
    'none': 'tokenizer_base.BaseTokenizer',
    'zh': 'tokenizer_zh.TokenizerZh',
    '13a': 'tokenizer_13a.Tokenizer13a',
    'intl': 'tokenizer_intl.TokenizerV14International',
    'char': 'tokenizer_char.TokenizerChar',
    'ja-mecab': 'tokenizer_ja_mecab.TokenizerJaMecab',
}
def _get_tokenizer(name: str):
    """Dynamically import tokenizer as importing all is slow."""
    # _TOKENIZERS values are "<module>.<class>" strings
    module_name, class_name = _TOKENIZERS[name].rsplit('.', 1)
    tok_module = import_module(f'.tokenizers.{module_name}', 'sacrebleu')
    return getattr(tok_module, class_name)
class BLEUSignature(Signature):
    """A convenience class to represent the reproducibility signature for BLEU.

    :param args: key-value dictionary passed from the actual metric instance.
    """
    def __init__(self, args: dict):
        """`BLEUSignature` initializer."""
        super().__init__(args)

        self._abbr.update({
            'case': 'c',
            'eff': 'e',
            'tok': 'tok',
            'smooth': 's',
        })

        # Build the smoothing descriptor, e.g. "floor[0.10]" or "exp"
        smooth_str = args['smooth_method']
        smooth_def = BLEU.SMOOTH_DEFAULTS[smooth_str]

        # Methods with a tunable parameter show it within brackets
        if smooth_def is not None:
            # the following can be None if the user wants to use the default
            smooth_val = args['smooth_value']
            if smooth_val is None:
                smooth_val = smooth_def
            smooth_str = f'{smooth_str}[{smooth_val:.2f}]'

        self.info.update({
            'case': 'lc' if args['lowercase'] else 'mixed',
            'eff': 'yes' if args['effective_order'] else 'no',
            'tok': args['tokenizer_signature'],
            'smooth': smooth_str,
        })
class BLEUScore(Score):
    """A convenience class to represent BLEU scores.

    :param score: The BLEU score.
    :param counts: List of counts of correct ngrams, 1 <= n <= max_ngram_order
    :param totals: List of counts of total ngrams, 1 <= n <= max_ngram_order
    :param precisions: List of precisions, 1 <= n <= max_ngram_order
    :param bp: The brevity penalty.
    :param sys_len: The cumulative system length.
    :param ref_len: The cumulative reference length.
    """
    def __init__(self, score: float, counts: List[int], totals: List[int],
                 precisions: List[float], bp: float,
                 sys_len: int, ref_len: int):
        """`BLEUScore` initializer."""
        super().__init__('BLEU', score)
        self.bp = bp
        self.counts = counts
        self.totals = totals
        self.sys_len = sys_len
        self.ref_len = ref_len
        self.precisions = precisions

        # Pre-formatted per-order precisions, e.g. "40.1/25.0/…".
        self.prec_str = "/".join(f"{p:.1f}" for p in self.precisions)
        # Length ratio guards against a zero reference length.
        self.ratio = self.sys_len / self.ref_len if self.ref_len else 0

        # The verbose part of BLEU
        self._verbose = (
            f"{self.prec_str} (BP = {self.bp:.3f} "
            f"ratio = {self.ratio:.3f} hyp_len = {self.sys_len:d} "
            f"ref_len = {self.ref_len:d})"
        )
class BLEU(Metric):
    """Computes the BLEU metric given hypotheses and references.

    :param lowercase: If True, lowercased BLEU is computed.
    :param force: Ignore data that looks already tokenized.
    :param tokenize: The tokenizer to use. If None, defaults to language-specific tokenizers with '13a' as the fallback default.
    :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none').
    :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
    :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
    :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
        `True`, if sentence-level BLEU will be computed.
    :param trg_lang: An optional language code to raise potential tokenizer warnings.
    :param references: A sequence of reference documents with document being
        defined as a sequence of reference strings. If given, the reference n-grams
        and lengths will be pre-computed and cached for faster BLEU computation
        across many systems.
    """

    SMOOTH_DEFAULTS: Dict[str, Optional[float]] = {
        # The defaults for `floor` and `add-k` are obtained from the following paper
        # A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU
        # Boxing Chen and Colin Cherry
        # http://aclweb.org/anthology/W14-3346
        'none': None,   # No value is required
        'floor': 0.1,
        'add-k': 1,
        'exp': None,    # No value is required
    }

    TOKENIZERS = ['none', 'zh', '13a', 'char', 'intl', 'ja-mecab']

    # mteval-v13a.pl tokenizer unless Chinese or Japanese is provided
    TOKENIZER_DEFAULT = '13a'

    # Some language specific mappings to use if `trg_lang` is given
    # and the tokenizer is not explicitly specified
    _TOKENIZER_MAP = {
        'zh': 'zh',
        'ja': 'ja-mecab',
    }

    _SIGNATURE_TYPE = BLEUSignature

    def __init__(self, lowercase: bool = False,
                 force: bool = False,
                 tokenize: Optional[str] = '13a',
                 smooth_method: str = 'exp',
                 smooth_value: Optional[float] = None,
                 max_ngram_order: int = MAX_NGRAM_ORDER,
                 effective_order: bool = False,
                 trg_lang: str = '',
                 references: Optional[Sequence[Sequence[str]]] = None):
        """`BLEU` initializer."""
        super().__init__()

        self._force = force
        self.trg_lang = trg_lang
        self.lowercase = lowercase
        self.smooth_value = smooth_value
        self.smooth_method = smooth_method
        self.max_ngram_order = max_ngram_order
        self.effective_order = effective_order

        # Sanity check
        # FIX: the message was missing its `f` prefix, so a failing assert
        # printed the literal placeholder instead of the offending value.
        assert self.smooth_method in self.SMOOTH_DEFAULTS.keys(), \
            f"Unknown smooth_method {self.smooth_method!r}"

        # Default tokenizer logic
        if tokenize is None:
            best_tokenizer = self.TOKENIZER_DEFAULT

            # Set `zh` or `ja-mecab` if target language is provided
            if self.trg_lang in self._TOKENIZER_MAP:
                best_tokenizer = self._TOKENIZER_MAP[self.trg_lang]
        else:
            best_tokenizer = tokenize
            # Warn (but do not override) when the explicit choice clashes
            # with the language-specific recommendation.
            if self.trg_lang == 'zh' and best_tokenizer != 'zh':
                sacrelogger.warning(
                    "You should use the 'zh' tokenizer for Chinese.")
            if self.trg_lang == 'ja' and best_tokenizer != 'ja-mecab':
                sacrelogger.warning(
                    "You should use the 'ja-mecab' tokenizer for Japanese.")

        # Create the tokenizer
        self.tokenizer = _get_tokenizer(best_tokenizer)()

        # Build the signature
        self.tokenizer_signature = self.tokenizer.signature()

        if references is not None:
            # Pre-compute reference ngrams and lengths
            self._ref_cache = self._cache_references(references)

    @staticmethod
    def compute_bleu(correct: List[int],
                     total: List[int],
                     sys_len: int,
                     ref_len: int,
                     smooth_method: str = 'none',
                     smooth_value=None,
                     effective_order: bool = False,
                     max_ngram_order: int = MAX_NGRAM_ORDER) -> BLEUScore:
        """Computes BLEU score from its sufficient statistics with smoothing.

        Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
        Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)

        - none: No smoothing.
        - floor: Method 1 (requires small positive value (0.1 in the paper) to be set)
        - add-k: Method 2 (Generalizing Lin and Och, 2004)
        - exp: Method 3 (NIST smoothing method i.e. in use with mteval-v13a.pl)

        :param correct: List of counts of correct ngrams, 1 <= n <= max_ngram_order
        :param total: List of counts of total ngrams, 1 <= n <= max_ngram_order
        :param sys_len: The cumulative system length
        :param ref_len: The cumulative reference length
        :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none')
        :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
        :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
            `True`, if sentence-level BLEU will be computed.
        :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
        :return: A `BLEUScore` instance.
        """
        # FIX: the message was missing its `f` prefix and never interpolated.
        assert smooth_method in BLEU.SMOOTH_DEFAULTS.keys(), \
            f"Unknown smooth_method {smooth_method!r}"

        # Fetch the default value for floor and add-k
        if smooth_value is None:
            smooth_value = BLEU.SMOOTH_DEFAULTS[smooth_method]

        # Compute brevity penalty
        if sys_len < ref_len:
            bp = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
        else:
            bp = 1.0

        # n-gram precisions
        precisions = [0.0 for x in range(max_ngram_order)]

        # Early stop if there are no matches (#141)
        if not any(correct):
            return BLEUScore(0.0, correct, total, precisions, bp, sys_len, ref_len)

        smooth_mteval = 1.
        eff_order = max_ngram_order
        for n in range(1, len(precisions) + 1):
            # NOTE: `add-k` smoothing mutates `correct`/`total` in place, so
            # the returned `BLEUScore` carries the smoothed counts.
            if smooth_method == 'add-k' and n > 1:
                correct[n - 1] += smooth_value
                total[n - 1] += smooth_value

            if total[n - 1] == 0:
                break

            # If the system guesses no i-grams, 1 <= i <= max_ngram_order,
            # the BLEU score is 0 (technically undefined). This is a problem for sentence
            # level BLEU or a corpus of short sentences, where systems will get
            # no credit if sentence lengths fall under the max_ngram_order threshold.
            # This fix scales max_ngram_order to the observed maximum order.
            # It is only available through the API and off by default
            if effective_order:
                eff_order = n

            if correct[n - 1] == 0:
                if smooth_method == 'exp':
                    smooth_mteval *= 2
                    precisions[n - 1] = 100. / (smooth_mteval * total[n - 1])
                elif smooth_method == 'floor':
                    precisions[n - 1] = 100. * smooth_value / total[n - 1]
            else:
                precisions[n - 1] = 100. * correct[n - 1] / total[n - 1]

        # Compute BLEU score
        score = bp * math.exp(
            sum([my_log(p) for p in precisions[:eff_order]]) / eff_order)

        return BLEUScore(score, correct, total, precisions, bp, sys_len, ref_len)

    def _preprocess_segment(self, sent: str) -> str:
        """Given a sentence, lowercases (optionally) and tokenizes it

        :param sent: The input sentence string.
        :return: The pre-processed output string.
        """
        if self.lowercase:
            sent = sent.lower()
        return self.tokenizer(sent.rstrip())

    def _compute_score_from_stats(self, stats: List[int]) -> BLEUScore:
        """Computes the final score from already aggregated statistics.

        The flattened `stats` layout is: [sys_len, ref_len,
        correct_1..correct_N, total_1..total_N] (see
        `_compute_segment_statistics`).

        :param stats: A list or numpy array of segment-level statistics.
        :return: A `BLEUScore` object.
        """
        return self.compute_bleu(
            correct=stats[2: 2 + self.max_ngram_order],
            total=stats[2 + self.max_ngram_order:],
            sys_len=int(stats[0]), ref_len=int(stats[1]),
            smooth_method=self.smooth_method, smooth_value=self.smooth_value,
            effective_order=self.effective_order)

    def _aggregate_and_compute(self, stats: List[List[int]]) -> BLEUScore:
        """Computes the final BLEU score given the pre-computed corpus statistics.

        :param stats: A list of segment-level statistics
        :return: A `BLEUScore` instance.
        """
        return self._compute_score_from_stats(sum_of_lists(stats))

    def _get_closest_ref_len(self, hyp_len: int, ref_lens: List[int]) -> int:
        """Given a hypothesis length and a list of reference lengths, returns
        the closest reference length to be used by BLEU.

        Ties are broken in favour of the shorter reference.

        :param hyp_len: The hypothesis length.
        :param ref_lens: A list of reference lengths.
        :return: The closest reference length.
        """
        closest_diff, closest_len = -1, -1

        for ref_len in ref_lens:
            diff = abs(hyp_len - ref_len)
            if closest_diff == -1 or diff < closest_diff:
                closest_diff = diff
                closest_len = ref_len
            elif diff == closest_diff and ref_len < closest_len:
                closest_len = ref_len

        return closest_len

    def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
        """Given a list of reference segments, extract the n-grams and reference lengths.
        The latter will be useful when comparing hypothesis and reference lengths for BLEU.

        :param refs: A sequence of strings.
        :return: A dictionary that will be passed to `_compute_segment_statistics()`
            through keyword arguments.
        """
        ngrams = None
        ref_lens = []

        for ref in refs:
            # extract n-grams for this ref
            this_ngrams, ref_len = extract_all_word_ngrams(ref, 1, self.max_ngram_order)
            ref_lens.append(ref_len)

            if ngrams is None:
                # Set it directly for first set of refs
                ngrams = this_ngrams
            else:
                # Merge counts across multiple references
                # The below loop is faster than `ngrams |= this_ngrams`
                for ngram, count in this_ngrams.items():
                    ngrams[ngram] = max(ngrams[ngram], count)

        return {'ref_ngrams': ngrams, 'ref_lens': ref_lens}

    def _compute_segment_statistics(self, hypothesis: str,
                                    ref_kwargs: Dict) -> List[int]:
        """Given a (pre-processed) hypothesis sentence and already computed
        reference n-grams & lengths, returns the best match statistics across the
        references.

        :param hypothesis: Hypothesis sentence.
        :param ref_kwargs: A dictionary with `refs_ngrams`and `ref_lens` keys
            that denote the counter containing all n-gram counts and reference lengths,
            respectively.
        :return: A list of integers with match statistics.
        """
        ref_ngrams, ref_lens = ref_kwargs['ref_ngrams'], ref_kwargs['ref_lens']

        # Extract n-grams for the hypothesis
        hyp_ngrams, hyp_len = extract_all_word_ngrams(
            hypothesis, 1, self.max_ngram_order)

        ref_len = self._get_closest_ref_len(hyp_len, ref_lens)

        # Count the stats
        # Although counter has its internal & and | operators, this is faster
        correct = [0 for i in range(self.max_ngram_order)]
        total = correct[:]
        for hyp_ngram, hyp_count in hyp_ngrams.items():
            # n-gram order
            n = len(hyp_ngram) - 1
            # count hypothesis n-grams
            total[n] += hyp_count
            # count matched n-grams
            if hyp_ngram in ref_ngrams:
                correct[n] += min(hyp_count, ref_ngrams[hyp_ngram])

        # Return a flattened list for efficient computation
        return [hyp_len, ref_len] + correct + total

    def sentence_score(self, hypothesis: str, references: Sequence[str]) -> BLEUScore:
        """Compute the metric for a single sentence against a single (or multiple) reference(s).

        :param hypothesis: A single hypothesis string.
        :param references: A sequence of reference strings.
        :return: a `BLEUScore` object.
        """
        if not self.effective_order:
            sacrelogger.warning(
                'It is recommended to enable `effective_order` for sentence-level BLEU.')
        return super().sentence_score(hypothesis, references)
from __future__ import print_function
import os
import sys
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
BASE_DIR = ''
GLOVE_DIR = BASE_DIR + '/glove.6B/'          # directory with pre-trained GloVe vectors
TEXT_DATA_DIR = BASE_DIR + '/20_newsgroup/'  # corpus: one sub-directory per class
MAX_SEQUENCE_LENGTH = 1000  # pad/truncate every document to this many tokens
MAX_NB_WORDS = 20000        # vocabulary cap for the tokenizer
EMBEDDING_DIM = 100         # must match the GloVe file loaded below (glove.6B.100d)
VALIDATION_SPLIT = 0.2      # fraction of samples held out for validation
# first, build index mapping words in the embeddings set
# to their embedding vector
print('Indexing word vectors.')

# Each line of the GloVe file is: <word> <v1> <v2> ... <vN>.
# Use a `with` block so the file handle is released even on a parse error
# (the original open()/close() pair leaked the handle on exceptions).
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs

print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')

texts = []  # list of text samples
labels_index = {}  # dictionary mapping label name to numeric id
labels = []  # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
    path = os.path.join(TEXT_DATA_DIR, name)
    if os.path.isdir(path):
        # Each sub-directory is one newsgroup; its discovery order defines the id.
        label_id = len(labels_index)
        labels_index[name] = label_id
        for fname in sorted(os.listdir(path)):
            # Message files in the 20-newsgroup dump have all-digit names.
            if fname.isdigit():
                fpath = os.path.join(path, fname)
                # Python 2 reads bytes transparently; Python 3 needs an
                # explicit 8-bit encoding for these files. `with` replaces
                # the original open()/close() pair (leaked on exceptions).
                open_kwargs = {} if sys.version_info < (3,) else {'encoding': 'latin-1'}
                with open(fpath, **open_kwargs) as f:
                    t = f.read()
                i = t.find('\n\n')  # skip header
                if 0 < i:
                    t = t[i:]
                texts.append(t)
                labels.append(label_id)

print('Found %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

# word_index maps each token to an integer rank (most frequent first).
# NOTE(review): Keras assigns indices starting at 1 (0 is reserved for
# padding) — the embedding-matrix sizing below depends on this; confirm
# against the installed Keras version.
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

# Pad/truncate every sequence to a fixed length; labels become one-hot rows.
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

# split the data into a training set and a validation set
# (shuffle first so the split is not biased by directory order)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])

x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
print('Preparing embedding matrix.')

# prepare embedding matrix
# Keras `word_index` is 1-based (index 0 is reserved for padding), so the
# matrix needs len(word_index) + 1 rows when the vocabulary is smaller than
# the cap.
# FIX: the previous `min(MAX_NB_WORDS, len(word_index))` sizing raised an
# IndexError for the highest-indexed word whenever the corpus vocabulary was
# smaller than MAX_NB_WORDS.
num_words = min(MAX_NB_WORDS, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
    if i >= num_words:
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector

# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
print('Training model.')

# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')

# Stack: embedding -> three (conv, pool) pairs -> flatten -> dense head.
# Every conv has 128 filters of width 5; only the pool sizes differ.
x = embedding_layer(sequence_input)
for pool_size in (5, 5, 35):
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(pool_size)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(len(labels_index), activation='softmax')(x)

model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

model.fit(x_train, y_train,
          batch_size=128,
          epochs=10,
          validation_data=(x_val, y_val))
# In[1]:
'''Train a simple deep CNN on the CIFAR10 small images dataset.

GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
    THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatx=float32 python cifar10_cnn.py

It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''

from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D

# In[2]:
batch_size = 32
num_classes = 10  # CIFAR-10 has ten object categories
epochs = 200
data_augmentation = True  # NOTE: re-assigned to False further down (cell In[8])

# In[3]:
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# In[7]:
# Two conv blocks (32 then 64 filters) followed by a dense classifier head.
# Passing the layer list to the Sequential constructor is equivalent to the
# chained .add() calls.
model = Sequential([
    Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]),
    Activation('relu'),
    Conv2D(32, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),

    Conv2D(64, (3, 3), padding='same'),
    Activation('relu'),
    Conv2D(64, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),

    Flatten(),
    Dense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(num_classes),
    Activation('softmax'),
])

# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()
# In[8]:
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# NOTE(review): this overrides `data_augmentation = True` set near the top of
# the file — a leftover notebook toggle. As written, the augmentation branch
# below is dead code.
data_augmentation = False

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test))
# In[28]:
# NOTE(review): `labels` is only assigned in the cell below (In[30]); running
# this export top-to-bottom raises NameError here. Notebook cell-order
# artifact — the expression previews (image, label) pairs in a REPL.
[(x, y) for x, y in zip(x_train, labels)][:10]

# In[30]:
# Dump a random sample of CIFAR images to cifar_jpg/<label>_<count>.jpg.
import numpy as np
from quiver_engine import server
import matplotlib.image as mimg
from collections import defaultdict

# Recover integer class ids from the one-hot `y_train` rows.
labels = np.where(y_train == 1)[1]

counter = defaultdict(int)
indexes = list(range(len(labels)))
for i in np.random.choice(indexes, 10):
    x = x_train[i]
    y = labels[i]
    counter[y] += 1
    mimg.imsave('cifar_jpg/{}_{}.jpg'.format(y, counter[y]), x)

# In[32]:
get_ipython().magic('cd ~/cifar_jpg/')
server.launch(model, [str(i) for i in range(10)], 5, input_folder='.', port=5000)

# In[35]:
from vis.losses import ActivationMaximization
from vis.regularizers import TotalVariation, LPNorm

filter_indices = [1, 2, 3]

# Tuple consists of (loss_function, weight)
# Add regularizers as needed.
# NOTE(review): `keras_layer` is not defined anywhere in this file, so this
# cell cannot run as-is — presumably a layer from `model` was intended; verify.
losses = [
    (ActivationMaximization(keras_layer, filter_indices), 1),
    (LPNorm(model.input), 10),
    (TotalVariation(model.input), 10)
]
# In[2]:
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
from keras.models import Model
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions, preprocess_input, _obtain_input_shape
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
# In[3]:
def VGG16(include_top=True, weights='imagenet',
          input_tensor=None, input_shape=None,
          pooling=None,
          classes=1000):
    """Instantiates the VGG16 architecture.

    Optionally loads weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format="channels_last"` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      include_top=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Five convolutional blocks, each ending in a 2x2 max-pool.
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vgg16')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH,
                                    cache_subdir='models')
        else:
            weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        # The downloaded weights are in TF ('channels_last') layout; when the
        # config asks for 'channels_first', the first dense layer's weights
        # must be reshuffled to match the flattened conv output.
        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='block5_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
# In[4]:
# NOTE(review): this import rebinds the name VGG16 to Keras' bundled
# implementation, shadowing the local definition above — the local function
# is never used after this point.
from keras.applications import VGG16

# In[ ]:
vgg16 = VGG16()
# In[8]:
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# In[9]:
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# In[ ]:
np.f
# In[10]:
import matplotlib.image as mimg
from collections import defaultdict
counter = defaultdict(int)
for x, y in zip(x_train, y_train):
counter[y] += 1
mimg.imsave('mnist_jpg/{}_{}.jpg'.format(y, counter[y]), x)
# In[11]:
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# In[12]:
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# In[13]:
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))

# evaluate() with metrics=['accuracy'] returns [loss, accuracy].
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
# FIX: accuracy was computed but never reported.
print('Test accuracy:', score[1])

# In[ ]:
import numpy as np
from quiver_engine import server

# In[ ]:
# Serve the trained model through quiver for interactive layer inspection,
# using the jpg dumps in mnist_quiver/ as input images.
get_ipython().magic('cd mnist_quiver/')
server.launch(model, [str(i) for i in range(10)], 5, input_folder='.', port=5000)
# sacred-tui 
ASCII art import and terminal graphics made simple.
Let's say you want something like this in your script:
```
_____________________________,----,__
|==============================<| /___\ ____,-------------.____
`------------------.-----.---.___.--' __.--'-----------------------`--.__
`._ `. =======================================
____`.___`._____,----' `--------,----------------'
/_|___________-----< ========,'
`-. ,'
`----.______,--'
```
ASCII ᴀʀᴛ ᴍᴀᴅᴇ ʙʏ Jᴏsʜᴜᴀ Bᴇʟʟ
And what if you wanted it at *exactly*, *precisely* 10 spaces to the right?
You can do this:
```
print(" _____________________________,----,__")
print(" |==============================<| /___\\ ____,-------------.____")
print(" `------------------.-----.---.___.--' __.--'-----------------------`--.__")
print(" `._ `. =======================================")
print(" ____`.___`._____,----' `--------,----------------'")
print(" /_|___________-----< ========,'")
print(" `-. ,'")
print(" `----.______,--'")
```
Then you add more spaces and backslashes, lose track of which line you changed, and start over. **Eww, no.**
Or maybe this:
```
from blessed import Terminal # pip install blessed
term = Terminal()
print(term.clear)
with open("ent_e.txt", "r") as f: # ent_e.txt contains the ASCII art itself
ent = f.readlines()
with term.location(10, 0):
for line in ent:
print(line)
```
Wait a minute:
```
_____________________________,----,__
|==============================<| /___\ ____,-------------.____
`------------------.-----.---.___.--' __.--'-----------------------`--.__
`._ `. =======================================
____`.___`._____,----' `--------,----------------'
/_|___________-----< ========,'
`-. ,'
`----.______,--'
```
*AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA the ship is falling apart!!!!*
## **Behold, sacred-tui.**
```
import sacred
scene = sacred.Scene()
cam = sacred.Camera()
sacred.clear()
scene.obj("ent_e.json", 10, 0) # asciigen ent_e.txt ent_e
cam.render()
```
and voilà!
```
_____________________________,----,__
|==============================<| /___\ ____,-------------.____
`------------------.-----.---.___.--' __.--'-----------------------`--.__
`._ `. =======================================
____`.___`._____,----' `--------,----------------'
/_|___________-----< ========,'
`-. ,'
`----.______,--'
```
Though you have to convert the ASCII art from text to the json format that my script uses (asciigen), there is no hassle for just putting the spaceship a little bit more to the right. Or a bit toward the bottom. Anywhere on the terminal.
***ANYWHERE.***
[*Maniacal laughter*](https://www.youtube.com/watch?v=gY2k8_sSTsE)
Did I also mention that it supports ANSI Color?
```
import sacred
import blessed
scene = sacred.Scene()
cam = sacred.Camera()
t = blessed.Terminal()
sacred.clear()
scene.obj("testassets/spacedock.json", 5, 0)
scene.obj("testassets/ent_e.json", 10, 3)
scene.box(0, 15, sacred.width, sacred.height - 15)
scene.txt(t.bright_red_on_bright_yellow("tests are inevitable"), 5, 30)
cam.render()
input()
```
```
----------------------------------------------------------------------------------------------------
|| || || || || || || || || ||
---------- ---------- ---------- ---------- ----------
| _____________________________,----,__ | | | |
| |==============================<| /___\ ____,-------------.____
| `------------------.-----.---.___.--' __.--'-----------------------`--.__
| | | |`._ `. =======================================
| | | |____`.___`._____,----' `--------,----------------'
| | | /_|___________-----< ========,' | |
| | | | | `-. ,' | |
| | | | | | `----.______,--' | |
---------- ---------- ---------- ---------- ----------
|| || || || || || || || || ||
----------------------------------------------------------------------------------------------------
┌────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ tests are inevitable │
│ │
│ │
└────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## *Wait a minute, I have bigger ASCII art, but `scene.render()` won't allow objects to exceed terminal size. EXPLAIN*
That's where `Camera()` and `create_stage()` come in.
`Camera.move()` allows you to move around the stage. Using `create_stage()` you can define an area that is bigger than the terminal size.
This means you can move around your camera instead of creating individual frames just to view a different part of the large model.
# Terminology
*Camera*: V-Cam. Something like the [Virtual Camera in Adobe Animate](https://helpx.adobe.com/ca/animate/how-to/virtual-camera.html).
*Scene*: Layer of objects.
*Screen/Stage*: The work area. It can be larger than the terminal size.
*Objects*: Text, Boxes, ASCII art etc.
# Installation
Clone repo and install:
```
python3 setup.py install
```
or use pip:
```
pip install sacred-tui
```
Note: If you want to develop this, use the develop mode
```
python3 setup.py develop
```
This will recognize your changes for your workspace.
*Note: does not support Python 2*
# Documentation
### [asciigen.py](https://github.com/justaboredkid/sacred-tui/blob/master/asciigen.py)
Python script for generating ASCII json files. All you need to do is to grab some ASCII art (from the web or somewhere else), type `asciigen` in the terminal to convert it into json, and:
```
import sacred
scene = sacred.Scene()
sacred.clear()
scene.obj("test.json", 5, 10)
scene.render()
```
Usage:
```
asciigen.py <inputfile> <outputname>
```
The data is in this format:
```
{"line#":["text", SpaceBeforeText]}
```
Where `line#` means which line it is, `text` being what is printed on the line, and `SpaceBeforeText` means the, well, space before the line. Note that this tool isn't perfect, so you might have to edit the `SpaceBeforeText` part.
### sacred.width, sacred.height
These output the values of shutil.get_terminal_size() separately.
### sacred.create_stage(w=width, h=height)
This (by default) creates a stage that is the size of the terminal. By manipulating the `w` and `h` values, you can set the stage size to whatever you want.
Want an epic long chase? Increase the width and move the camera around by using `sacred.Camera()`.
*Note: The camera always starts at the top left corner of the stage*
Args:
```
w (int): width of the stage
h (int): height of the stage
```
## *sacred.Scene(object)*
Class for all objects in render. It has the following objects:
### Scene.reset()
This function clears everything in the scene, kind of like an 'Erase everything on the scene' function. It keeps the original stage size as well.
### Scene.obj(file, x=0, y=0)
Imports ASCII json files. Those are generated by `asciigen`. It adds the ASCII art to the scene as well as returning the json.
Args:
```
file (str): path to the .json file
x (int): position on the x axis
y (int): position on the y axis
```
### Scene.txt(txt, x=0, y=0)
Adds text to the position (x, y). Similar to `print()`, but you can put it wherever you want on the scene. Sacred also supports ANSI escape sequences after V0.2.1
Args:
```
txt (str): content to print at (x,y)
x (int): position on the x axis
y (int): position on the y axis
```
### Scene.box(x=0, y=0, w=t.width - 1, h=t.height - 1, style='reg', fill=" ")
Adds a box on terminal. Creates a box around the terminal by default. (Note that a character on the terminal is a tall rectangle, which means that having the same height and width does not create a square.)
V0.2.1 adds Unicode box characters. You can set the styles of the lines.
Here are the possible values for style:
* reg (regular)
* heavy
* dashed
* dashedTight (Tight dash)
* dashedTighter (Tightest dash)
*Example:*
```
import sacred
scene = sacred.Scene()
cam = sacred.Camera()
scene.box(5, 5, 10, 10, fill="*")
cam.render()
```
*Output:*
```
┌────────┐
│********│
│********│
│********│
│********│
│********│
│********│
│********│
│********│
└────────┘
```
Args:
```
x (int): position on x axis
y (int): position on y axis
w (int): width (by char)
h (int): height (by char)
style (str): sets style of the line that draws the box.
fill (str): content that fills inside box. (Will return ValueError if fill is more than one char.)
```
### Scene.fill(x=0, y=0, w=width, h=height, fill="#"):
This fills a designated area with `fill`. Like `box()` but without the lines.
*Example:*
```
import sacred
scene = sacred.Scene()
cam = sacred.Camera()
scene.fill(5, 5, 10, 10)
cam.render()
```
*Output:*
```
##########
##########
##########
##########
##########
##########
##########
##########
##########
##########
```
### ~~Scene.render()~~ (deprecated)
**`this has been deprecated. Will remove soon.`**
### Scene.export()
This function takes the stage, converts it into a string, and returns the stage. You can do whatever you want with it — it is a large nested list. That includes, but is not limited to, storing it as a variable.
### Scene.restore(scr)
This takes the output of `Scene.export()` and restores it. You can restore the scene from the aforementioned variable.
## sacred.Camera(object)
Class for moving around the camera, using the terminal like a viewport.
### Camera.move(x=0, y=0)
Moves the camera inside the stage. This way you can have objects that are bigger than the terminal and it will still be able to render.
### Camera.render()
Renders (or print, if you prefer) all the objects added. Note that it prints in the order of the added objects, so each object goes on top of other objects in the order of the objects added.
~~The `multi` argument allows multi-thread rendering. *This is still work in progress.*~~
`multi` has been removed since V0.2.1
## sacred.TooLarge(Exception)
Error raised when the objects that are added are bigger than the width of the Stage itself.
So if you do this in a 80x24 stage:
```
scene.box(0, 0, 500, 500)
scene.render()
```
then this happens:
```
Scene exceeds terminal width
User: If possible, maximize the terminal window.
Dev: Make sure your x values are in check.
Traceback (most recent call last):
File "test.py", line 7, in <module>
scene.render()
File "/home/henry/Github/sacred/sacred.py", line 47, in render
printobj(obj, self.pos[i][0], self.pos[i][1])
File "/home/henry/Github/sacred/sacred.py", line 83, in printobj
raise TooLarge
sacred.TooLarge
```
## sacred.txtbox(txt, x=0, y=t.height - 1)
When `input()` is not enough. This is basically a moveable version of it, where you can place the input prompt anywhere on the screen.
By default, it prints from the bottom of the terminal.
## sacred.clear()
Clears screen.
| /sacred_tui-0.2.1.tar.gz/sacred_tui-0.2.1/README.md | 0.802633 | 0.716677 | README.md | pypi |
try:  # Python3
    from itertools import zip_longest
except ImportError:  # Python2
    from itertools import izip_longest as zip_longest
from itertools import tee
from xml.sax.saxutils import escape, unescape

from joblib import Parallel, delayed
from tqdm import tqdm
class CJKChars(object):
    """
    An object that enumerates the code points of the CJK characters as listed on
    http://en.wikipedia.org/wiki/Basic_Multilingual_Plane#Basic_Multilingual_Plane

    Each class attribute is an inclusive ``(start, end)`` pair of Unicode code
    points; ``ranges`` collects all pairs for iteration.
    """

    # Hangul Jamo (1100–11FF)
    Hangul_Jamo = (4352, 4607)  # (ord(u"\u1100"), ord(u"\u11ff"))
    # One contiguous span covering all of the blocks listed below:
    # CJK Radicals Supplement (2E80–2EFF)
    # Kangxi Radicals (2F00–2FDF)
    # Ideographic Description Characters (2FF0–2FFF)
    # CJK Symbols and Punctuation (3000–303F)
    # Hiragana (3040–309F)
    # Katakana (30A0–30FF)
    # Bopomofo (3100–312F)
    # Hangul Compatibility Jamo (3130–318F)
    # Kanbun (3190–319F)
    # Bopomofo Extended (31A0–31BF)
    # CJK Strokes (31C0–31EF)
    # Katakana Phonetic Extensions (31F0–31FF)
    # Enclosed CJK Letters and Months (3200–32FF)
    # CJK Compatibility (3300–33FF)
    # CJK Unified Ideographs Extension A (3400–4DBF)
    # Yijing Hexagram Symbols (4DC0–4DFF)
    # CJK Unified Ideographs (4E00–9FFF)
    # Yi Syllables (A000–A48F)
    # Yi Radicals (A490–A4CF)
    CJK_Radicals = (11904, 42191)  # (ord(u"\u2e80"), ord(u"\ua4cf"))
    # Phags-pa (A840–A87F)
    Phags_Pa = (43072, 43135)  # (ord(u"\ua840"), ord(u"\ua87f"))
    # Hangul Syllables (AC00–D7AF)
    Hangul_Syllables = (44032, 55215)  # (ord(u"\uAC00"), ord(u"\uD7AF"))
    # CJK Compatibility Ideographs (F900–FAFF)
    CJK_Compatibility_Ideographs = (63744, 64255)  # (ord(u"\uF900"), ord(u"\uFAFF"))
    # CJK Compatibility Forms (FE30–FE4F)
    CJK_Compatibility_Forms = (65072, 65103)  # (ord(u"\uFE30"), ord(u"\uFE4F"))
    # Range U+FF65–FFDC encodes halfwidth forms, of Katakana and Hangul characters
    Katakana_Hangul_Halfwidth = (65381, 65500)  # (ord(u"\uFF65"), ord(u"\uFFDC"))
    # Ideographic Symbols and Punctuation (16FE0–16FFF)
    Ideographic_Symbols_And_Punctuation = (
        94176,
        94207,
    )  # (ord(u"\U00016FE0"), ord(u"\U00016FFF"))
    # Tangut (17000-187FF)
    # Tangut Components (18800-18AFF)
    Tangut = (94208, 101119)  # (ord(u"\U00017000"), ord(u"\U00018AFF"))
    # Kana Supplement (1B000-1B0FF)
    # Kana Extended-A (1B100-1B12F)
    Kana_Supplement = (110592, 110895)  # (ord(u"\U0001B000"), ord(u"\U0001B12F"))
    # Nushu (1B170-1B2FF)
    Nushu = (110960, 111359)  # (ord(u"\U0001B170"), ord(u"\U0001B2FF"))
    # Supplementary Ideographic Plane (20000–2FFFF)
    Supplementary_Ideographic_Plane = (
        131072,
        196607,
    )  # (ord(u"\U00020000"), ord(u"\U0002FFFF"))

    # NOTE: Ideographic_Symbols_And_Punctuation is intentionally(?) absent
    # from this aggregate list — TODO confirm against upstream sacremoses.
    ranges = [
        Hangul_Jamo,
        CJK_Radicals,
        Phags_Pa,
        Hangul_Syllables,
        CJK_Compatibility_Ideographs,
        CJK_Compatibility_Forms,
        Katakana_Hangul_Halfwidth,
        Tangut,
        Kana_Supplement,
        Nushu,
        Supplementary_Ideographic_Plane,
    ]
def is_cjk(character):
    """
    This checks for CJK character.

    >>> is_cjk(u'\u33fe')
    True
    >>> is_cjk(u'\uFE5F')
    False

    :param character: The character that needs to be checked.
    :type character: char
    :return: bool
    """
    # Hoist ord() out of the per-range comparison: the original called it
    # once per range. The ranges duplicate CJKChars.ranges (kept inline so
    # this hot-path function has no class dependency).
    code_point = ord(character)
    # Generator expression instead of a materialized list so `any` can
    # short-circuit on the first matching range.
    return any(
        start <= code_point <= end
        for start, end in (
            (4352, 4607),
            (11904, 42191),
            (43072, 43135),
            (44032, 55215),
            (63744, 64255),
            (65072, 65103),
            (65381, 65500),
            (94208, 101119),
            (110592, 110895),
            (110960, 111359),
            (131072, 196607),
        )
    )
def xml_escape(text):
    """
    This function transforms the input text into an "escaped" version suitable
    for well-formed XML formatting.

    Note that the default xml.sax.saxutils.escape() function don't escape
    some characters that Moses does so we have to manually add them to the
    entities dictionary.

    >>> xml_escape("a & b")
    'a &amp; b'
    >>> xml_escape("'")
    '&apos;'

    :param text: The text that needs to be escaped.
    :type text: str
    :rtype: str
    """
    # BUG FIX: the entities dict previously mapped every character to itself
    # (the entity references had been HTML-unescaped), making this function
    # behave exactly like the plain saxutils.escape(). Restore the Moses
    # entity mappings for quotes, pipe and square brackets.
    return escape(
        text,
        entities={
            r"'": r"&apos;",
            r'"': r"&quot;",
            r"|": r"&#124;",
            r"[": r"&#91;",
            r"]": r"&#93;",
        },
    )
def xml_unescape(text):
    """
    This function transforms the "escaped" version suitable
    for well-formed XML formatting into humanly-readable string.

    Note that the default xml.sax.saxutils.unescape() function don't unescape
    some characters that Moses does so we have to manually add them to the
    entities dictionary.

    >>> xml_unescape("a &amp; b")
    'a & b'
    >>> xml_unescape("&#91;&#124;&#93;")
    '[|]'

    :param text: The text that needs to be unescaped.
    :type text: str
    :rtype: str
    """
    # BUG FIX: the entities dict previously mapped plain characters to
    # themselves (the entity references had been HTML-unescaped), so Moses
    # entities were never decoded. The keys must be the entity references
    # and the values the literal characters.
    return unescape(
        text,
        entities={
            r"&apos;": r"'",
            r"&quot;": r'"',
            r"&#124;": r"|",
            r"&#91;": r"[",
            r"&#93;": r"]",
        },
    )
def pairwise(iterable):
    """
    Return successive overlapping pairs from *iterable*.

    From https://docs.python.org/3/library/itertools.html#recipes
    s -> (s0,s1), (s1,s2), (s2, s3), ...

    NOTE: requires ``itertools.tee``, which was previously never imported in
    this module (calling this function raised NameError); the import is now
    added at the top of the file.

    :param iterable: Any iterable.
    :return: An iterator of adjacent pairs.
    """
    a, b = tee(iterable)
    # Advance the second iterator by one so the two run offset by one item.
    next(b, None)
    return zip(a, b)
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    from https://stackoverflow.com/a/16789869/610569

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx

    :param iterable: The source iterable.
    :param n: Chunk length.
    :param fillvalue: Padding used to complete the final chunk.
    :return: An iterator of n-length tuples.
    """
    # A single shared iterator, referenced n times, makes zip_longest pull
    # n consecutive items per output tuple.
    shared = iter(iterable)
    return zip_longest(*([shared] * n), fillvalue=fillvalue)
def parallelize_preprocess(func, iterator, processes, progress_bar=False):
    """Apply *func* to every item of *iterator*, optionally in parallel.

    :param func: A one-argument callable applied to each item.
    :param iterator: The items to process.
    :param processes: Number of worker processes; <= 1 means sequential.
    :param progress_bar: If True, wrap the iterator in a tqdm progress bar.
    :return: A lazy map (sequential) or a list of results (parallel).
    """
    if progress_bar:
        iterator = tqdm(iterator)
    # Sequential fast path: no joblib overhead for a single worker.
    if processes <= 1:
        return map(func, iterator)
    return Parallel(n_jobs=processes)(delayed(func)(item) for item in iterator)
import re
import regex
from six import text_type
from itertools import chain
class MosesPunctNormalizer:
    """
    This is a Python port of the Moses punctuation normalizer from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/normalize-punctuation.perl

    Each of the class-level tables below is an ordered list of
    ``(regex_pattern, replacement)`` pairs; order matters, since each
    substitution runs over the output of the previous one. The
    ``# lines N - M`` comments reference the corresponding lines of the
    original Perl script.
    """

    EXTRA_WHITESPACE = [  # lines 21 - 30
        (r"\r", r""),
        (r"\(", r" ("),
        (r"\)", r") "),
        (r" +", r" "),
        (r"\) ([.!:?;,])", r")\g<1>"),
        (r"\( ", r"("),
        (r" \)", r")"),
        (r"(\d) %", r"\g<1>%"),
        (r" :", r":"),
        (r" ;", r";"),
    ]

    NORMALIZE_UNICODE_IF_NOT_PENN = [(r"`", r"'"), (r"''", r' " ')]  # lines 33 - 34

    NORMALIZE_UNICODE = [  # lines 37 - 50
        (u'„', r'"'),
        (u'“', r'"'),
        (u'”', r'"'),
        (u'–', r'-'),
        (u'—', r' - '),
        (r' +', r' '),
        (u'´', r"'"),
        (u'([a-zA-Z])‘([a-zA-Z])', r"\g<1>'\g<2>"),
        (u'([a-zA-Z])’([a-zA-Z])', r"\g<1>'\g<2>"),
        (u'‘', r"'"),
        (u'‚', r"'"),
        (u'’', r"'"),
        (r"''", r'"'),
        (u'´´', r'"'),
        (u'…', r'...'),
    ]

    # Guillemets (with surrounding no-break spaces) -> ASCII double quotes.
    FRENCH_QUOTES = [  # lines 52 - 57
        (u'\u00A0«\u00A0', r'"'),
        (u'«\u00A0', r'"'),
        (u'«', r'"'),
        (u'\u00A0»\u00A0', r'"'),
        (u'\u00A0»', r'"'),
        (u'»', r'"'),
    ]

    # U+00A0 is the no-break space; normalize its use around punctuation/units.
    HANDLE_PSEUDO_SPACES = [  # lines 59 - 67
        (u'\u00A0%', r'%'),
        (u'nº\u00A0', u'nº '),
        (u'\u00A0:', r':'),
        (u'\u00A0ºC', u' ºC'),
        (u'\u00A0cm', r' cm'),
        (u'\u00A0\\?', u'?'),
        (u'\u00A0\\!', u'!'),
        (u'\u00A0;', r';'),
        (u',\u00A0', r', '),
        (r' +', r' '),
    ]

    # English convention: comma/period go inside the closing quote.
    EN_QUOTATION_FOLLOWED_BY_COMMA = [(r'"([,.]+)', r'\g<1>"')]

    # German/Spanish/French convention: comma/period go outside the quote.
    DE_ES_FR_QUOTATION_FOLLOWED_BY_COMMA = [
        (r',"', r'",'),
        (r'(\.+)"(\s*[^<])', r'"\g<1>\g<2>'),  # don't fix period at end of sentence
    ]

    # Digit groups separated by a no-break space: comma separator for these locales.
    DE_ES_CZ_CS_FR = [
        (u'(\\d)\u00A0(\\d)', r'\g<1>,\g<2>'),
    ]

    # All other locales: period separator.
    OTHER = [
        (u'(\\d)\u00A0(\\d)', r'\g<1>.\g<2>'),
    ]

    # Regex substitutions from replace-unicode-punctuation.perl
    # https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
    # Maps fullwidth/CJK punctuation and digits to their ASCII counterparts.
    REPLACE_UNICODE_PUNCTUATION = [
        (u",", u","),
        (r"。\s*", u". "),
        (u"、", u","),
        (u"”", u'"'),
        (u"“", u'"'),
        (u"∶", u":"),
        (u":", u":"),
        (u"?", u"?"),
        (u"《", u'"'),
        (u"》", u'"'),
        (u")", u")"),
        (u"!", u"!"),
        (u"(", u"("),
        (u";", u";"),
        (u"」", u'"'),
        (u"「", u'"'),
        (u"0", u"0"),
        (u"1", u'1'),
        (u"2", u"2"),
        (u"3", u"3"),
        (u"4", u"4"),
        (u"5", u"5"),
        (u"6", u"6"),
        (u"7", u"7"),
        (u"8", u"8"),
        (u"9", u"9"),
        (r".\s*", u". "),
        (u"~", u"~"),
        (u"’", u"'"),
        (u"…", u"..."),
        (u"━", u"-"),
        (u"〈", u"<"),
        (u"〉", u">"),
        (u"【", u"["),
        (u"】", u"]"),
        (u"%", u"%"),
    ]

    def __init__(self, lang="en", penn=True, norm_quote_commas=True, norm_numbers=True,
                 pre_replace_unicode_punct=False, post_remove_control_chars=False):
        """
        :param language: The two-letter language code.
        :type lang: str
        :param penn: Normalize Penn Treebank style quotations.
        :type penn: bool
        :param norm_quote_commas: Normalize quotations and commas
        :type norm_quote_commas: bool
        :param norm_numbers: Normalize numbers
        :type norm_numbers: bool
        :param pre_replace_unicode_punct: Replace fullwidth/CJK punctuation
            with ASCII before the main normalization pass.
        :type pre_replace_unicode_punct: bool
        :param post_remove_control_chars: Strip Unicode control characters
            (category C) after normalization.
        :type post_remove_control_chars: bool
        """
        # Build the ordered substitution pipeline for the chosen options.
        self.substitutions = [
            self.EXTRA_WHITESPACE,
            self.NORMALIZE_UNICODE,
            self.FRENCH_QUOTES,
            self.HANDLE_PSEUDO_SPACES,
        ]

        if penn:  # Adds the penn substitutions after extra_whitespace regexes.
            self.substitutions.insert(1, self.NORMALIZE_UNICODE_IF_NOT_PENN)

        if norm_quote_commas:
            if lang == "en":
                self.substitutions.append(self.EN_QUOTATION_FOLLOWED_BY_COMMA)
            elif lang in ["de", "es", "fr"]:
                self.substitutions.append(self.DE_ES_FR_QUOTATION_FOLLOWED_BY_COMMA)

        if norm_numbers:
            if lang in ["de", "es", "cz", "cs", "fr"]:
                self.substitutions.append(self.DE_ES_CZ_CS_FR)
            else:
                self.substitutions.append(self.OTHER)

        # Flatten the list of tables into a single ordered list of pairs.
        self.substitutions = list(chain(*self.substitutions))

        self.pre_replace_unicode_punct = pre_replace_unicode_punct
        self.post_remove_control_chars = post_remove_control_chars

    def normalize(self, text):
        """
        Returns a string with normalized punctuation.
        """
        # Optionally, replace unicode puncts BEFORE normalization.
        if self.pre_replace_unicode_punct:
            text = self.replace_unicode_punct(text)

        # Actual normalization: apply each substitution in pipeline order.
        for regexp, substitution in self.substitutions:
            # print(regexp, substitution)
            text = re.sub(regexp, substitution, text_type(text))
            # print(text)

        # Optionally, remove control characters AFTER normalization.
        if self.post_remove_control_chars:
            text = self.remove_control_chars(text)

        return text

    def replace_unicode_punct(self, text):
        """Replace fullwidth/CJK punctuation and digits with ASCII equivalents."""
        for regexp, substitution in self.REPLACE_UNICODE_PUNCTUATION:
            text = re.sub(regexp, substitution, text_type(text))
        return text

    def remove_control_chars(self, text):
        """Strip all Unicode control characters (category \\p{C}).

        Uses the third-party ``regex`` module, which supports Unicode
        property classes unlike the stdlib ``re``.
        """
        return regex.sub(r"\p{C}", "", text)
import os
import pkgutil
class Perluniprops:
    """
    This class is used to read lists of characters from the Perl Unicode
    Properties (see http://perldoc.perl.org/perluniprops.html).

    The files in the perluniprop.zip are extracted using the Unicode::Tussle
    module from http://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm
    """

    def __init__(self):
        # Absolute directory holding the bundled character-list files.
        self.datadir = (
            os.path.dirname(os.path.abspath(__file__)) + "/data/perluniprops/"
        )
        # Categories mirroring the Perl Unicode Properties; each name
        # corresponds to a "<name>.txt" resource under data/perluniprops/.
        self.available_categories = [
            "Close_Punctuation",
            "Currency_Symbol",
            "IsAlnum",
            "IsAlpha",
            "IsLower",
            "IsN",
            "IsSc",
            "IsSo",
            "IsUpper",
            "Line_Separator",
            "Number",
            "Open_Punctuation",
            "Punctuation",
            "Separator",
            "Symbol",
            "Lowercase_Letter",
            "Titlecase_Letter",
            "Uppercase_Letter",
            "IsPf",
            "IsPi",
            "CJKSymbols",
            "CJK",
        ]

    def chars(self, category=None):
        """
        Yield the characters of the given Perl Unicode Property category.

        These lists are very useful when porting Perl tokenizers to Python.

        >>> from sacremoses.corpus import Perluniprops
        >>> pup = Perluniprops()
        >>> list(pup.chars('Open_Punctuation'))[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c']
        True
        >>> list(pup.chars('Currency_Symbol'))[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5']
        True
        >>> pup.available_categories[:5]
        ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower']

        :return: a generator of characters given the specific unicode character category
        """
        resource = os.path.join("data", "perluniprops", category + ".txt")
        payload = pkgutil.get_data("sacremoses", resource)
        # One character per code point; the files contain no separators.
        yield from payload.decode("utf-8")
class NonbreakingPrefixes:
    """
    This is a class to read the nonbreaking prefixes textfiles from the
    Moses Machine Translation toolkit. These lists are used in the Python port
    of the Moses' word tokenizer.
    """

    def __init__(self):
        # Absolute directory holding the bundled prefix files.
        self.datadir = (
            os.path.dirname(os.path.abspath(__file__)) + "/data/nonbreaking_prefixes/"
        )
        # Full language name -> ISO code for every bundled prefix file.
        self.available_langs = {
            "catalan": "ca",
            "czech": "cs",
            "german": "de",
            "greek": "el",
            "english": "en",
            "spanish": "es",
            "finnish": "fi",
            "french": "fr",
            "irish": "ga",
            "hungarian": "hu",
            "icelandic": "is",
            "italian": "it",
            "lithuanian": "lt",
            "latvian": "lv",
            "dutch": "nl",
            "polish": "pl",
            "portuguese": "pt",
            "romanian": "ro",
            "russian": "ru",
            "slovak": "sk",
            "slovenian": "sl",
            "swedish": "sv",
            "tamil": "ta",
            "cantonese": "yue",
            "chinese": "zh",
        }
        # Also, add the lang IDs as the keys (so "en" works as well as "english").
        self.available_langs.update({v: v for v in self.available_langs.values()})

    def words(self, lang=None, ignore_lines_startswith="#"):
        """
        Yield the nonbreaking prefixes for the specified language(s).

        >>> from sacremoses.corpus import NonbreakingPrefixes
        >>> nbp = NonbreakingPrefixes()
        >>> list(nbp.words('en'))[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J']
        True
        >>> list(nbp.words('ta'))[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89']
        True

        :return: a generator words for the specified language(s).
        """
        # Resolve which prefix file(s) to read:
        #  - a known language name/code -> its single file;
        #  - None -> every bundled file;
        #  - anything else -> English as the fallback.
        if lang in self.available_langs:
            filenames = ["nonbreaking_prefix." + self.available_langs[lang]]
        elif lang is None:
            filenames = [
                "nonbreaking_prefix." + code
                for code in set(self.available_langs.values())
            ]
        else:
            filenames = ["nonbreaking_prefix.en"]

        for fname in filenames:
            resource = os.path.join("data", "nonbreaking_prefixes", fname)
            payload = pkgutil.get_data("sacremoses", resource)
            for raw_line in payload.decode("utf-8").splitlines():
                stripped = raw_line.strip()
                # Skip blank lines and comments.
                if stripped and not stripped.startswith(ignore_lines_startswith):
                    yield stripped
__all__ = ["Perluniprops", "NonbreakingPrefixes"] | /sacremoses_xt-0.0.44-py3-none-any.whl/sacremoses/corpus.py | 0.599602 | 0.330958 | corpus.py | pypi |
import os
from functools import partial
# gbk <-> big5 mappings from Mafan + Jianfan
# https://github.com/hermanschaaf/mafan
# https://code.google.com/archive/p/python-jianfan/
simplified_chinese = gbk = u"\u9515\u7691\u853c\u788d\u7231\u55f3\u5ad2\u7477\u66a7\u972d\u8c19\u94f5\u9e4c\u80ae\u8884\u5965\u5aaa\u9a9c\u9ccc\u575d\u7f62\u94af\u6446\u8d25\u5457\u9881\u529e\u7eca\u94a3\u5e2e\u7ed1\u9551\u8c24\u5265\u9971\u5b9d\u62a5\u9c8d\u9e28\u9f85\u8f88\u8d1d\u94a1\u72c8\u5907\u60eb\u9e4e\u8d32\u951b\u7ef7\u7b14\u6bd5\u6bd9\u5e01\u95ed\u835c\u54d4\u6ed7\u94cb\u7b5a\u8df8\u8fb9\u7f16\u8d2c\u53d8\u8fa9\u8fab\u82c4\u7f0f\u7b3e\u6807\u9aa0\u98d1\u98d9\u9556\u9573\u9cd4\u9cd6\u522b\u762a\u6fd2\u6ee8\u5bbe\u6448\u50a7\u7f24\u69df\u6ba1\u8191\u9554\u9acc\u9b13\u997c\u7980\u62e8\u94b5\u94c2\u9a73\u997d\u94b9\u9e41\u8865\u94b8\u8d22\u53c2\u8695\u6b8b\u60ed\u60e8\u707f\u9a96\u9eea\u82cd\u8231\u4ed3\u6ca7\u5395\u4fa7\u518c\u6d4b\u607b\u5c42\u8be7\u9538\u4faa\u9497\u6400\u63ba\u8749\u998b\u8c17\u7f20\u94f2\u4ea7\u9610\u98a4\u5181\u8c04\u8c36\u8487\u5fcf\u5a75\u9aa3\u89c7\u7985\u9561\u573a\u5c1d\u957f\u507f\u80a0\u5382\u7545\u4f25\u82cc\u6005\u960a\u9cb3\u949e\u8f66\u5f7b\u7817\u5c18\u9648\u886c\u4f27\u8c0c\u6987\u789c\u9f80\u6491\u79f0\u60e9\u8bda\u9a8b\u67a8\u67fd\u94d6\u94db\u75f4\u8fdf\u9a70\u803b\u9f7f\u70bd\u996c\u9e31\u51b2\u51b2\u866b\u5ba0\u94f3\u7574\u8e0c\u7b79\u7ef8\u4fe6\u5e31\u96e0\u6a71\u53a8\u9504\u96cf\u7840\u50a8\u89e6\u5904\u520d\u7ecc\u8e70\u4f20\u948f\u75ae\u95ef\u521b\u6006\u9524\u7f0d\u7eaf\u9e51\u7ef0\u8f8d\u9f8a\u8f9e\u8bcd\u8d50\u9e5a\u806a\u8471\u56f1\u4ece\u4e1b\u82c1\u9aa2\u679e\u51d1\u8f8f\u8e7f\u7a9c\u64ba\u9519\u9509\u9e7e\u8fbe\u54d2\u9791\u5e26\u8d37\u9a80\u7ed0\u62c5\u5355\u90f8\u63b8\u80c6\u60ee\u8bde\u5f39\u6b9a\u8d55\u7605\u7baa\u5f53\u6321\u515a\u8361\u6863\u8c20\u7800\u88c6\u6363\u5c9b\u7977\u5bfc\u76d7\u7118\u706f\u9093\u956b\u654c\u6da4\u9012\u7f14\u7c74\u8bcb\u8c1b\u7ee8\u89cc\u955d\u98a0\u70b9\u57ab\u7535\u5dc5\u94bf\u766b\u9493\u8c03\u94eb\u9cb7\u8c0d\u53e0\u9cbd\u9489\u9876\u952d\u8ba2\u94e4\u4e22\u94e5\u4e1c\u52a8\u680b\u51bb\u5cbd\u9e2b\u7aa6\u728a\u72ec\u8bfb\u8d4c\u9540\u6e0e\u691f\u724d\u7b03\u9ee9\u953b\u6
5ad\u7f0e\u7c16\u5151\u961f\u5bf9\u603c\u9566\u5428\u987f\u949d\u7096\u8db8\u593a\u5815\u94ce\u9e45\u989d\u8bb9\u6076\u997f\u8c14\u57a9\u960f\u8f6d\u9507\u9537\u9e57\u989a\u989b\u9cc4\u8bf6\u513f\u5c14\u9975\u8d30\u8fe9\u94d2\u9e38\u9c95\u53d1\u7f5a\u9600\u73d0\u77fe\u9492\u70e6\u8d29\u996d\u8bbf\u7eba\u94ab\u9c82\u98de\u8bfd\u5e9f\u8d39\u7eef\u9544\u9cb1\u7eb7\u575f\u594b\u6124\u7caa\u507e\u4e30\u67ab\u950b\u98ce\u75af\u51af\u7f1d\u8bbd\u51e4\u6ca3\u80a4\u8f90\u629a\u8f85\u8d4b\u590d\u8d1f\u8ba3\u5987\u7f1a\u51eb\u9a78\u7ec2\u7ecb\u8d59\u9eb8\u9c8b\u9cc6\u9486\u8be5\u9499\u76d6\u8d45\u6746\u8d76\u79c6\u8d63\u5c34\u64c0\u7ec0\u5188\u521a\u94a2\u7eb2\u5c97\u6206\u9550\u777e\u8bf0\u7f1f\u9506\u6401\u9e3d\u9601\u94ec\u4e2a\u7ea5\u9549\u988d\u7ed9\u4e98\u8d53\u7ee0\u9ca0\u9f9a\u5bab\u5de9\u8d21\u94a9\u6c9f\u82df\u6784\u8d2d\u591f\u8bdf\u7f11\u89cf\u86ca\u987e\u8bc2\u6bc2\u94b4\u9522\u9e2a\u9e44\u9e58\u5250\u6302\u9e39\u63b4\u5173\u89c2\u9986\u60ef\u8d2f\u8bd6\u63bc\u9e73\u9ccf\u5e7f\u72b7\u89c4\u5f52\u9f9f\u95fa\u8f68\u8be1\u8d35\u523d\u5326\u523f\u59ab\u6867\u9c91\u9cdc\u8f8a\u6eda\u886e\u7ef2\u9ca7\u9505\u56fd\u8fc7\u57da\u5459\u5e3c\u6901\u8748\u94ea\u9a87\u97e9\u6c49\u961a\u7ed7\u9889\u53f7\u704f\u98a2\u9602\u9e64\u8d3a\u8bc3\u9616\u86ce\u6a2a\u8f70\u9e3f\u7ea2\u9ec9\u8ba7\u836d\u95f3\u9c8e\u58f6\u62a4\u6caa\u6237\u6d52\u9e55\u54d7\u534e\u753b\u5212\u8bdd\u9a85\u6866\u94e7\u6000\u574f\u6b22\u73af\u8fd8\u7f13\u6362\u5524\u75ea\u7115\u6da3\u5942\u7f33\u953e\u9ca9\u9ec4\u8c0e\u9cc7\u6325\u8f89\u6bc1\u8d3f\u79fd\u4f1a\u70e9\u6c47\u8bb3\u8bf2\u7ed8\u8bd9\u835f\u54d5\u6d4d\u7f0b\u73f2\u6656\u8364\u6d51\u8be8\u9984\u960d\u83b7\u8d27\u7978\u94ac\u956c\u51fb\u673a\u79ef\u9965\u8ff9\u8ba5\u9e21\u7ee9\u7f09\u6781\u8f91\u7ea7\u6324\u51e0\u84df\u5242\u6d4e\u8ba1\u8bb0\u9645\u7ee7\u7eaa\u8ba6\u8bd8\u8360\u53fd\u54dc\u9aa5\u7391\u89ca\u9f51\u77f6\u7f81\u867f\u8dfb\u9701\u9c9a\u9cab\u5939\u835a\u988a\u8d3e\u94be\u4ef7\u9a7e\u90cf\u6d43\u94d7\u9553\u86f2\u6b7c\u76d1\u575a\u7b3a\u95f
4\u8270\u7f04\u8327\u68c0\u78b1\u7877\u62e3\u6361\u7b80\u4fed\u51cf\u8350\u69db\u9274\u8df5\u8d31\u89c1\u952e\u8230\u5251\u996f\u6e10\u6e85\u6da7\u8c0f\u7f23\u620b\u622c\u7751\u9e63\u7b15\u9ca3\u97af\u5c06\u6d46\u848b\u6868\u5956\u8bb2\u9171\u7edb\u7f30\u80f6\u6d47\u9a84\u5a07\u6405\u94f0\u77eb\u4fa5\u811a\u997a\u7f34\u7ede\u8f7f\u8f83\u6322\u5ce4\u9e6a\u9c9b\u9636\u8282\u6d01\u7ed3\u8beb\u5c4a\u7596\u988c\u9c92\u7d27\u9526\u4ec5\u8c28\u8fdb\u664b\u70ec\u5c3d\u52b2\u8346\u830e\u537a\u8369\u9991\u7f19\u8d46\u89d0\u9cb8\u60ca\u7ecf\u9888\u9759\u955c\u5f84\u75c9\u7ade\u51c0\u522d\u6cfe\u8ff3\u5f2a\u80eb\u9753\u7ea0\u53a9\u65e7\u9604\u9e20\u9e6b\u9a79\u4e3e\u636e\u952f\u60e7\u5267\u8bb5\u5c66\u6989\u98d3\u949c\u9514\u7aad\u9f83\u9e43\u7ee2\u9529\u954c\u96bd\u89c9\u51b3\u7edd\u8c32\u73cf\u94a7\u519b\u9a8f\u76b2\u5f00\u51ef\u5240\u57b2\u5ffe\u607a\u94e0\u9534\u9f9b\u95f6\u94aa\u94d0\u9897\u58f3\u8bfe\u9a92\u7f02\u8f72\u94b6\u951e\u9894\u57a6\u6073\u9f88\u94ff\u62a0\u5e93\u88e4\u55be\u5757\u4fa9\u90d0\u54d9\u810d\u5bbd\u72ef\u9acb\u77ff\u65f7\u51b5\u8bd3\u8bf3\u909d\u5739\u7ea9\u8d36\u4e8f\u5cbf\u7aa5\u9988\u6e83\u532e\u8489\u6126\u8069\u7bd1\u9603\u951f\u9cb2\u6269\u9614\u86f4\u8721\u814a\u83b1\u6765\u8d56\u5d03\u5f95\u6d9e\u6fd1\u8d49\u7750\u94fc\u765e\u7c41\u84dd\u680f\u62e6\u7bee\u9611\u5170\u6f9c\u8c30\u63fd\u89c8\u61d2\u7f06\u70c2\u6ee5\u5c9a\u6984\u6593\u9567\u8934\u7405\u9606\u9512\u635e\u52b3\u6d9d\u5520\u5d02\u94d1\u94f9\u75e8\u4e50\u9cd3\u956d\u5792\u7c7b\u6cea\u8bd4\u7f27\u7bf1\u72f8\u79bb\u9ca4\u793c\u4e3d\u5389\u52b1\u783e\u5386\u6ca5\u96b6\u4fea\u90e6\u575c\u82c8\u8385\u84e0\u5456\u9026\u9a8a\u7f21\u67a5\u680e\u8f79\u783a\u9502\u9e42\u75a0\u7c9d\u8dde\u96f3\u9ca1\u9ce2\u4fe9\u8054\u83b2\u8fde\u9570\u601c\u6d9f\u5e18\u655b\u8138\u94fe\u604b\u70bc\u7ec3\u8539\u5941\u6f4b\u740f\u6b93\u88e2\u88e3\u9ca2\u7cae\u51c9\u4e24\u8f86\u8c05\u9b49\u7597\u8fbd\u9563\u7f2d\u948c\u9e69\u730e\u4e34\u90bb\u9cde\u51db\u8d41\u853a\u5eea\u6aa9\u8f9a\u8e8f\u9f84\u94c3\u7075\u5cad\
u9886\u7eeb\u68c2\u86cf\u9cae\u998f\u5218\u6d4f\u9a9d\u7efa\u954f\u9e68\u9f99\u804b\u5499\u7b3c\u5784\u62e2\u9647\u830f\u6cf7\u73d1\u680a\u80e7\u783b\u697c\u5a04\u6402\u7bd3\u507b\u848c\u55bd\u5d5d\u9542\u7618\u8027\u877c\u9ac5\u82a6\u5362\u9885\u5e90\u7089\u63b3\u5364\u864f\u9c81\u8d42\u7984\u5f55\u9646\u5786\u64b8\u565c\u95fe\u6cf8\u6e0c\u680c\u6a79\u8f73\u8f82\u8f98\u6c07\u80ea\u9e2c\u9e6d\u823b\u9c88\u5ce6\u631b\u5b6a\u6ee6\u4e71\u8114\u5a08\u683e\u9e3e\u92ae\u62a1\u8f6e\u4f26\u4ed1\u6ca6\u7eb6\u8bba\u56f5\u841d\u7f57\u903b\u9523\u7ba9\u9aa1\u9a86\u7edc\u8366\u7321\u6cfa\u6924\u8136\u9559\u9a74\u5415\u94dd\u4fa3\u5c61\u7f15\u8651\u6ee4\u7eff\u6988\u891b\u950a\u5452\u5988\u739b\u7801\u8682\u9a6c\u9a82\u5417\u551b\u5b37\u6769\u4e70\u9ea6\u5356\u8fc8\u8109\u52a2\u7792\u9992\u86ee\u6ee1\u8c29\u7f26\u9558\u98a1\u9cd7\u732b\u951a\u94c6\u8d38\u9ebd\u6ca1\u9541\u95e8\u95f7\u4eec\u626a\u7116\u61d1\u9494\u9530\u68a6\u772f\u8c1c\u5f25\u89c5\u5e42\u8288\u8c27\u7315\u7962\u7ef5\u7f05\u6e11\u817c\u9efe\u5e99\u7f08\u7f2a\u706d\u60af\u95fd\u95f5\u7f17\u9e23\u94ed\u8c2c\u8c1f\u84e6\u998d\u6b81\u9546\u8c0b\u4ea9\u94bc\u5450\u94a0\u7eb3\u96be\u6320\u8111\u607c\u95f9\u94d9\u8bb7\u9981\u5185\u62df\u817b\u94cc\u9cb5\u64b5\u8f87\u9cb6\u917f\u9e1f\u8311\u8885\u8042\u556e\u954a\u954d\u9667\u8616\u55eb\u989f\u8e51\u67e0\u72de\u5b81\u62e7\u6cde\u82ce\u549b\u804d\u94ae\u7ebd\u8113\u6d53\u519c\u4fac\u54dd\u9a7d\u9495\u8bfa\u50a9\u759f\u6b27\u9e25\u6bb4\u5455\u6ca4\u8bb4\u6004\u74ef\u76d8\u8e52\u5e9e\u629b\u75b1\u8d54\u8f94\u55b7\u9e4f\u7eb0\u7f74\u94cd\u9a97\u8c1d\u9a88\u98d8\u7f25\u9891\u8d2b\u5ad4\u82f9\u51ed\u8bc4\u6cfc\u9887\u948b\u6251\u94fa\u6734\u8c31\u9564\u9568\u6816\u8110\u9f50\u9a91\u5c82\u542f\u6c14\u5f03\u8bab\u8572\u9a90\u7eee\u6864\u789b\u9880\u9883\u9ccd\u7275\u948e\u94c5\u8fc1\u7b7e\u8c26\u94b1\u94b3\u6f5c\u6d45\u8c34\u5811\u4f65\u8368\u60ad\u9a9e\u7f31\u6920\u94a4\u67aa\u545b\u5899\u8537\u5f3a\u62a2\u5af1\u6a2f\u6217\u709d\u9516\u9535\u956a\u7f9f\u8dc4\u9539\u6865\u4e54\u4
fa8\u7fd8\u7a8d\u8bee\u8c2f\u835e\u7f32\u7857\u8df7\u7a83\u60ec\u9532\u7ba7\u94a6\u4eb2\u5bdd\u9513\u8f7b\u6c22\u503e\u9877\u8bf7\u5e86\u63ff\u9cad\u743c\u7a77\u8315\u86f1\u5def\u8d47\u866e\u9cc5\u8d8b\u533a\u8eaf\u9a71\u9f8b\u8bce\u5c96\u9612\u89d1\u9e32\u98a7\u6743\u529d\u8be0\u7efb\u8f81\u94e8\u5374\u9e4a\u786e\u9615\u9619\u60ab\u8ba9\u9976\u6270\u7ed5\u835b\u5a06\u6861\u70ed\u97e7\u8ba4\u7eab\u996a\u8f6b\u8363\u7ed2\u5d58\u877e\u7f1b\u94f7\u98a6\u8f6f\u9510\u86ac\u95f0\u6da6\u6d12\u8428\u98d2\u9cc3\u8d5b\u4f1e\u6bf5\u7cc1\u4e27\u9a9a\u626b\u7f2b\u6da9\u556c\u94ef\u7a51\u6740\u5239\u7eb1\u94e9\u9ca8\u7b5b\u6652\u917e\u5220\u95ea\u9655\u8d61\u7f2e\u8baa\u59d7\u9a9f\u9490\u9cdd\u5892\u4f24\u8d4f\u57a7\u6b87\u89de\u70e7\u7ecd\u8d4a\u6444\u6151\u8bbe\u538d\u6ee0\u7572\u7ec5\u5ba1\u5a76\u80be\u6e17\u8bdc\u8c02\u6e16\u58f0\u7ef3\u80dc\u5e08\u72ee\u6e7f\u8bd7\u65f6\u8680\u5b9e\u8bc6\u9a76\u52bf\u9002\u91ca\u9970\u89c6\u8bd5\u8c25\u57d8\u83b3\u5f11\u8f7c\u8d33\u94c8\u9ca5\u5bff\u517d\u7ef6\u67a2\u8f93\u4e66\u8d4e\u5c5e\u672f\u6811\u7ad6\u6570\u6445\u7ebe\u5e05\u95e9\u53cc\u8c01\u7a0e\u987a\u8bf4\u7855\u70c1\u94c4\u4e1d\u9972\u53ae\u9a77\u7f0c\u9536\u9e36\u8038\u6002\u9882\u8bbc\u8bf5\u64de\u85ae\u998a\u98d5\u953c\u82cf\u8bc9\u8083\u8c21\u7a23\u867d\u968f\u7ee5\u5c81\u8c07\u5b59\u635f\u7b0b\u836a\u72f2\u7f29\u7410\u9501\u5522\u7743\u736d\u631e\u95fc\u94ca\u9cce\u53f0\u6001\u949b\u9c90\u644a\u8d2a\u762b\u6ee9\u575b\u8c2d\u8c08\u53f9\u6619\u94bd\u952c\u9878\u6c64\u70eb\u50a5\u9967\u94f4\u9557\u6d9b\u7ee6\u8ba8\u97ec\u94fd\u817e\u8a8a\u9511\u9898\u4f53\u5c49\u7f07\u9e48\u9617\u6761\u7c9c\u9f86\u9ca6\u8d34\u94c1\u5385\u542c\u70c3\u94dc\u7edf\u6078\u5934\u94ad\u79c3\u56fe\u948d\u56e2\u629f\u9893\u8715\u9968\u8131\u9e35\u9a6e\u9a7c\u692d\u7ba8\u9f0d\u889c\u5a32\u817d\u5f2f\u6e7e\u987d\u4e07\u7ea8\u7efe\u7f51\u8f8b\u97e6\u8fdd\u56f4\u4e3a\u6f4d\u7ef4\u82c7\u4f1f\u4f2a\u7eac\u8c13\u536b\u8bff\u5e0f\u95f1\u6ca9\u6da0\u73ae\u97ea\u709c\u9c94\u6e29\u95fb\u7eb9\u7a33\u95ee\u960c\u74e
e\u631d\u8717\u6da1\u7a9d\u5367\u83b4\u9f8c\u545c\u94a8\u4e4c\u8bec\u65e0\u829c\u5434\u575e\u96fe\u52a1\u8bef\u90ac\u5e91\u6003\u59a9\u9a9b\u9e49\u9e5c\u9521\u727a\u88ad\u4e60\u94e3\u620f\u7ec6\u9969\u960b\u73ba\u89cb\u867e\u8f96\u5ce1\u4fa0\u72ed\u53a6\u5413\u7856\u9c9c\u7ea4\u8d24\u8854\u95f2\u663e\u9669\u73b0\u732e\u53bf\u9985\u7fa1\u5baa\u7ebf\u82cb\u83b6\u85d3\u5c98\u7303\u5a34\u9e47\u75eb\u869d\u7c7c\u8df9\u53a2\u9576\u4e61\u8be6\u54cd\u9879\u8297\u9977\u9aa7\u7f03\u98e8\u8427\u56a3\u9500\u6653\u5578\u54d3\u6f47\u9a81\u7ee1\u67ad\u7bab\u534f\u631f\u643a\u80c1\u8c10\u5199\u6cfb\u8c22\u4eb5\u64b7\u7ec1\u7f2c\u950c\u8845\u5174\u9649\u8365\u51f6\u6c79\u9508\u7ee3\u9990\u9e3a\u865a\u5618\u987b\u8bb8\u53d9\u7eea\u7eed\u8be9\u987c\u8f69\u60ac\u9009\u7663\u7eda\u8c16\u94c9\u955f\u5b66\u8c11\u6cf6\u9cd5\u52cb\u8be2\u5bfb\u9a6f\u8bad\u8baf\u900a\u57d9\u6d54\u9c9f\u538b\u9e26\u9e2d\u54d1\u4e9a\u8bb6\u57ad\u5a05\u6860\u6c29\u9609\u70df\u76d0\u4e25\u5ca9\u989c\u960e\u8273\u538c\u781a\u5f66\u8c1a\u9a8c\u53a3\u8d5d\u4fe8\u5156\u8c33\u6079\u95eb\u917d\u9b47\u990d\u9f39\u9e2f\u6768\u626c\u75a1\u9633\u75d2\u517b\u6837\u7080\u7476\u6447\u5c27\u9065\u7a91\u8c23\u836f\u8f7a\u9e5e\u9cd0\u7237\u9875\u4e1a\u53f6\u9765\u8c12\u90ba\u6654\u70e8\u533b\u94f1\u9890\u9057\u4eea\u8681\u827a\u4ebf\u5fc6\u4e49\u8be3\u8bae\u8c0a\u8bd1\u5f02\u7ece\u8bd2\u5453\u5cc4\u9974\u603f\u9a7f\u7f22\u8f76\u8d3b\u9487\u9552\u9571\u7617\u8223\u836b\u9634\u94f6\u996e\u9690\u94df\u763e\u6a31\u5a74\u9e70\u5e94\u7f28\u83b9\u8424\u8425\u8367\u8747\u8d62\u9896\u8314\u83ba\u8426\u84e5\u6484\u5624\u6ee2\u6f46\u748e\u9e66\u763f\u988f\u7f42\u54df\u62e5\u4f63\u75c8\u8e0a\u548f\u955b\u4f18\u5fe7\u90ae\u94c0\u72b9\u8bf1\u83b8\u94d5\u9c7f\u8206\u9c7c\u6e14\u5a31\u4e0e\u5c7f\u8bed\u72f1\u8a89\u9884\u9a6d\u4f1b\u4fe3\u8c00\u8c15\u84e3\u5d5b\u996b\u9608\u59aa\u7ea1\u89ce\u6b24\u94b0\u9e46\u9e6c\u9f89\u9e33\u6e0a\u8f95\u56ed\u5458\u5706\u7f18\u8fdc\u6a7c\u9e22\u9f0b\u7ea6\u8dc3\u94a5\u7ca4\u60a6\u9605\u94ba\u90e7\u5300\u9668\
u8fd0\u8574\u915d\u6655\u97f5\u90d3\u82b8\u607d\u6120\u7ead\u97eb\u6b92\u6c32\u6742\u707e\u8f7d\u6512\u6682\u8d5e\u74d2\u8db1\u933e\u8d43\u810f\u9a75\u51ff\u67a3\u8d23\u62e9\u5219\u6cfd\u8d5c\u5567\u5e3b\u7ba6\u8d3c\u8c2e\u8d60\u7efc\u7f2f\u8f67\u94e1\u95f8\u6805\u8bc8\u658b\u503a\u6be1\u76cf\u65a9\u8f97\u5d2d\u6808\u6218\u7efd\u8c35\u5f20\u6da8\u5e10\u8d26\u80c0\u8d75\u8bcf\u948a\u86f0\u8f99\u9517\u8fd9\u8c2a\u8f84\u9e67\u8d1e\u9488\u4fa6\u8bca\u9547\u9635\u6d48\u7f1c\u6862\u8f78\u8d48\u796f\u9e29\u6323\u7741\u72f0\u4e89\u5e27\u75c7\u90d1\u8bc1\u8be4\u5ce5\u94b2\u94ee\u7b5d\u7ec7\u804c\u6267\u7eb8\u631a\u63b7\u5e1c\u8d28\u6ede\u9a98\u6809\u6800\u8f75\u8f7e\u8d3d\u9e37\u86f3\u7d77\u8e2c\u8e2f\u89ef\u949f\u7ec8\u79cd\u80bf\u4f17\u953a\u8bcc\u8f74\u76b1\u663c\u9aa4\u7ea3\u7ec9\u732a\u8bf8\u8bdb\u70db\u77a9\u5631\u8d2e\u94f8\u9a7b\u4f2b\u69e0\u94e2\u4e13\u7816\u8f6c\u8d5a\u556d\u9994\u989e\u6869\u5e84\u88c5\u5986\u58ee\u72b6\u9525\u8d58\u5760\u7f00\u9a93\u7f12\u8c06\u51c6\u7740\u6d4a\u8bfc\u956f\u5179\u8d44\u6e0d\u8c18\u7f01\u8f8e\u8d40\u7726\u9531\u9f87\u9cbb\u8e2a\u603b\u7eb5\u506c\u90b9\u8bf9\u9a7a\u9cb0\u8bc5\u7ec4\u955e\u94bb\u7f35\u8e9c\u9cdf\u7ff1\u5e76\u535c\u6c89\u4e11\u6dc0\u8fed\u6597\u8303\u5e72\u768b\u7845\u67dc\u540e\u4f19\u79f8\u6770\u8bc0\u5938\u91cc\u51cc\u4e48\u9709\u637b\u51c4\u6266\u5723\u5c38\u62ac\u6d82\u6d3c\u5582\u6c61\u9528\u54b8\u874e\u5f5d\u6d8c\u6e38\u5401\u5fa1\u613f\u5cb3\u4e91\u7076\u624e\u672d\u7b51\u4e8e\u5fd7\u6ce8\u51cb\u8ba0\u8c2b\u90c4\u52d0\u51fc\u5742\u5785\u57b4\u57ef\u57dd\u82d8\u836c\u836e\u839c\u83bc\u83f0\u85c1\u63f8\u5412\u5423\u5494\u549d\u54b4\u5658\u567c\u56af\u5e5e\u5c99\u5d74\u5f77\u5fbc\u72b8\u72cd\u9980\u9987\u9993\u9995\u6123\u61b7\u61d4\u4e2c\u6e86\u6edf\u6eb7\u6f24\u6f74\u6fb9\u752f\u7e9f\u7ed4\u7ef1\u73c9\u67a7\u684a\u6849\u69d4\u6a65\u8f71\u8f77\u8d4d\u80b7\u80e8\u98da\u7173\u7145\u7198\u610d\u6dfc\u781c\u78d9\u770d\u949a\u94b7\u94d8\u94de\u9503\u950d\u950e\u950f\u9518\u951d\u952a\u952b\u953f\u9545\u954e\u9562\u9
565\u9569\u9572\u7a06\u9e4b\u9e5b\u9e71\u75ac\u75b4\u75d6\u766f\u88e5\u8941\u8022\u98a5\u87a8\u9eb4\u9c85\u9c86\u9c87\u9c9e\u9cb4\u9cba\u9cbc\u9cca\u9ccb\u9cd8\u9cd9\u9792\u97b4\u9f44"
traditional_chinese = big5 = u"\u9312\u769a\u85f9\u7919\u611b\u566f\u5b21\u74a6\u66d6\u9744\u8af3\u92a8\u9d6a\u9aaf\u8956\u5967\u5abc\u9a41\u9c32\u58e9\u7f77\u9200\u64fa\u6557\u5504\u9812\u8fa6\u7d46\u9211\u5e6b\u7d81\u938a\u8b17\u525d\u98fd\u5bf6\u5831\u9b91\u9d07\u9f59\u8f29\u8c9d\u92c7\u72fd\u5099\u618a\u9d6f\u8cc1\u931b\u7e43\u7b46\u7562\u6583\u5e63\u9589\u84fd\u55f6\u6f77\u924d\u7bf3\u8e55\u908a\u7de8\u8cb6\u8b8a\u8faf\u8fae\u8290\u7df6\u7c69\u6a19\u9a43\u98ae\u98c6\u93e2\u9463\u9c3e\u9c49\u5225\u765f\u7015\u6ff1\u8cd3\u64ef\u5110\u7e7d\u6ab3\u6baf\u81cf\u944c\u9ad5\u9b22\u9905\u7a1f\u64a5\u7f3d\u9251\u99c1\u9911\u9238\u9d53\u88dc\u923d\u8ca1\u53c3\u8836\u6b98\u615a\u6158\u71e6\u9a42\u9ef2\u84bc\u8259\u5009\u6ec4\u5ec1\u5074\u518a\u6e2c\u60fb\u5c64\u8a6b\u9364\u5115\u91f5\u6519\u647b\u87ec\u995e\u8b92\u7e8f\u93df\u7522\u95e1\u986b\u56c5\u8ac2\u8b96\u8546\u61fa\u5b0b\u9a4f\u8998\u79aa\u9414\u5834\u5617\u9577\u511f\u8178\u5ee0\u66a2\u5000\u8407\u60b5\u95b6\u9be7\u9214\u8eca\u5fb9\u7868\u5875\u9673\u896f\u5096\u8af6\u6aec\u78e3\u9f54\u6490\u7a31\u61f2\u8aa0\u9a01\u68d6\u6a89\u92ee\u943a\u7661\u9072\u99b3\u6065\u9f52\u71be\u98ed\u9d1f\u6c96\u885d\u87f2\u5bf5\u9283\u7587\u8e8a\u7c4c\u7da2\u5114\u5e6c\u8b8e\u6ae5\u5eda\u92e4\u96db\u790e\u5132\u89f8\u8655\u82bb\u7d40\u8e95\u50b3\u91e7\u7621\u95d6\u5275\u6134\u9318\u7d9e\u7d14\u9d89\u7dbd\u8f1f\u9f6a\u8fad\u8a5e\u8cdc\u9dbf\u8070\u8525\u56ea\u5f9e\u53e2\u84ef\u9a44\u6a05\u6e4a\u8f33\u8ea5\u7ac4\u651b\u932f\u92bc\u9e7a\u9054\u5660\u97c3\u5e36\u8cb8\u99d8\u7d3f\u64d4\u55ae\u9132\u64a3\u81bd\u619a\u8a95\u5f48\u6bab\u8ce7\u7649\u7c1e\u7576\u64cb\u9ee8\u8569\u6a94\u8b9c\u78ad\u8960\u6417\u5cf6\u79b1\u5c0e\u76dc\u71fe\u71c8\u9127\u9419\u6575\u6ecc\u905e\u7de0\u7cf4\u8a46\u8ae6\u7d88\u89bf\u93d1\u985b\u9ede\u588a\u96fb\u5dd4\u923f\u7672\u91e3\u8abf\u929a\u9bdb\u8adc\u758a\u9c08\u91d8\u9802\u9320\u8a02\u92cc\u4e1f\u92a9\u6771\u52d5\u68df\u51cd\u5d20\u9d87\u7ac7\u72a2\u7368\u8b80\u8ced\u934d\u7006\u6add\u7258\u7be4\u9ef7\u935b\
u65b7\u7dde\u7c6a\u514c\u968a\u5c0d\u61df\u9413\u5678\u9813\u920d\u71c9\u8e89\u596a\u58ae\u9438\u9d5d\u984d\u8a1b\u60e1\u9913\u8ae4\u580a\u95bc\u8edb\u92e8\u9354\u9d9a\u984e\u9853\u9c77\u8a92\u5152\u723e\u990c\u8cb3\u9087\u927a\u9d2f\u9b9e\u767c\u7f70\u95a5\u743a\u792c\u91e9\u7169\u8ca9\u98ef\u8a2a\u7d21\u9201\u9b74\u98db\u8ab9\u5ee2\u8cbb\u7dcb\u9428\u9be1\u7d1b\u58b3\u596e\u61a4\u7cde\u50e8\u8c50\u6953\u92d2\u98a8\u760b\u99ae\u7e2b\u8af7\u9cf3\u7043\u819a\u8f3b\u64ab\u8f14\u8ce6\u5fa9\u8ca0\u8a03\u5a66\u7e1b\u9ce7\u99d9\u7d31\u7d3c\u8cfb\u9ea9\u9b92\u9c12\u91d3\u8a72\u9223\u84cb\u8cc5\u687f\u8d95\u7a08\u8d1b\u5c37\u641f\u7d3a\u5ca1\u525b\u92fc\u7db1\u5d17\u6207\u93ac\u776a\u8aa5\u7e1e\u92ef\u64f1\u9d3f\u95a3\u927b\u500b\u7d07\u9398\u6f41\u7d66\u4e99\u8ce1\u7d86\u9bc1\u9f94\u5bae\u978f\u8ca2\u9264\u6e9d\u830d\u69cb\u8cfc\u5920\u8a6c\u7df1\u89af\u8831\u9867\u8a41\u8f42\u9237\u932e\u9d23\u9d60\u9dbb\u526e\u639b\u9d30\u6451\u95dc\u89c0\u9928\u6163\u8cab\u8a7f\u645c\u9e1b\u9c25\u5ee3\u7377\u898f\u6b78\u9f9c\u95a8\u8ecc\u8a6d\u8cb4\u528a\u532d\u528c\u5aaf\u6a9c\u9bad\u9c56\u8f25\u6efe\u889e\u7dc4\u9bc0\u934b\u570b\u904e\u581d\u54bc\u5e57\u69e8\u87c8\u927f\u99ed\u97d3\u6f22\u95de\u7d4e\u9821\u865f\u705d\u9865\u95a1\u9db4\u8cc0\u8a36\u95d4\u8823\u6a6b\u8f5f\u9d3b\u7d05\u9ecc\u8a0c\u8452\u958e\u9c5f\u58fa\u8b77\u6eec\u6236\u6ef8\u9d98\u5629\u83ef\u756b\u5283\u8a71\u9a4a\u6a3a\u93f5\u61f7\u58de\u6b61\u74b0\u9084\u7de9\u63db\u559a\u7613\u7165\u6e19\u5950\u7e6f\u9370\u9bc7\u9ec3\u8b0a\u9c09\u63ee\u8f1d\u6bc0\u8cc4\u7a62\u6703\u71f4\u532f\u8af1\u8aa8\u7e6a\u8a7c\u8588\u5666\u6fae\u7e62\u743f\u6689\u8477\u6e3e\u8ae2\u991b\u95bd\u7372\u8ca8\u798d\u9225\u944a\u64ca\u6a5f\u7a4d\u9951\u8de1\u8b4f\u96de\u7e3e\u7ddd\u6975\u8f2f\u7d1a\u64e0\u5e7e\u858a\u5291\u6fdf\u8a08\u8a18\u969b\u7e7c\u7d00\u8a10\u8a70\u85ba\u5630\u568c\u9a65\u74a3\u89ac\u9f4f\u78ef\u7f88\u8806\u8e8b\u973d\u9c6d\u9bfd\u593e\u83a2\u9830\u8cc8\u9240\u50f9\u99d5\u90df\u6d79\u92cf\u93b5\u87ef\u6bb2\u76e3\u5805\u7b8b\u9
593\u8271\u7dd8\u7e6d\u6aa2\u583f\u9e7c\u63c0\u64bf\u7c21\u5109\u6e1b\u85a6\u6abb\u9452\u8e10\u8ce4\u898b\u9375\u8266\u528d\u991e\u6f38\u6ffa\u6f97\u8aeb\u7e11\u6214\u6229\u77bc\u9dbc\u7b67\u9c39\u97c9\u5c07\u6f3f\u8523\u69f3\u734e\u8b1b\u91ac\u7d73\u97c1\u81a0\u6f86\u9a55\u5b0c\u652a\u9278\u77ef\u50e5\u8173\u9903\u7e73\u7d5e\u8f4e\u8f03\u649f\u5da0\u9de6\u9bab\u968e\u7bc0\u6f54\u7d50\u8aa1\u5c46\u7664\u981c\u9b9a\u7dca\u9326\u50c5\u8b39\u9032\u6649\u71fc\u76e1\u52c1\u834a\u8396\u5df9\u85ce\u9949\u7e09\u8d10\u89b2\u9be8\u9a5a\u7d93\u9838\u975c\u93e1\u5f91\u75d9\u7af6\u51c8\u5244\u6d87\u9015\u5f33\u811b\u975a\u7cfe\u5ec4\u820a\u9b2e\u9ce9\u9df2\u99d2\u8209\u64da\u92f8\u61fc\u5287\u8a4e\u5c68\u6af8\u98b6\u9245\u92e6\u7ab6\u9f5f\u9d51\u7d79\u9308\u942b\u96cb\u89ba\u6c7a\u7d55\u8b4e\u73a8\u921e\u8ecd\u99ff\u76b8\u958b\u51f1\u5274\u584f\u613e\u6137\u93a7\u9347\u9f95\u958c\u9227\u92ac\u9846\u6bbc\u8ab2\u9a0d\u7dd9\u8efb\u9233\u9301\u9837\u58be\u61c7\u9f66\u93d7\u6473\u5eab\u8932\u56b3\u584a\u5108\u9136\u5672\u81be\u5bec\u736a\u9ad6\u7926\u66e0\u6cc1\u8a86\u8a91\u913a\u58d9\u7e8a\u8cba\u8667\u5dcb\u7aba\u994b\u6f70\u5331\u8562\u6192\u8075\u7c23\u95ab\u9315\u9be4\u64f4\u95ca\u8810\u881f\u81d8\u840a\u4f86\u8cf4\u5d0d\u5fa0\u6df6\u7028\u8cda\u775e\u9338\u7669\u7c5f\u85cd\u6b04\u6514\u7c43\u95cc\u862d\u703e\u8b95\u652c\u89bd\u61f6\u7e9c\u721b\u6feb\u5d50\u6b16\u6595\u946d\u8964\u746f\u95ac\u92c3\u6488\u52de\u6f87\u562e\u5d97\u92a0\u9412\u7646\u6a02\u9c33\u9433\u58d8\u985e\u6dda\u8a84\u7e32\u7c6c\u8c8d\u96e2\u9bc9\u79ae\u9e97\u53b2\u52f5\u792b\u6b77\u701d\u96b8\u5137\u9148\u58e2\u85f6\u849e\u863a\u56a6\u9090\u9a6a\u7e2d\u6aea\u6adf\u8f62\u792a\u92f0\u9e1d\u7658\u7cf2\u8e92\u9742\u9c7a\u9c67\u5006\u806f\u84ee\u9023\u942e\u6190\u6f23\u7c3e\u6582\u81c9\u93c8\u6200\u7149\u7df4\u861e\u5969\u7032\u7489\u6bae\u8933\u895d\u9c31\u7ce7\u6dbc\u5169\u8f1b\u8ad2\u9b4e\u7642\u907c\u9410\u7e5a\u91d5\u9def\u7375\u81e8\u9130\u9c57\u51dc\u8cc3\u85fa\u5ee9\u6a81\u8f54\u8eaa\u9f61\u9234\u9748\u5db
a\u9818\u7dbe\u6b1e\u87f6\u9bea\u993e\u5289\u700f\u9a2e\u7db9\u93a6\u9dda\u9f8d\u807e\u56a8\u7c60\u58df\u650f\u96b4\u8622\u7027\u74cf\u6af3\u6727\u7931\u6a13\u5a41\u645f\u7c0d\u50c2\u851e\u560d\u5d81\u93e4\u763a\u802c\u87bb\u9acf\u8606\u76e7\u9871\u5eec\u7210\u64c4\u9e75\u865c\u9b6f\u8cc2\u797f\u9304\u9678\u58da\u64fc\u5695\u95ad\u7018\u6de5\u6ae8\u6ad3\u8f64\u8f05\u8f46\u6c0c\u81da\u9e15\u9dfa\u826b\u9c78\u5dd2\u6523\u5b7f\u7064\u4e82\u81e0\u5b4c\u6b12\u9e1e\u947e\u6384\u8f2a\u502b\u4f96\u6dea\u7db8\u8ad6\u5707\u863f\u7f85\u908f\u947c\u7c6e\u9a3e\u99f1\u7d61\u7296\u7380\u6ffc\u6b0f\u8161\u93cd\u9a62\u5442\u92c1\u4fb6\u5c62\u7e37\u616e\u6ffe\u7da0\u6ada\u8938\u92dd\u5638\u5abd\u746a\u78bc\u879e\u99ac\u7f75\u55ce\u561c\u5b24\u69aa\u8cb7\u9ea5\u8ce3\u9081\u8108\u52f1\u779e\u9945\u883b\u6eff\u8b3e\u7e35\u93dd\u9859\u9c3b\u8c93\u9328\u925a\u8cbf\u9ebc\u6c92\u9382\u9580\u60b6\u5011\u636b\u71dc\u61e3\u9346\u9333\u5922\u7787\u8b0e\u5f4c\u8993\u51aa\u7f8b\u8b10\u737c\u79b0\u7dbf\u7dec\u6fa0\u9766\u9efd\u5edf\u7df2\u7e46\u6ec5\u61ab\u95a9\u9594\u7de1\u9cf4\u9298\u8b2c\u8b28\u9a40\u9943\u6b7f\u93cc\u8b00\u755d\u926c\u5436\u9209\u7d0d\u96e3\u6493\u8166\u60f1\u9b27\u9403\u8a25\u9912\u5167\u64ec\u81a9\u922e\u9be2\u6506\u8f26\u9bf0\u91c0\u9ce5\u8526\u88ca\u8076\u5699\u9477\u93b3\u9689\u8617\u56c1\u9862\u8ea1\u6ab8\u7370\u5be7\u64f0\u6fd8\u82e7\u5680\u8079\u9215\u7d10\u81bf\u6fc3\u8fb2\u5102\u5665\u99d1\u91f9\u8afe\u513a\u7627\u6b50\u9dd7\u6bc6\u5614\u6f1a\u8b33\u616a\u750c\u76e4\u8e63\u9f90\u62cb\u76b0\u8ce0\u8f61\u5674\u9d6c\u7d15\u7f86\u9239\u9a19\u8ade\u99e2\u98c4\u7e39\u983b\u8ca7\u5b2a\u860b\u6191\u8a55\u6f51\u9817\u91d9\u64b2\u92ea\u6a38\u8b5c\u93f7\u9420\u68f2\u81cd\u9f4a\u9a0e\u8c48\u555f\u6c23\u68c4\u8a16\u8604\u9a0f\u7dba\u69bf\u78e7\u980e\u980f\u9c2d\u727d\u91ec\u925b\u9077\u7c3d\u8b19\u9322\u9257\u6f5b\u6dfa\u8b74\u5879\u50c9\u8541\u6173\u9a2b\u7e7e\u69e7\u9210\u69cd\u55c6\u58bb\u8594\u5f37\u6436\u5b19\u6aa3\u6227\u7197\u9306\u93d8\u93f9\u7fa5\u8e4c\u936c\u6a4b\u55ac\
u50d1\u7ff9\u7ac5\u8a9a\u8b59\u854e\u7e70\u78fd\u8e7a\u7aca\u611c\u9365\u7bcb\u6b3d\u89aa\u5be2\u92df\u8f15\u6c2b\u50be\u9803\u8acb\u6176\u64b3\u9bd6\u74ca\u7aae\u7162\u86fa\u5df0\u8cd5\u87e3\u9c0d\u8da8\u5340\u8ec0\u9a45\u9f72\u8a58\u5d87\u95c3\u89b7\u9d1d\u9874\u6b0a\u52f8\u8a6e\u7da3\u8f07\u9293\u537b\u9d72\u78ba\u95cb\u95d5\u6128\u8b93\u9952\u64fe\u7e5e\u8558\u5b08\u6a48\u71b1\u97cc\u8a8d\u7d09\u98ea\u8ed4\u69ae\u7d68\u5db8\u8811\u7e1f\u92a3\u9870\u8edf\u92b3\u8706\u958f\u6f64\u7051\u85a9\u98af\u9c13\u8cfd\u5098\u6bff\u7cdd\u55aa\u9a37\u6383\u7e45\u6f80\u55c7\u92ab\u7a61\u6bba\u524e\u7d17\u93a9\u9bca\u7be9\u66ec\u91c3\u522a\u9583\u965c\u8d0d\u7e55\u8a15\u59cd\u9a38\u91e4\u9c54\u5891\u50b7\u8cde\u5770\u6ba4\u89f4\u71d2\u7d39\u8cd2\u651d\u61fe\u8a2d\u5399\u7044\u756c\u7d33\u5be9\u5b38\u814e\u6ef2\u8a75\u8ad7\u700b\u8072\u7e69\u52dd\u5e2b\u7345\u6fd5\u8a69\u6642\u8755\u5be6\u8b58\u99db\u52e2\u9069\u91cb\u98fe\u8996\u8a66\u8b1a\u5852\u8494\u5f12\u8efe\u8cb0\u9230\u9c23\u58fd\u7378\u7dac\u6a1e\u8f38\u66f8\u8d16\u5c6c\u8853\u6a39\u8c4e\u6578\u6504\u7d13\u5e25\u9582\u96d9\u8ab0\u7a05\u9806\u8aaa\u78a9\u720d\u9460\u7d72\u98fc\u5edd\u99df\u7de6\u9376\u9de5\u8073\u616b\u980c\u8a1f\u8aa6\u64fb\u85ea\u993f\u98bc\u93aa\u8607\u8a34\u8085\u8b16\u7a4c\u96d6\u96a8\u7d8f\u6b72\u8ab6\u5b6b\u640d\u7b4d\u84c0\u733b\u7e2e\u7463\u9396\u55e9\u8127\u737a\u64bb\u95e5\u9248\u9c28\u81fa\u614b\u9226\u9b90\u6524\u8caa\u7671\u7058\u58c7\u8b5a\u8ac7\u5606\u66c7\u926d\u931f\u9807\u6e6f\u71d9\u513b\u9933\u940b\u93dc\u6fe4\u7d73\u8a0e\u97dc\u92f1\u9a30\u8b04\u92bb\u984c\u9ad4\u5c5c\u7df9\u9d5c\u95d0\u689d\u7cf6\u9f60\u9c37\u8cbc\u9435\u5ef3\u807d\u70f4\u9285\u7d71\u615f\u982d\u9204\u79bf\u5716\u91f7\u5718\u6476\u9839\u86fb\u98e9\u812b\u9d15\u99b1\u99dd\u6a62\u7c5c\u9f09\u896a\u5aa7\u8183\u5f4e\u7063\u9811\u842c\u7d08\u7db0\u7db2\u8f1e\u97cb\u9055\u570d\u70ba\u6ff0\u7dad\u8466\u5049\u507d\u7def\u8b02\u885b\u8ac9\u5e43\u95c8\u6e88\u6f7f\u744b\u97d9\u7152\u9baa\u6eab\u805e\u7d0b\u7a69\u554f\u95bf\u7
515\u64be\u8778\u6e26\u7aa9\u81e5\u8435\u9f77\u55da\u93a2\u70cf\u8aa3\u7121\u856a\u5433\u5862\u9727\u52d9\u8aa4\u9114\u5ee1\u61ae\u5af5\u9a16\u9d61\u9da9\u932b\u72a7\u8972\u7fd2\u9291\u6232\u7d30\u993c\u9b29\u74bd\u89a1\u8766\u8f44\u5cfd\u4fe0\u72f9\u5ec8\u5687\u7864\u9bae\u7e96\u8ce2\u929c\u9591\u986f\u96aa\u73fe\u737b\u7e23\u9921\u7fa8\u61b2\u7dda\u83a7\u859f\u861a\u5cf4\u736b\u5afb\u9df4\u7647\u8814\u79c8\u8e9a\u5ec2\u9472\u9109\u8a73\u97ff\u9805\u858c\u9909\u9a64\u7dd7\u9957\u856d\u56c2\u92b7\u66c9\u562f\u5635\u701f\u9a4d\u7d83\u689f\u7c2b\u5354\u633e\u651c\u8105\u8ae7\u5beb\u7009\u8b1d\u893b\u64f7\u7d32\u7e88\u92c5\u91c1\u8208\u9658\u6ece\u5147\u6d36\u92b9\u7e61\u9948\u9d42\u865b\u5653\u9808\u8a31\u6558\u7dd2\u7e8c\u8a61\u980a\u8ed2\u61f8\u9078\u766c\u7d62\u8afc\u9249\u93c7\u5b78\u8b14\u6fa9\u9c48\u52db\u8a62\u5c0b\u99b4\u8a13\u8a0a\u905c\u5864\u6f6f\u9c58\u58d3\u9d09\u9d28\u555e\u4e9e\u8a1d\u57e1\u5a6d\u690f\u6c2c\u95b9\u7159\u9e7d\u56b4\u5dd6\u984f\u95bb\u8277\u53ad\u786f\u5f65\u8afa\u9a57\u53b4\u8d17\u513c\u5157\u8b9e\u61e8\u9586\u91c5\u9b58\u995c\u9f34\u9d26\u694a\u63da\u760d\u967d\u7662\u990a\u6a23\u716c\u7464\u6416\u582f\u9059\u7aaf\u8b20\u85e5\u8efa\u9dc2\u9c29\u723a\u9801\u696d\u8449\u9768\u8b01\u9134\u66c4\u71c1\u91ab\u92a5\u9824\u907a\u5100\u87fb\u85dd\u5104\u61b6\u7fa9\u8a63\u8b70\u8abc\u8b6f\u7570\u7e79\u8a52\u56c8\u5da7\u98f4\u61cc\u9a5b\u7e0a\u8efc\u8cbd\u91d4\u93b0\u943f\u761e\u8264\u852d\u9670\u9280\u98f2\u96b1\u92a6\u766e\u6afb\u5b30\u9df9\u61c9\u7e93\u7469\u87a2\u71df\u7192\u8805\u8d0f\u7a4e\u584b\u9daf\u7e08\u93a3\u6516\u56b6\u7005\u7020\u74d4\u9e1a\u766d\u9826\u7f4c\u55b2\u64c1\u50ad\u7670\u8e34\u8a60\u93de\u512a\u6182\u90f5\u923e\u7336\u8a98\u8555\u92aa\u9b77\u8f3f\u9b5a\u6f01\u5a1b\u8207\u5dbc\u8a9e\u7344\u8b7d\u9810\u99ad\u50b4\u4fc1\u8adb\u8aed\u8577\u5d33\u98eb\u95be\u5ad7\u7d06\u89a6\u6b5f\u923a\u9d52\u9df8\u9f6c\u9d1b\u6df5\u8f45\u5712\u54e1\u5713\u7de3\u9060\u6ade\u9cf6\u9eff\u7d04\u8e8d\u9470\u7cb5\u6085\u95b1\u925e\u9116\u52fb\u969
5\u904b\u860a\u919e\u6688\u97fb\u9106\u8553\u60f2\u614d\u7d1c\u97de\u6b9e\u6c33\u96dc\u707d\u8f09\u6522\u66ab\u8d0a\u74da\u8db2\u93e8\u8d13\u81df\u99d4\u947f\u68d7\u8cac\u64c7\u5247\u6fa4\u8cfe\u5616\u5e58\u7c00\u8cca\u8b56\u8d08\u7d9c\u7e52\u8ecb\u9358\u9598\u67f5\u8a50\u9f4b\u50b5\u6c08\u76de\u65ac\u8f3e\u5d84\u68e7\u6230\u7dbb\u8b6b\u5f35\u6f32\u5e33\u8cec\u8139\u8d99\u8a54\u91d7\u87c4\u8f4d\u937a\u9019\u8b2b\u8f12\u9dd3\u8c9e\u91dd\u5075\u8a3a\u93ae\u9663\u6e5e\u7e1d\u6968\u8eeb\u8cd1\u798e\u9d06\u6399\u775c\u7319\u722d\u5e40\u7665\u912d\u8b49\u8acd\u5d22\u9266\u931a\u7b8f\u7e54\u8077\u57f7\u7d19\u646f\u64f2\u5e5f\u8cea\u6eef\u9a2d\u6adb\u6894\u8ef9\u8f0a\u8d04\u9dd9\u8784\u7e36\u8e93\u8e91\u89f6\u9418\u7d42\u7a2e\u816b\u773e\u937e\u8b05\u8ef8\u76ba\u665d\u9a5f\u7d02\u7e10\u8c6c\u8af8\u8a85\u71ed\u77da\u56d1\u8caf\u9444\u99d0\u4f47\u6ae7\u9296\u5c08\u78da\u8f49\u8cfa\u56c0\u994c\u9873\u6a01\u838a\u88dd\u599d\u58ef\u72c0\u9310\u8d05\u589c\u7db4\u9a05\u7e0b\u8ac4\u6e96\u8457\u6fc1\u8ad1\u9432\u8332\u8cc7\u6f2c\u8aee\u7dc7\u8f1c\u8cb2\u7725\u9319\u9f5c\u9bd4\u8e64\u7e3d\u7e31\u50af\u9112\u8acf\u9a36\u9beb\u8a5b\u7d44\u93c3\u9246\u7e98\u8ea6\u9c52\u7ffa\u4e26\u8514\u6c88\u919c\u6fb1\u53e0\u9b25\u7bc4\u5e79\u81ef\u77fd\u6ac3\u5f8c\u5925\u7a2d\u5091\u8a23\u8a87\u88cf\u6de9\u9ebc\u9ef4\u649a\u6dd2\u6261\u8056\u5c4d\u64e1\u5857\u7aaa\u9935\u6c59\u9341\u9e79\u880d\u5f5c\u6e67\u904a\u7c72\u79a6\u9858\u5dbd\u96f2\u7ac8\u7d2e\u5284\u7bc9\u65bc\u8a8c\u8a3b\u96d5\u8a01\u8b7e\u90e4\u731b\u6c39\u962a\u58df\u5816\u57b5\u588a\u6abe\u8552\u8464\u84e7\u8493\u83c7\u69c1\u6463\u54a4\u551a\u54e2\u565d\u5645\u6485\u5288\u8b14\u8946\u5db4\u810a\u4eff\u50e5\u7341\u9e85\u9918\u9937\u994a\u9962\u695e\u6035\u61cd\u723f\u6f35\u7069\u6df7\u6feb\u7026\u6de1\u5be7\u7cf8\u7d5d\u7dd4\u7449\u6898\u68ec\u6848\u6a70\u6aeb\u8ef2\u8ee4\u8ceb\u8181\u8156\u98c8\u7cca\u7146\u6e9c\u6e63\u6e3a\u78b8\u6efe\u7798\u9208\u9255\u92e3\u92b1\u92e5\u92f6\u9426\u9427\u9369\u9340\u9343\u9307\u9384\u9387\u93bf\u941d\
u9465\u9479\u9454\u7a6d\u9d93\u9da5\u9e0c\u7667\u5c59\u7602\u81d2\u8947\u7e48\u802e\u986c\u87ce\u9eaf\u9b81\u9b83\u9b8e\u9bd7\u9bdd\u9bf4\u9c5d\u9bff\u9c20\u9c35\u9c45\u97bd\u97dd\u9f47"
def convert_chinese(text, from_charset, to_charset):
    """Map characters of *text* through a pair of parallel charsets.

    Each character found in *from_charset* is replaced by the character at
    the same index in *to_charset* (first occurrence wins, matching the
    previous ``str.find`` behavior); characters not present pass through
    unchanged.

    :param text: input string.
    :param from_charset: string of source characters.
    :param to_charset: string of replacement characters, parallel to
        *from_charset*.
    :return: the converted string.
    """
    # Precompute a first-occurrence mapping once, instead of scanning the
    # ~2000-char charset with str.find for every input character, which
    # made the original O(len(text) * len(charset)).
    mapping = {}
    for src, dst in zip(from_charset, to_charset):
        mapping.setdefault(src, dst)
    # str.join avoids quadratic string concatenation.
    return "".join(mapping.get(ch, ch) for ch in text)
# Convert traditional (Big5) characters to simplified (GBK) characters.
simplify = partial(convert_chinese, from_charset=big5, to_charset=gbk)
tradify = partial(convert_chinese, from_charset=gbk, to_charset=big5) | /sacremoses_xt-0.0.44-py3-none-any.whl/sacremoses/chinese.py | 0.436982 | 0.150216 | chinese.py | pypi |
import re
from six import text_type
from sacremoses.corpus import Perluniprops
from sacremoses.corpus import NonbreakingPrefixes
from sacremoses.util import is_cjk
# Module-level singletons: Perl-Unicode-property character classes and
# nonbreaking-prefix data, shared by everything in this module.
perluniprops = Perluniprops()
nonbreaking_prefixes = NonbreakingPrefixes()
class MosesSentTokenizer(object):
    """
    This is a Python port of the Moses Tokenizer from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/ems/support/split-sentences.perl
    """

    # NOTE(review): this `raise` sits directly in the class body, so it
    # executes while the `class` statement itself is being evaluated --
    # defining this class (and hence importing this module, if the class is
    # at module level) fails with NotImplementedError. The sentence splitter
    # is an unfinished port; the raw string below parks the work-in-progress
    # code as an inert, never-executed literal.
    raise NotImplementedError
    r"""
# Perl Unicode Properties character sets.
IsPi = text_type("".join(perluniprops.chars("IsPi")))
IsUpper = text_type("".join(perluniprops.chars("IsUpper")))
IsPf = text_type("".join(perluniprops.chars("IsPf")))
Punctuation = text_type("".join(perluniprops.chars("Punctuation")))
CJK = text_type("".join(perluniprops.chars("CJK")))
CJKSymbols = text_type("".join(perluniprops.chars("CJKSymbols")))
IsAlnum = text_type("".join(perluniprops.chars("IsAlnum")))
# Remove ASCII junk.
DEDUPLICATE_SPACE = r"\s+", r" "
# Non-period end of sentence markers (?!) followed by sentence starters.
NONPERIOD_UPPER = r"([?!]) +([\'\"\(\[\¿\¡\p{startpunct}]*[\p{upper}])".format(startpunct=IsPi, upper=IsUpper), r"\1\n\2"
# Multi-dots followed by sentence starters.
MULTDOT_UPPER = r"(\.[\.]+) +([\'\"\(\[\¿\¡\p{startpunct}]*[\p{upper}])".format(startpunct=IsPi, upper=IsUpper), r"\1\n\2"
# Add breaks for sentences that end with some sort of punctuation
# inside a quote or parenthetical and are followed by a possible
# sentence starter punctuation and upper case.
QUOTES_UPPER = r"([?!\.][\ ]*[\'\"\)\]\p{endpunct}]+) +([\'\"\(\[\¿\¡\p{startpunct}]*[\ ]*[\p{upper}])".format(endpunct=IsPf, startpunct=IsPi, upper=IsUpper), r"\1\n\2"
# Add breaks for sentences that end with some sort of punctuation,
# and are followed by a sentence starter punctuation and upper case.
ENDPUNCT_UPPER = r"([?!\.]) +([\'\"\(\[\¿\¡\p{startpunct}]+[\ ]*[\p{upper}])".format(startpunct=IsPi, upper=IsUpper), r"\1\n\2"
IS_EOS = r"([\p{alphanum}\.\-]*)([\'\"\)\]\%\p{endpunct}]*)(\.+)$".format(alphanum=IsAlnum, endpunct=IsPf)
def __init__(self, lang="en", custom_nonbreaking_prefixes_file=None):
# Load custom nonbreaking prefixes file.
if custom_nonbreaking_prefixes_file:
self.NONBREAKING_PREFIXES = []
with open(custom_nonbreaking_prefixes_file, 'r') as fin:
for line in fin:
line = line.strip()
if line and not line.startswith("#"):
if line not in self.NONBREAKING_PREFIXES:
self.NONBREAKING_PREFIXES.append(line)
detokenized_text = ""
tokens = text.split()
# Iterate through every token till the last 2nd token.
for i, token in enumerate(iter(tokens[:-1])):
if re.search(IS_EOS, token):
pass
    """
# Sacremoses
[](https://travis-ci.org/alvations/sacremoses)
[](https://ci.appveyor.com/project/alvations/sacremoses)
# License
MIT License.
# Install
```
pip install -U sacremoses
```
NOTE: Sacremoses only supports Python 3 now (`sacremoses>=0.0.41`). If you're using Python 2, the last possible version is `sacremoses==0.0.40`.
# Usage (Python)
## Tokenizer and Detokenizer
```python
>>> from sacremoses import MosesTokenizer, MosesDetokenizer
>>> mt = MosesTokenizer(lang='en')
>>> text = u'This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf'
>>> expected_tokenized = u'This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf'
>>> tokenized_text = mt.tokenize(text, return_str=True)
>>> tokenized_text == expected_tokenized
True
>>> mt, md = MosesTokenizer(lang='en'), MosesDetokenizer(lang='en')
>>> sent = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [ ] & You're gonna shake it off? Don't?"
>>> expected_tokens = [u'This', u'ain', u''t', u'funny', u'.', u'It', u''s', u'actually', u'hillarious', u',', u'yet', u'double', u'Ls', u'.', u'|', u'[', u']', u'<', u'>', u'[', u']', u'&', u'You', u''re', u'gonna', u'shake', u'it', u'off', u'?', u'Don', u''t', u'?']
>>> expected_detokens = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [] & You're gonna shake it off? Don't?"
>>> mt.tokenize(sent) == expected_tokens
True
>>> md.detokenize(expected_tokens) == expected_detokens
True
```
## Truecaser
```python
>>> from sacremoses import MosesTruecaser, MosesTokenizer
# Train a new truecaser from a 'big.txt' file.
>>> mtr = MosesTruecaser()
>>> mtok = MosesTokenizer(lang='en')
# Save the truecase model to 'big.truecasemodel' using `save_to`
>>> tokenized_docs = [mtok.tokenize(line) for line in open('big.txt')]
>>> mtr.train(tokenized_docs, save_to='big.truecasemodel')
# Save the truecase model to 'big.truecasemodel' after training
# (just in case you forgot to use `save_to`)
>>> mtr = MosesTruecaser()
>>> mtr.train('big.txt')
>>> mtr.save_model('big.truecasemodel')
# Truecase a string after training a model.
>>> mtr = MosesTruecaser()
>>> mtr.train('big.txt')
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES")
['the', 'adventures', 'of', 'Sherlock', 'Holmes']
# Loads a model and truecase a string using trained model.
>>> mtr = MosesTruecaser('big.truecasemodel')
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES")
['the', 'adventures', 'of', 'Sherlock', 'Holmes']
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES", return_str=True)
'the ADVENTURES OF SHERLOCK HOLMES'
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES", return_str=True, use_known=True)
'the adventures of Sherlock Holmes'
```
## Normalizer
```python
>>> from sacremoses import MosesPunctNormalizer
>>> mpn = MosesPunctNormalizer()
>>> mpn.normalize('THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS."')
'THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS."'
```
# Usage (CLI)
Since version `0.0.42`, the pipeline feature for CLI is introduced, thus there
are global options that should be set first before calling the commands:
- language
- processes
- encoding
- quiet
```shell
$ pip install -U sacremoses>=0.0.42
$ sacremoses --help
Usage: sacremoses [OPTIONS] COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...
Options:
-l, --language TEXT Use language specific rules when tokenizing
-j, --processes INTEGER No. of processes.
-e, --encoding TEXT Specify encoding of file.
-q, --quiet Disable progress bar.
--version Show the version and exit.
-h, --help Show this message and exit.
Commands:
detokenize
detruecase
normalize
tokenize
train-truecase
truecase
```
## Pipeline
Example to chain the following commands:
- `normalize` with `-c` option to remove control characters.
- `tokenize` with `-a` option for aggressive dash split rules.
- `truecase` with `-a` option to indicate that model is for ASR
- if `big.truemodel` exists, load the model with `-m` option,
- otherwise train a model and save it with `-m` option to `big.truemodel` file.
- save the output to console to the `big.txt.norm.tok.true` file.
```shell
cat big.txt | sacremoses -l en -j 4 \
normalize -c tokenize -a truecase -a -m big.truemodel \
> big.txt.norm.tok.true
```
## Tokenizer
```shell
$ sacremoses tokenize --help
Usage: sacremoses tokenize [OPTIONS]
Options:
-a, --aggressive-dash-splits Triggers dash split rules.
-x, --xml-escape Escape special characters for XML.
-p, --protected-patterns TEXT Specify file with patters to be protected in
tokenisation.
-c, --custom-nb-prefixes TEXT Specify a custom non-breaking prefixes file,
add prefixes to the default ones from the
specified language.
-h, --help Show this message and exit.
$ sacremoses -l en -j 4 tokenize < big.txt > big.txt.tok
100%|██████████████████████████████████| 128457/128457 [00:05<00:00, 24363.39it/s
$ wget https://raw.githubusercontent.com/moses-smt/mosesdecoder/master/scripts/tokenizer/basic-protected-patterns
$ sacremoses -l en -j 4 tokenize -p basic-protected-patterns < big.txt > big.txt.tok
100%|██████████████████████████████████| 128457/128457 [00:05<00:00, 22183.94it/s
```
## Detokenizer
```shell
$ sacremoses detokenize --help
Usage: sacremoses detokenize [OPTIONS]
Options:
-x, --xml-unescape Unescape special characters for XML.
-h, --help Show this message and exit.
$ sacremoses -l en -j 4 detokenize < big.txt.tok > big.txt.tok.detok
100%|██████████████████████████████████| 128457/128457 [00:16<00:00, 7931.26it/s]
```
## Truecase
```shell
$ sacremoses truecase --help
Usage: sacremoses truecase [OPTIONS]
Options:
-m, --modelfile TEXT Filename to save/load the modelfile.
[required]
-a, --is-asr A flag to indicate that model is for ASR.
-p, --possibly-use-first-token Use the first token as part of truecase
training.
-h, --help Show this message and exit.
$ sacremoses -j 4 truecase -m big.model < big.txt.tok > big.txt.tok.true
100%|██████████████████████████████████| 128457/128457 [00:09<00:00, 14257.27it/s]
```
## Detruecase
```shell
$ sacremoses detruecase --help
Usage: sacremoses detruecase [OPTIONS]
Options:
-j, --processes INTEGER No. of processes.
-a, --is-headline Whether the file are headlines.
-e, --encoding TEXT Specify encoding of file.
-h, --help Show this message and exit.
$ sacremoses -j 4 detruecase < big.txt.tok.true > big.txt.tok.true.detrue
100%|█████████████████████████████████| 128457/128457 [00:04<00:00, 26945.16it/s]
```
## Normalize
```shell
$ sacremoses normalize --help
Usage: sacremoses normalize [OPTIONS]
Options:
-q, --normalize-quote-commas Normalize quotations and commas.
-d, --normalize-numbers Normalize number.
-p, --replace-unicode-puncts Replace unicode punctuations BEFORE
normalization.
-c, --remove-control-chars Remove control characters AFTER normalization.
-h, --help Show this message and exit.
$ sacremoses -j 4 normalize < big.txt > big.txt.norm
100%|██████████████████████████████████| 128457/128457 [00:09<00:00, 13096.23it/s]
```
import datetime
import time
from .interfaces import ITimeZone
from zope.interface import implementer
from zope.component import queryUtility
ZERO = datetime.timedelta(seconds=0)
def is_dst(dt):
    """Tell whether the naive datetime ``dt`` falls in daylight saving time.

    The datetime is round-tripped through ``time.mktime`` and
    ``time.localtime`` so that the C library fills in ``tm_isdst`` according
    to the system's local timezone rules.
    """
    # tm_yday is ignored by mktime; tm_isdst=-1 asks mktime to decide itself.
    time_tuple = (dt.year, dt.month, dt.day,
                  dt.hour, dt.minute, dt.second,
                  dt.weekday(), 0, -1)
    timestamp = time.mktime(time_tuple)
    return time.localtime(timestamp).tm_isdst > 0
@implementer(ITimeZone)
class UTC(datetime.tzinfo):
    """The UTC timezone: zero offset, zero DST, fixed name.

    From http://docs.python.org/library/datetime.html#tzinfo-objects examples
    XXXvlab: pytz.utc isn't better ?
    """

    def tzname(self, dt):
        # The name never depends on the datetime queried.
        return "UTC"

    def utcoffset(self, dt):
        # UTC is the reference point, so its offset from itself is zero.
        return ZERO

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO

    def __repr__(self):
        return "<TimeZone: UTC>"
@implementer(ITimeZone)
class TzSystem(datetime.tzinfo):
    """Timezone of the local system, for use with datetime objects.

    Provides the local UTC offset and the DST adjustment, both derived from
    the ``time`` module.

    How the offsets are computed:

    ``time.daylight`` is nonzero if a DST timezone is defined, in which case
    ``time.timezone`` and ``time.altzone`` differ.  For example, for the
    'Europe/Paris' timezone::

        time.timezone = -3600   # standard time, seconds west of UTC
        time.altzone  = -7200   # DST time, seconds west of UTC

    ``utcoffset`` checks whether the queried datetime falls in daylight
    saving time and returns the matching offset.
    """

    # Offsets are captured once, when the class body executes at module
    # import.  NOTE(review): a system timezone change after import is not
    # picked up by an already-running process — presumably acceptable here;
    # confirm if long-lived daemons rely on this class.
    stdoffset = datetime.timedelta(seconds=(- time.timezone))
    if time.daylight:
        dstoffset = datetime.timedelta(seconds=(- time.altzone))
    else:
        dstoffset = stdoffset
    # DST adjustment as a timedelta (zero when the zone has no DST).
    dstdiff = dstoffset - stdoffset

    def utcoffset(self, dt):
        """Return the offset of local time from UTC as a ``timedelta``."""
        return self.dstoffset if is_dst(dt) else self.stdoffset

    def dst(self, dt):
        """Return the daylight saving time (DST) adjustment as a ``timedelta``."""
        return self.dstdiff if is_dst(dt) else ZERO

    def tzname(self, dt):
        """Return the time zone name applicable to datetime ``dt``."""
        # time.tzname is a (standard, DST) pair; is_dst() picks the index.
        return time.tzname[is_dst(dt)]

    def __repr__(self):
        return "<TimeZone: System>"
@implementer(ITimeZone)
class TzTest(datetime.tzinfo):
    """Synthetic fixed-offset timezone (UTC +5 minutes) crafted for tests."""

    def dst(self, dt):
        # No daylight saving in this synthetic zone.
        return ZERO

    def utcoffset(self, dt):
        # Constant five-minute offset east of UTC, whatever the datetime.
        return datetime.timedelta(hours=0, minutes=5)

    def tzname(self, dt):
        return "GMT +5m"

    def __repr__(self):
        return "<TimeZone: Test>"
# Shared instances: the test timezone and the system timezone used as the
# fallback "local" timezone.
testTimeZone = TzTest()
defaultLocalTimeZone = TzSystem()


def TzLocal():
    """Get the local timezone with the ZCA.

    Looks up an ``ITimeZone`` utility registered under the name ``'local'``
    (Zope Component Architecture) and falls back to the system timezone when
    no such utility is registered.
    """
    return queryUtility(ITimeZone, name='local', default=defaultLocalTimeZone)
import datetime
from typing import Any, Dict, List, Optional, Union
import httpx
from ...client import Client
from ...models.get_object_consumption_response_200_item import GetObjectConsumptionResponse200Item
from ...types import UNSET, Response, Unset
def _get_kwargs(
    *,
    client: Client,
    o_eic: str,
    mp_nr: Union[Unset, None, str] = UNSET,
    m_nr: Union[Unset, None, str] = UNSET,
    d_f: datetime.datetime,
    d_t: datetime.datetime,
) -> Dict[str, Any]:
    """Assemble the httpx keyword arguments for GET /get-object-consumption."""
    raw_params: Dict[str, Any] = {
        "oEIC": o_eic,
        "mpNr": mp_nr,
        "mNr": m_nr,
        "dF": d_f.isoformat(),
        "dT": d_t.isoformat(),
    }
    # Drop parameters the caller left unset/None so they are omitted from the
    # query string entirely.
    query_params = {key: value for key, value in raw_params.items() if value is not UNSET and value is not None}
    return {
        "url": "{}/get-object-consumption".format(client.base_url),
        "headers": client.get_headers(),
        "cookies": client.get_cookies(),
        "timeout": client.get_timeout(),
        "params": query_params,
        "verify": client.verify_ssl,
    }
def _parse_response(*, response: httpx.Response) -> Optional[Union[Any, List[GetObjectConsumptionResponse200Item]]]:
    """Deserialize the response body into model objects (200) or None otherwise."""
    if response.status_code == 200:
        return [
            GetObjectConsumptionResponse200Item.from_dict(item)
            for item in response.json()
        ]
    # 400 (bad request), 401 (unauthorized) and 500 (server error) carry no
    # parseable body; unknown status codes are treated the same way.
    if response.status_code in (400, 401, 500):
        return None
    return None
def _build_response(*, response: httpx.Response) -> Response[Union[Any, List[GetObjectConsumptionResponse200Item]]]:
    """Wrap the raw httpx response together with its parsed payload."""
    parsed_payload = _parse_response(response=response)
    return Response(
        status_code=response.status_code,
        content=response.content,
        headers=response.headers,
        parsed=parsed_payload,
    )
def sync_detailed(
    *,
    client: Client,
    o_eic: str,
    mp_nr: Union[Unset, None, str] = UNSET,
    m_nr: Union[Unset, None, str] = UNSET,
    d_f: datetime.datetime,
    d_t: datetime.datetime,
) -> Response[Union[Any, List[GetObjectConsumptionResponse200Item]]]:
    """Synchronously call GET /get-object-consumption; return the full Response wrapper."""
    request_kwargs = _get_kwargs(
        client=client,
        o_eic=o_eic,
        mp_nr=mp_nr,
        m_nr=m_nr,
        d_f=d_f,
        d_t=d_t,
    )
    raw_response = httpx.get(**request_kwargs)
    return _build_response(response=raw_response)
def sync(
    *,
    client: Client,
    o_eic: str,
    mp_nr: Union[Unset, None, str] = UNSET,
    m_nr: Union[Unset, None, str] = UNSET,
    d_f: datetime.datetime,
    d_t: datetime.datetime,
) -> Optional[Union[Any, List[GetObjectConsumptionResponse200Item]]]:
    """Synchronously fetch object consumption data; return only the parsed payload.

    The service returns data according to the APIKEY assigned to the object in
    addition to the rest of parameters for the data request.  Consumption
    information is provided only for objects with smart meters, and covers
    both the consumption used for billing and the actual consumption from the
    smart meter, which was successfully read the first time the meter was read.

    Additional terms of use: at most one year of consumption data is
    available, counting from the end of the current billing period.  If the
    request period exceeds one year, only one year of data counting from the
    ``d_t`` value is returned, i.e. the last 365 days.
    """
    full_response = sync_detailed(
        client=client,
        o_eic=o_eic,
        mp_nr=mp_nr,
        m_nr=m_nr,
        d_f=d_f,
        d_t=d_t,
    )
    return full_response.parsed
async def asyncio_detailed(
    *,
    client: Client,
    o_eic: str,
    mp_nr: Union[Unset, None, str] = UNSET,
    m_nr: Union[Unset, None, str] = UNSET,
    d_f: datetime.datetime,
    d_t: datetime.datetime,
) -> Response[Union[Any, List[GetObjectConsumptionResponse200Item]]]:
    """Asynchronously call GET /get-object-consumption; return the full Response wrapper."""
    request_kwargs = _get_kwargs(
        client=client,
        o_eic=o_eic,
        mp_nr=mp_nr,
        m_nr=m_nr,
        d_f=d_f,
        d_t=d_t,
    )
    async with httpx.AsyncClient() as session:
        raw_response = await session.get(**request_kwargs)
    return _build_response(response=raw_response)
async def asyncio(
    *,
    client: Client,
    o_eic: str,
    mp_nr: Union[Unset, None, str] = UNSET,
    m_nr: Union[Unset, None, str] = UNSET,
    d_f: datetime.datetime,
    d_t: datetime.datetime,
) -> Optional[Union[Any, List[GetObjectConsumptionResponse200Item]]]:
    """Asynchronously fetch object consumption data; return only the parsed payload.

    The service returns data according to the APIKEY assigned to the object in
    addition to the rest of parameters for the data request.  Consumption
    information is provided only for objects with smart meters, and covers
    both the consumption used for billing and the actual consumption from the
    smart meter, which was successfully read the first time the meter was read.

    Additional terms of use: at most one year of consumption data is
    available, counting from the end of the current billing period.  If the
    request period exceeds one year, only one year of data counting from the
    ``d_t`` value is returned, i.e. the last 365 days.
    """
    full_response = await asyncio_detailed(
        client=client,
        o_eic=o_eic,
        mp_nr=mp_nr,
        m_nr=m_nr,
        d_f=d_f,
        d_t=d_t,
    )
    return full_response.parsed
from typing import Any, Dict, List, Type, TypeVar
import attr
from ..models.error_invalid_params_item import ErrorInvalidParamsItem
T = TypeVar("T", bound="Error")
@attr.s(auto_attribs=True)
class Error:
    """Error payload carrying a title and per-parameter validation errors.

    Keys not matching a declared field are preserved in
    ``additional_properties`` and round-trip through ``to_dict``.
    """

    title: str
    invalid_params: List[ErrorInvalidParamsItem]
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; named fields override extra properties."""
        serialized: Dict[str, Any] = {}
        serialized.update(self.additional_properties)
        serialized.update(
            {
                "title": self.title,
                "invalid-params": [item.to_dict() for item in self.invalid_params],
            }
        )
        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an Error from a dict; unrecognized keys become additional properties."""
        leftover = src_dict.copy()
        title = leftover.pop("title")
        invalid_params = [
            ErrorInvalidParamsItem.from_dict(entry)
            for entry in leftover.pop("invalid-params")
        ]
        error = cls(
            title=title,
            invalid_params=invalid_params,
        )
        error.additional_properties = leftover
        return error

    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties)

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
<!-- markdownlint-disable -->
<h2 align="center" style="font-family:verdana;font-size:150%"> <b>S</b>equencing <b>A</b>nalysis and <b>D</b>ata Library for <b>I</b>mmunoinformatics <b>E</b>xploration</h2>
<div align="center">
<img src="https://sadiestaticcrm.s3.us-west-2.amazonaws.com/Sadie.svg" alt="SADIE" style="margin:0.51em;width:50%">
</div>
<div class="flex-container" align="center">
<div class="flex-container" align="center">
<a href="https://img.shields.io/badge/Python-3.7%7C3.8%7C3.9%7C3.10-blue">
<img src="https://img.shields.io/badge/Python-3.7%7C3.8%7C3.9%7C3.10-blue"
alt="Python Version">
<a href="https://github.com/psf/black">
<img src="https://img.shields.io/badge/code%20style-black-000000.svg"
alt="Format Version">
<a href="https://codecov.io/gh/jwillis0720/sadie">
<img src="https://codecov.io/gh/jwillis0720/sadie/branch/main/graph/badge.svg?token=EH9QEX4ZMP"
alt="Code Coverage">
<a href="https://github.com/pre-commit/pre-commit">
</div>
<div class="flex-container" align="center">
<img src="https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white"
alt="pre commit">
<a href="https://pypi.org/project/sadie-antibody">
<img src="https://img.shields.io/pypi/v/sadie-antibody?color=blue"
alt='pypi'>
<a href="https://sadie.jordanrwillis.com" >
<img src="https://api.netlify.com/api/v1/badges/59ff956c-82d9-4900-83c7-758ed21ccb34/deploy-status"
alt="Documentation">
</a>
<a href="https://github.com/jwillis0720/sadie/actions/workflows/docs.yml" >
<img src="https://github.com/jwillis0720/sadie/actions/workflows/docs.yml/badge.svg"
alt="Documentation">
</a>
</div>
<div class="flex-container" align="center">
<a href="https://github.com/jwillis0720/sadie/workflows/Linux%20Build%20and%20Test/badge.svg">
<img src="https://github.com/jwillis0720/sadie/workflows/Linux%20Build%20and%20Test/badge.svg"
alt="Linux Build">
<a href="https://github.com/jwillis0720/sadie/workflows/MacOS%20Build%20and%20Test/badge.svg">
<img src="https://github.com/jwillis0720/sadie/workflows/MacOS%20Build%20and%20Test/badge.svg"
alt="Mac Build">
<a href="https://github.com/jwillis0720/sadie/actions/workflows/pyright.yml/badge.svg">
<img src="https://github.com/jwillis0720/sadie/actions/workflows/pyright.yml/badge.svg"
alt="Static Type">
</div>
</div>
<!-- markdownlint-restore -->
## About
---
<!-- use a href so you can use _blank to open new tab -->
**Documentation**: <a href="https://sadie.jordanrwillis.com" target="_blank">https://sadie.jordanrwillis.com</a>
**Source Code**: <a href="https://github.com/jwillis0720/sadie" target="_blank">https://github.com/jwillis0720/sadie</a>
**Colab**: [https://colab.research.google.com/github/jwillis0720/sadie](https://colab.research.google.com/github/jwillis0720/sadie/blob/main/notebooks/airr_c/SADIE_DEMO.ipynb)
---
SADIE is the **S**equencing **A**nalysis and **D**ata library for **I**mmunoinformatics **E**xploration. The key features include:
- Provide pre-built **command line applications** for popular immunoinformatics applications.
- Provide a **low-level API framework** for immunoinformatics developers to build higher level tools.
- Provide a **testable** and **reusable** library that WORKS!
- Provide a **customizable** and **verified** germline reference library.
- Maintain data formats consistent with standards governed by the [**AIRR community**](https://docs.airr-community.org/en/stable/#table-of-contents)
- **Portability** ready to use out the box.
SADIE is billed as a "**complete antibody library**", not because it aims to do everything, but because it aims to meet the needs of all immunoinformatics users. SADIE contains low-, mid- and high-level functionality for immunoinformatics tools and workflows. You can use SADIE as a framework to develop your own tools, use many of the prebuilt contributed tools, or run it in a notebook to enable data exploration. In addition, SADIE aims to port all code to Python because it relies heavily on the [Pandas](https://pandas.pydata.org) library, the workhorse of the data science/machine learning age.
## Installation
---
Installation is handled using the python package installer `pip`
```console
$ pip install sadie-antibody
```
### Development installation.
Pull requests are highly encouraged [here](https://github.com/jwillis0720/sadie/pulls). The development installation uses [pre-commit](https://pre-commit.com/), [flake8](https://flake8.pycqa.org/en/latest/) linting and [black](https://github.com/psf/black) style formatting to maintain code readability and reusability.
```console
$ git clone git@github.com:jwillis0720/sadie.git
$ pip install poetry
$ poetry install --with dev
```
## Quick Usage
Consult the [documentation](https://sadie.jordanrwillis.com) for complete usage. Or checkout our [Colab](https://colab.research.google.com/github/jwillis0720/sadie/blob/main/notebooks/airr_c/SADIE_DEMO.ipynb) notebook
### Command Line Usage
Annotate antibody sequences only from functional human imgt antibodies to a gzip output
```console
$ sadie airr my_sequence.fasta
```
### API
```python
from sadie.airr import Airr
# define a single sequence
pg9_seq = """
CAGCGATTAGTGGAGTCTGGGGGAGGCGTGGTCCAGCCTGGGTCGTCCCTGAGACTCTCCTGTGCAGCGT
CCGGATTCGACTTCAGTAGACAAGGCATGCACTGGGTCCGCCAGGCTCCAGGCCAGGGGCTGGAGTGGGT
GGCATTTATTAAATATGATGGAAGTGAGAAATATCATGCTGACTCCGTATGGGGCCGACTCAGCATCTCC
AGAGACAATTCCAAGGATACGCTTTATCTCCAAATGAATAGCCTGAGAGTCGAGGACACGGCTACATATT
TTTGTGTGAGAGAGGCTGGTGGGCCCGACTACCGTAATGGGTACAACTATTACGATTTCTATGATGGTTA
TTATAACTACCACTATATGGACGTCTGGGGCAAAGGGACCACGGTCACCGTCTCGAGC""".replace(
"\n", ""
)
# initialize the api
air_api = Airr("human")
# run single sequence string
airr_table = air_api.run_single("PG9", pg9_seq)
```
## License
[](https://opensource.org/licenses/MIT)
- Copyright © Jordan R. Willis & Troy M. Sincomb
import re
from functools import lru_cache
from typing import Any, List, Optional, Set, Union
from uuid import UUID, uuid4
from Bio.Seq import Seq
from pandas._libs.missing import NAType
from pydantic import BaseModel, validator
@lru_cache(maxsize=1)
def get_nt_validator_regex() -> "re.Pattern[str]":
    """Compile (once, via lru_cache) the nucleotide-sequence validator regex.

    The pattern accepts one or more IUPAC nucleotide codes, upper or lower
    case: A, C, G, T, N plus the ambiguity codes Y, R, W, S, K, M, D, V, H, B.

    Returns:
        The compiled pattern.  ``\\Z`` anchoring means the *entire* string
        must consist of valid codes (unlike ``$``, a trailing newline is
        rejected).
    """
    # Precise return annotation (was `Any`) so callers get type checking on
    # the returned Pattern; quoted to avoid runtime subscripting of
    # re.Pattern on older interpreters.
    return re.compile(r"^[ACNTGYRWSKMDVHBacntgyrwskmdvhb]+\Z")
@lru_cache(maxsize=1)
def get_aa_validator_regex() -> "re.Pattern[str]":
    """Compile (once, via lru_cache) the amino-acid-sequence validator regex.

    The pattern accepts one or more of the 20 standard amino-acid letters
    plus X (unknown), upper or lower case.

    Returns:
        The compiled pattern.  ``\\Z`` anchoring means the *entire* string
        must consist of valid codes (unlike ``$``, a trailing newline is
        rejected).
    """
    # Precise return annotation (was `Any`) so callers get type checking on
    # the returned Pattern; quoted to avoid runtime subscripting of
    # re.Pattern on older interpreters.
    return re.compile(r"^[ACDEFGHIKLMNPQRSTVWXYacdefghiklmnpqrstvwxy]+\Z")
class RearrargmentCategory(BaseModel):
    """The category of a rearrangement annotation object.

    Attributes
    ----------
    category : str
        One of the AIRR rearrangement categories:

        input
            The input sequence to the V(D)J assignment process.
        identifiers
            Primary and foreign key identifiers for linking AIRR data across
            files and databases.
        primary_annotations
            The primary outputs of the V(D)J assignment process: gene locus,
            V/D/J/C gene calls, various flags, V(D)J junction sequence, copy
            number (duplicate_count), and the number of reads contributing to
            a consensus input sequence (consensus_count).
        alignment_annotations
            Detailed alignment annotations: the input and germline sequences
            used in the alignment; score, identity and statistical support
            (E-value, likelihood, etc.); and the alignment itself through
            CIGAR strings for each aligned gene.
        alignment_positions
            The start/end positions for genes in both the input and germline
            sequences.
        region_sequence_annotations
            Sequence annotations for the framework regions (FWRs) and
            complementarity-determining regions (CDRs).
        region_positions
            Positional annotations for the FWRs and CDRs.
        junction_lengths
            Lengths for junction sub-regions associated with aspects of the
            V(D)J recombination process.
    """

    category: str

    @validator("category")
    @classmethod
    def validate_category(cls, v: str) -> str:
        """Reject any value outside the fixed category vocabulary."""
        valid_categories: Set[str] = {
            "input",
            "identifiers",
            "primary_annotations",
            "alignment_annotations",
            "alignment_positions",
            "region_sequence_annotations",
            "region_positions",
            "junction_lengths",
        }
        if v in valid_categories:
            return v
        raise ValueError(f"{v} is not a valid category, use {valid_categories}")
class InputSequence(BaseModel):
    """The input portion of an AIRR Rearrangement record.

    The required VDJ sequence fields are taken from
    https://docs.airr-community.org/en/stable/datarep/rearrangements.html

    Attributes - AIRR 1.3
    ---------------------
    sequence_id : Optional[Union[str, UUID]]
        Unique query sequence identifier for the Rearrangement.  Most often
        this will be the input sequence header or a substring thereof, but it
        may also be a custom identifier defined by the tool in cases where
        query sequences have been combined in some fashion prior to
        alignment.  If not given explicitly, a random UUID is generated.
    sequence : Union[Seq, str]
        The VDJ-C sequence of the rearrangement that starts at the first nt
        of the alignment.  It is reverse complemented if necessary.
    sequence_aa : Optional[Union[Seq, str]]
        Amino acid translation of the nucleotide sequence (not the raw
        sequence).

    Attributes - non-AIRR
    ---------------------
    raw_sequence : Optional[Union[Seq, str]]
        The unmodified query sequence.
    category : Optional[RearrargmentCategory]
        The category of the rearrangement object (always "input" here).
    """

    sequence_id: Optional[Union[str, UUID]]
    sequence: Union[Seq, str]
    raw_sequence: Optional[Union[Seq, str]] = None  # non airr
    sequence_aa: Optional[Union[Seq, str]] = None
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="input")

    @validator("sequence_id", always=True)
    @classmethod
    def validate_sequence_id(cls, v: Optional[Union[str, UUID]]) -> str:
        """If no sequence id is provided, generate a unique UUID."""
        if not v:
            v = uuid4()
        return str(v)

    @validator("sequence")
    @classmethod
    def validate_sequence(cls, v: Union[Seq, str]) -> Seq:
        """Check for a valid nucleotide sequence and return it as a Seq."""
        nt_validator: re.Pattern[Any] = get_nt_validator_regex()
        if isinstance(v, Seq):
            v = str(v)
        if not nt_validator.match(v):
            # BUGFIX: the old message claimed only ACGTN were allowed, but the
            # validator accepts the full IUPAC alphabet — say so explicitly.
            raise ValueError(
                f"{v} is not a valid nucleotide sequence, must only contain IUPAC nucleotide codes (ACGTN and ambiguity codes YRWSKMDVHB)"
            )
        return Seq(v)

    @validator("raw_sequence")
    @classmethod
    def validate_raw_sequence(cls, v: Union[Seq, str]) -> Seq:
        """Check for a valid nucleotide sequence and return it as a Seq."""
        nt_validator: re.Pattern[Any] = get_nt_validator_regex()
        if isinstance(v, Seq):
            v = str(v)
        if not nt_validator.match(v):
            # Same IUPAC-aware message as validate_sequence (kept in sync).
            raise ValueError(
                f"{v} is not a valid nucleotide sequence, must only contain IUPAC nucleotide codes (ACGTN and ambiguity codes YRWSKMDVHB)"
            )
        return Seq(v)

    @validator("sequence_aa")
    @classmethod
    def validate_sequence_aa(cls, v: Union[Seq, str]) -> Union[None, Seq]:
        """Check for a valid amino-acid sequence and return it as a Seq."""
        aa_validator: re.Pattern[Any] = get_aa_validator_regex()
        if isinstance(v, Seq):
            v = str(v)
        if not aa_validator.match(v):
            raise ValueError(f"{v} is not a valid amino acid sequence, must only contain ACDEFGHIKLMNPQRSTVWXY")
        return Seq(v)

    @staticmethod
    def get_airr_fields() -> List[str]:
        """Names of this model's fields that belong to the AIRR standard."""
        return ["sequence_id", "sequence"]

    class Config:
        # Needed so pydantic accepts Bio.Seq, which has no native validator.
        arbitrary_types_allowed = True
class PrimaryAnnotations(BaseModel):
"""
The primary outputs of the V(D)J assignment process, which includes the gene locus, V, D, J, and C gene calls, various flags, V(D)J junction sequence, copy number (duplicate_count), and the number of reads contributing to a consensus input sequence (consensus_count). Taken from https://docs.airr-community.org/en/stable/datarep/rearrangements.html
Attributes - AIRR 1.3
---------------------
rev_comp: bool
True if the alignment is on the opposite strand (reverse complemented) with respect to the query sequence. If True then all output data, such as alignment coordinates and sequences, are based on the reverse complement of ‘sequence’.
productive: bool
True if the V(D)J sequence is predicted to be productive:
1. Coding region has an open reading frame
2. No defect in the start codon, splicing sites or regulatory elements.
3. No internal stop codons.
4. An in-frame junction region.
vj_in_frame bool:
True if the V and J gene alignments are in-frame
stop_codon: Optional[bool]
True if the aligned sequence contains a stop codon.
complete_vdj: Optional[bool]
True if the sequence alignment spans the entire V(D)J region. Meaning, sequence_alignment includes both the first V gene codon that encodes the mature polypeptide chain (i.e., after the leader sequence) and the last complete codon of the J gene (i.e., before the J:C splice site). This does not require an absence of deletions within the internal FWR and CDR regions of the alignment.
locus: Optional[str]
Gene locus (chain type). Note that this field uses a controlled vocabulary that is meant to provide a generic classification of the locus, not necessarily the correct designation according to a specific nomenclature. here are the loci IGH, IGK, IGL, TRA, TRB, TRD, or TRG.
v_call: Union[str, List[str]]
V gene with allele. If referring to a known reference sequence in a database the relevant gene/allele nomenclature should be followed (e.g., IGHV4-59*01 if using IMGT/GENE-DB).
d_call: Optional[str]
First or only D gene with allele. If referring to a known reference sequence in a database the relevant gene/allele nomenclature should be followed (e.g., IGHD3-10*01 if using IMGT/GENE-DB).
d2_call: Optional[str]
Second D gene with allele. If referring to a known reference sequence in a database the relevant gene/allele nomenclature should be followed (e.g., IGHD3-10*01 if using IMGT/GENE-DB).
j_call: Union[str, List[str]]
J gene with allele. If referring to a known reference sequence in a database the relevant gene/allele nomenclature should be followed (e.g., IGHJ4*02 if using IMGT/GENE-DB).
c_call: Optional[str]
Constant region gene with allele. If referring to a known reference sequence in a database the relevant gene/allele nomenclature should be followed (e.g., IGHG1*01 if using IMGT/GENE-DB).
Attributes - Non-AIRR
---------------------
v_call_top: Optional[str]
The top V gene call in a comma seperated list of V gene calls.
v_call_top_gene: Optional[str]
The top V gene call without the allele. (ex. IGHV1-69)
v_call_top_allele: Optional[str]
The top V gene call alleld. (ex. *01)
d_call_top: Optional[str]
The top D gene call in a comma seperated list of D gene calls.
d_call_gene: Optional[str]
The top D gene call without the allele. (ex. IGHD3-3)
d_call_allele: Optional[str]
The top D gene call alleld. (ex. *01)
j_call_top: Optional[str]
The top J gene call in a comma seperated list of J gene calls.
j_call_top_gene: Optional[str] # Non Airr
The top J gene call without the allele. (ex. IGHJ4)
j_call_top_allele: Optional[str] # Non Airr
The top J gene call alleld. (ex. *02)
c_call_allele: Optional[str] # Non Airr
The top C gene call alleld. (ex. *01)
category: Optional[RearrargmentCategory] = RearrargmentCategory(category="primary_annotations")
The category of the rearrangement object
"""
# Airr fields
rev_comp: bool
productive: bool
vj_in_frame: Optional[bool] = None
stop_codon: Optional[bool] = None
complete_vdj: Optional[bool] = None
locus: Optional[str] = None
v_call: Union[str, List[str]]
d_call: Optional[Union[str, List[str]]] = None
d2_call: Optional[str] = None
j_call: Union[str, List[str]]
c_call: Optional[str] = None
# Non Airr fields
v_call_top: Optional[str] = None
v_call_top_gene: Optional[str] = None
v_call_top_allele: Optional[str] = None
d_call_top: Optional[str] = None
d_call_gene: Optional[str] = None
d_call_allele: Optional[str] = None
j_call_top: Optional[str] = None
j_call_top_gene: Optional[str] = None
j_call_top_allele: Optional[str] = None
c_call_allele: Optional[str] = None
reference_name: Optional[str] = None
category: Optional[RearrargmentCategory] = RearrargmentCategory(category="primary_annotations")
@staticmethod
def get_airr_fields() -> List[str]:
return [
"rev_comp",
"productive",
"vj_in_frame",
"stop_codon",
"complete_vdj",
"locus",
"reference_name",
"v_call",
"d_call",
"j_call",
"v_call_top",
"d_call_top",
"j_call_top",
]
class Config:
    # Pydantic model configuration: accept field types pydantic has no
    # built-in validator for (e.g. the project-defined category objects).
    arbitrary_types_allowed = True
class AlignmentAnnotations(BaseModel):
    """
    Detailed alignment annotations including the input and germline sequences used in the alignment; score, identity, statistical support (E-value, likelihood, etc); and the alignment itself through CIGAR strings for each aligned gene.

    Attributes - AIRR 1.3:
    ---------------------
    sequence_alignment: Union[str, Seq]
        Aligned portion of query sequence, including any indel corrections or numbering spacers, such as IMGT-gaps. Typically, this will include only the V(D)J region, but that is not a requirement.
    sequence_alignment_aa: Optional[Union[str, Seq]]
        Amino acid translation of the aligned query sequence.
    germline_alignment: Union[str, Seq]
        Assembled, aligned, full-length inferred germline sequence spanning the same region as the sequence_alignment field (typically the V(D)J region) and including the same set of corrections and spacers (if any)
    germline_alignment_aa: Optional[Union[str, Seq]]
        Amino acid translation of the aligned germline sequence.
    v_score: Optional[float]
        Alignment score for the V gene.
    v_identity: Optional[float]
        Fractional identity of the V gene alignment
    v_support: Optional[float]
        V gene alignment E-value, p-value, likelihood, probability or other similar measure of support for the V gene assignment as defined by the alignment tool.
    v_cigar: str
        CIGAR string for the V gene alignment.
    d_score: Optional[float]
        Alignment score for the D gene.
    d_identity: Optional[float]
        Fractional identity of the D gene alignment
    d_support: Optional[float]
        D gene alignment E-value, p-value, likelihood, probability or other similar measure of support for the D gene assignment as defined by the alignment tool.
    d_cigar: str
        CIGAR string for the D gene alignment.
    d2_score: Optional[float]
        Alignment score for the second D gene.
    d2_identity: Optional[float]
        Fractional identity of the second D gene alignment
    d2_support: Optional[float]
        Second D gene alignment E-value, p-value, likelihood, probability or other similar measure of support for the second D gene assignment as defined by the alignment tool.
    d2_cigar: Optional[str]
        CIGAR string for the second D gene alignment.
    j_score: Optional[float]
        Alignment score for the J gene.
    j_identity: Optional[float]
        Fractional identity of the J gene alignment
    j_support: Optional[float]
        J gene alignment E-value, p-value, likelihood, probability or other similar measure of support for the J gene assignment as defined by the alignment tool.
    j_cigar: str
        CIGAR string for the J gene alignment.
    junction: Union[str, Seq]
        Junction region nucleotide sequence, where the junction is defined as the CDR3 plus the two flanking conserved codons.
    junction_aa: Optional[Union[str, Seq]]
        Amino acid translation of the junction.
    np1: Optional[Union[str, Seq]]
        Nucleotide sequence of the combined N/P region between the V gene and first D gene alignment or between the V gene and J gene alignments.
    np1_aa: Optional[Union[str, Seq]]
        Amino acid translation of the np1 field.
    np2: Optional[Union[str, Seq]]
        Nucleotide sequence of the combined N/P region between either the first D gene and J gene alignments or the first D gene and second D gene alignments.
    np2_aa: Optional[Union[str, Seq]]
        Amino acid translation of the np2 field.
    np3: Optional[Union[str, Seq]]
        Nucleotide sequence of the combined N/P region between the second D gene and J gene alignments.
    np3_aa: Optional[Union[str, Seq]]
        Amino acid translation of the np3 field.
    c_score: Optional[float]
        Alignment score for the C gene alignment.
    c_identity: Optional[float]
        Fractional identity of the C gene alignment
    c_support: Optional[float]
        C gene alignment E-value, p-value, likelihood, probability or other similar measure of support for the C gene assignment as defined by the alignment tool.
    c_cigar: Optional[str]
        CIGAR string for the C gene alignment.

    Attributes - Non AIRR:
    ---------------------
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="alignment_annotations")
        The category of the rearrangement object
    """

    sequence_alignment: Union[str, Seq]
    sequence_alignment_aa: Optional[Union[str, Seq]] = None
    germline_alignment: Union[str, Seq]
    germline_alignment_aa: Optional[Union[str, Seq]] = None
    v_score: Optional[float] = None
    v_identity: Optional[float] = None
    v_support: Optional[float] = None
    v_cigar: str
    d_score: Optional[float] = None
    d_identity: Optional[float] = None
    d_support: Optional[float] = None
    d_cigar: str
    d2_score: Optional[float] = None
    d2_identity: Optional[float] = None
    d2_support: Optional[float] = None
    d2_cigar: Optional[str] = None
    j_score: Optional[float] = None
    j_identity: Optional[float] = None
    j_support: Optional[float] = None
    j_cigar: str
    junction: Union[str, Seq]
    junction_aa: Optional[Union[str, Seq]] = None
    np1: Optional[Union[str, Seq]] = None
    np1_aa: Optional[Union[str, Seq]] = None
    np2: Optional[Union[str, Seq]] = None
    np2_aa: Optional[Union[str, Seq]] = None
    np3: Optional[Union[str, Seq]] = None
    np3_aa: Optional[Union[str, Seq]] = None
    c_score: Optional[float] = None
    c_identity: Optional[float] = None
    c_support: Optional[float] = None
    c_cigar: Optional[str] = None
    # Non Airr: category tag identifying this annotation section
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="alignment_annotations")

    class Config:
        # Accept non-pydantic field types such as Bio.Seq objects.
        arbitrary_types_allowed = True

    @staticmethod
    def get_airr_fields() -> List[str]:
        """Return the AIRR field names serialized from this section.

        NOTE(review): this list omits several fields declared above
        (np1_aa, np2_aa, np3, np3_aa and the d2_*/c_* fields) --
        presumably intentional; confirm against the serialization code.
        """
        return [
            "sequence_alignment",
            "sequence_alignment_aa",
            "germline_alignment",
            "germline_alignment_aa",
            "v_score",
            "v_identity",
            "v_support",
            "v_cigar",
            "d_score",
            "d_identity",
            "d_support",
            "d_cigar",
            "j_score",
            "j_identity",
            "j_support",
            "j_cigar",
            "junction",
            "junction_aa",
            "np1",
            "np2",
        ]
class AlignmentPositions(BaseModel):
    """
    The start/end positions for genes in both the input and germline sequences.

    All positions are 1-based closed intervals.  Any position may be supplied
    as pandas' missing-value singleton (``NAType``); the shared validator
    below normalizes NA to ``None``.

    Attributes:
    ----------
    v_sequence_start: Optional[int]
        Start position of the V gene in the query sequence (1-based closed interval).
    v_sequence_end: Optional[int]
        End position of the V gene in the query sequence (1-based closed interval).
    v_germline_start: Optional[int]
        Alignment start position in the V gene reference sequence (1-based closed interval).
    v_germline_end: Optional[int]
        Alignment end position in the V gene reference sequence (1-based closed interval).
    v_alignment_start: Optional[int]
        Start position of the V gene alignment in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    v_alignment_end: Optional[int]
        End position of the V gene alignment in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    d_sequence_start: Optional[int]
        Start position of the first or only D gene in the query sequence. (1-based closed interval).
    d_sequence_end: Optional[int]
        End position of the first or only D gene in the query sequence. (1-based closed interval).
    d_germline_start: Optional[int]
        Alignment start position in the D gene reference sequence for the first or only D gene (1-based closed interval).
    d_germline_end: Optional[int]
        Alignment end position in the D gene reference sequence for the first or only D gene (1-based closed interval).
    d_alignment_start: Optional[int]
        Start position of the first or only D gene in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    d_alignment_end: Optional[int]
        End position of the first or only D gene in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    d2_sequence_start: Optional[int]
        Start position of the second D gene in the query sequence (1-based closed interval).
    d2_sequence_end: Optional[int]
        End position of the second D gene in the query sequence (1-based closed interval).
    d2_germline_start: Optional[int]
        Alignment start position in the second D gene reference sequence (1-based closed interval).
    d2_germline_end: Optional[int]
        Alignment end position in the second D gene reference sequence (1-based closed interval).
    d2_alignment_start: Optional[int]
        Start position of the second D gene alignment in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    d2_alignment_end: Optional[int]
        End position of the second D gene alignment in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    j_sequence_start: Optional[int]
        Start position of the J gene in the query sequence (1-based closed interval).
    j_sequence_end: Optional[int]
        End position of the J gene in the query sequence (1-based closed interval).
    j_germline_start: Optional[int]
        Alignment start position in the J gene reference sequence (1-based closed interval).
    j_germline_end: Optional[int]
        Alignment end position in the J gene reference sequence (1-based closed interval).
    j_alignment_start: Optional[int]
        Start position of the J gene alignment in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    j_alignment_end: Optional[int]
        End position of the J gene alignment in both the sequence_alignment and germline_alignment fields (1-based closed interval).
    """

    v_sequence_start: Optional[Union[int, NAType]] = None
    v_sequence_end: Optional[Union[int, NAType]] = None
    v_germline_start: Optional[Union[int, NAType]] = None
    v_germline_end: Optional[Union[int, NAType]] = None
    v_alignment_start: Optional[Union[int, NAType]] = None
    v_alignment_end: Optional[Union[int, NAType]] = None
    d_sequence_start: Optional[Union[int, NAType]] = None
    d_sequence_end: Optional[Union[int, NAType]] = None
    d_germline_start: Optional[Union[int, NAType]] = None
    d_germline_end: Optional[Union[int, NAType]] = None
    d_alignment_start: Optional[Union[int, NAType]] = None
    d_alignment_end: Optional[Union[int, NAType]] = None
    d2_sequence_start: Optional[Union[int, NAType]] = None
    d2_sequence_end: Optional[Union[int, NAType]] = None
    d2_germline_start: Optional[Union[int, NAType]] = None
    d2_germline_end: Optional[Union[int, NAType]] = None
    d2_alignment_start: Optional[Union[int, NAType]] = None
    d2_alignment_end: Optional[Union[int, NAType]] = None
    # Fix: the j_* fields were previously annotated as plain Optional[int]
    # even though the shared validator below lists them.  Pandas NA input was
    # therefore rejected by pydantic's type validation before the validator
    # could normalize it to None.  Widening to Union[int, NAType] makes them
    # consistent with the v/d/d2 fields and is backward compatible.
    j_sequence_start: Optional[Union[int, NAType]] = None
    j_sequence_end: Optional[Union[int, NAType]] = None
    j_germline_start: Optional[Union[int, NAType]] = None
    j_germline_end: Optional[Union[int, NAType]] = None
    j_alignment_start: Optional[Union[int, NAType]] = None
    j_alignment_end: Optional[Union[int, NAType]] = None
    # Non Airr: category tag identifying this annotation section
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="alignment_positions")

    @validator(
        "v_sequence_start",
        "v_sequence_end",
        "v_germline_start",
        "v_germline_end",
        "v_alignment_start",
        "v_alignment_end",
        "d_sequence_start",
        "d_sequence_end",
        "d_germline_start",
        "d_germline_end",
        "d_alignment_start",
        "d_alignment_end",
        "d2_sequence_start",
        "d2_sequence_end",
        "d2_germline_start",
        "d2_germline_end",
        "d2_alignment_start",
        "d2_alignment_end",
        "j_sequence_start",
        "j_sequence_end",
        "j_germline_start",
        "j_germline_end",
        "j_alignment_start",
        "j_alignment_end",
    )
    @classmethod
    def validate_with_na(cls, v: Union[int, NAType]) -> Union[int, None]:
        """Normalize pandas NA to None; pass ints and None through unchanged."""
        if v is None or isinstance(v, int):
            return v
        if isinstance(v, NAType):
            return None
        raise ValueError(f"Invalid value for alignment_positions: {v}")

    class Config:
        # Required so pydantic accepts the non-pydantic NAType annotation.
        arbitrary_types_allowed = True

    @staticmethod
    def get_airr_fields() -> List[str]:
        """Return the AIRR position field names (the d2_* fields are not included)."""
        return [
            "v_sequence_start",
            "v_sequence_end",
            "v_germline_start",
            "v_germline_end",
            "v_alignment_start",
            "v_alignment_end",
            "d_sequence_start",
            "d_sequence_end",
            "d_germline_start",
            "d_germline_end",
            "d_alignment_start",
            "d_alignment_end",
            "j_sequence_start",
            "j_sequence_end",
            "j_germline_start",
            "j_germline_end",
            "j_alignment_start",
            "j_alignment_end",
        ]
class RegionSequences(BaseModel):
    """
    Sequence annotations for the framework regions (FWRs) and complementarity-determining regions (CDRs).

    Attributes
    ----------
    fwr1: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned FWR1 region.
    fwr1_aa: Optional[Union[str,Seq]]
        Amino acid translation of the fwr1 field.
    cdr1: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned CDR1 region.
    cdr1_aa: Optional[Union[str,Seq]]
        Amino acid translation of the cdr1 field.
    fwr2: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned FWR2 region.
    fwr2_aa: Optional[Union[str,Seq]]
        Amino acid translation of the fwr2 field.
    cdr2: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned CDR2 region.
    cdr2_aa: Optional[Union[str,Seq]]
        Amino acid translation of the cdr2 field.
    fwr3: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned FWR3 region.
    fwr3_aa: Optional[Union[str,Seq]]
        Amino acid translation of the fwr3 field.
    cdr3: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned CDR3 region.
    cdr3_aa: Optional[Union[str,Seq]]
        Amino acid translation of the cdr3 field.
    fwr4: Optional[Union[str,Seq]]
        Nucleotide sequence of the aligned FWR4 region.
    fwr4_aa: Optional[Union[str,Seq]]
        Amino acid translation of the fwr4 field.

    Attributes - Non AIRR
    ---------------------
    category: Optional[RearrargmentCategory]
        The category of the rearrangement object.
    """

    fwr1: Optional[Union[str, Seq]] = None
    fwr1_aa: Optional[Union[str, Seq]] = None
    cdr1: Optional[Union[str, Seq]] = None
    cdr1_aa: Optional[Union[str, Seq]] = None
    fwr2: Optional[Union[str, Seq]] = None
    fwr2_aa: Optional[Union[str, Seq]] = None
    cdr2: Optional[Union[str, Seq]] = None
    cdr2_aa: Optional[Union[str, Seq]] = None
    fwr3: Optional[Union[str, Seq]] = None
    fwr3_aa: Optional[Union[str, Seq]] = None
    cdr3: Optional[Union[str, Seq]] = None
    cdr3_aa: Optional[Union[str, Seq]] = None
    fwr4: Optional[Union[str, Seq]] = None
    fwr4_aa: Optional[Union[str, Seq]] = None
    # Non Airr: category tag identifying this annotation section
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="region_sequence_annotations")

    class Config:
        # Accept non-pydantic field types such as Bio.Seq objects.
        arbitrary_types_allowed = True

    @staticmethod
    def get_airr_fields() -> List[str]:
        """Return the AIRR region-sequence field names (all fields above except category)."""
        return [
            "fwr1",
            "fwr1_aa",
            "cdr1",
            "cdr1_aa",
            "fwr2",
            "fwr2_aa",
            "cdr2",
            "cdr2_aa",
            "fwr3",
            "fwr3_aa",
            "cdr3",
            "cdr3_aa",
            "fwr4",
            "fwr4_aa",
        ]
class RegionPositions(BaseModel):
    """
    Positional annotations for the framework regions (FWRs) and complementarity-determining regions (CDRs).

    Attributes
    ----------
    fwr1_start: Optional[int]
        FWR1 start position in the query sequence (1-based closed interval).
    fwr1_end: Optional[int]
        FWR1 end position in the query sequence (1-based closed interval).
    cdr1_start: Optional[int]
        CDR1 start position in the query sequence (1-based closed interval).
    cdr1_end: Optional[int]
        CDR1 end position in the query sequence (1-based closed interval).
    fwr2_start: Optional[int]
        FWR2 start position in the query sequence (1-based closed interval).
    fwr2_end: Optional[int]
        FWR2 end position in the query sequence (1-based closed interval).
    cdr2_start: Optional[int]
        CDR2 start position in the query sequence (1-based closed interval).
    cdr2_end: Optional[int]
        CDR2 end position in the query sequence (1-based closed interval).
    fwr3_start: Optional[int]
        FWR3 start position in the query sequence (1-based closed interval).
    fwr3_end: Optional[int]
        FWR3 end position in the query sequence (1-based closed interval).
    cdr3_start: Optional[int]
        CDR3 start position in the query sequence (1-based closed interval).
    cdr3_end: Optional[int]
        CDR3 end position in the query sequence (1-based closed interval).
    fwr4_start: Optional[int]
        FWR4 start position in the query sequence (1-based closed interval).
    fwr4_end: Optional[int]
        FWR4 end position in the query sequence (1-based closed interval).
    """

    fwr1_start: Optional[int] = None
    fwr1_end: Optional[int] = None
    cdr1_start: Optional[int] = None
    cdr1_end: Optional[int] = None
    fwr2_start: Optional[int] = None
    fwr2_end: Optional[int] = None
    cdr2_start: Optional[int] = None
    cdr2_end: Optional[int] = None
    fwr3_start: Optional[int] = None
    fwr3_end: Optional[int] = None
    cdr3_start: Optional[int] = None
    cdr3_end: Optional[int] = None
    fwr4_start: Optional[int] = None
    fwr4_end: Optional[int] = None
    # Non Airr: category tag identifying this annotation section
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="region_positions")

    class Config:
        # Accept non-pydantic field types (the category object above).
        arbitrary_types_allowed = True

    @staticmethod
    def get_airr_fields() -> List[str]:
        """Return the AIRR region-position field names (all fields above except category)."""
        return [
            "fwr1_start",
            "fwr1_end",
            "cdr1_start",
            "cdr1_end",
            "fwr2_start",
            "fwr2_end",
            "cdr2_start",
            "cdr2_end",
            "fwr3_start",
            "fwr3_end",
            "cdr3_start",
            "cdr3_end",
            "fwr4_start",
            "fwr4_end",
        ]
class JunctionLengths(BaseModel):
    """
    Lengths for junction sub-regions associated with aspects of the V(D)J recombination process.

    Attributes
    ----------
    junction_length: Optional[int]
        Number of nucleotides in the junction sequence.
    junction_aa_length: Optional[int]
        Number of amino acids in the junction sequence.
    np1_length: Optional[int]
        Number of nucleotides between the V gene and first D gene alignments or between the V gene and J gene alignments.
    np2_length: Optional[int]
        Number of nucleotides between either the first D gene and J gene alignments or the first D gene and second D gene alignments.
    np3_length: Optional[int]
        Number of nucleotides between the second D gene and J gene alignments.
    n1_length: Optional[int]
        Number of untemplated nucleotides 5’ of the first or only D gene alignment.
    n2_length: Optional[int]
        Number of untemplated nucleotides 3’ of the first or only D gene alignment.
    n3_length: Optional[int]
        Number of untemplated nucleotides 3’ of the second D gene alignment.
    p3v_length: Optional[int]
        Number of palindromic nucleotides 3’ of the V gene alignment.
    p5d_length: Optional[int]
        Number of palindromic nucleotides 5’ of the first or only D gene alignment.
    p3d_length: Optional[int]
        Number of palindromic nucleotides 3’ of the first or only D gene alignment.
    p5d2_length: Optional[int]
        Number of palindromic nucleotides 5’ of the second D gene alignment.
    p3d2_length: Optional[int]
        Number of palindromic nucleotides 3’ of the second D gene alignment.
    p5j_length: Optional[int]
        Number of palindromic nucleotides 5’ of the J gene alignment.
    """

    junction_length: Optional[int] = None
    junction_aa_length: Optional[int] = None
    np1_length: Optional[int] = None
    np2_length: Optional[int] = None
    np3_length: Optional[int] = None
    n1_length: Optional[int] = None
    n2_length: Optional[int] = None
    n3_length: Optional[int] = None
    p3v_length: Optional[int] = None
    p5d_length: Optional[int] = None
    p3d_length: Optional[int] = None
    p5d2_length: Optional[int] = None
    p3d2_length: Optional[int] = None
    p5j_length: Optional[int] = None
    # Non Airr: category tag identifying this annotation section
    category: Optional[RearrargmentCategory] = RearrargmentCategory(category="junction_lengths")

    class Config:
        # Accept non-pydantic field types (the category object above).
        arbitrary_types_allowed = True

    @staticmethod
    def get_airr_fields() -> List[str]:
        """Return the AIRR field names for this section.

        NOTE(review): only the two junction lengths are returned; the n/p
        sub-region lengths declared above are treated as non-AIRR here --
        confirm against the AIRR rearrangement schema.
        """
        return [
            "junction_length",
            "junction_aa_length",
        ]
class ReceptorChain(BaseModel):
    """A single receptor chain bundling all of its annotation sections."""

    # Required sections
    input_sequence: InputSequence
    primary_annotations: PrimaryAnnotations
    alignment_annotations: AlignmentAnnotations
    # Optional sections -- not every annotation pipeline produces them
    alignment_positions: Optional[AlignmentPositions] = None
    region_sequences: Optional[RegionSequences] = None
    region_positions: Optional[RegionPositions] = None
    junction_lengths: Optional[JunctionLengths] = None

    @staticmethod
    def from_single(
        sequence_id: str,
        sequence: Union[str, Seq],
        reference_name: str = "human",
        database: str = "imgt",
    ) -> "ReceptorChain":
        """
        Create a receptor chain from a single sequence.

        Parameters
        ----------
        sequence_id: str
            Identifier for the sequence.
        sequence: Union[str,Seq]
            Sequence data.
        reference_name: str
            Germline reference name passed to the Airr annotator.
        database: str
            NOTE(review): currently unused -- the Airr annotator is
            constructed from reference_name only.  Confirm whether this
            should be forwarded (e.g. to select IMGT vs. a custom database).

        Returns
        -------
        receptor_chain: ReceptorChain
            Receptor chain.
        """
        # Imported here rather than at module scope -- presumably to avoid a
        # circular import with sadie.airr; verify before moving.
        from sadie.airr import Airr
        from sadie.airr.airrtable import AirrSeries, AirrTable

        airr_api = Airr(reference_name)
        result: AirrTable = airr_api.run_single(sequence_id, str(sequence))
        result_sliced: AirrSeries = result.iloc[0]  # type: ignore
        # my py won't stop complaining unless I pass the sliced object back through itself
        return ReceptorChain(**result_sliced.to_receptor_chain_object().__dict__)

    def __str__(self) -> str:
        """Render each annotation section as an underlined header followed by key/value lines."""
        printable: List[str] = []
        for key in self.__fields__.keys():
            sub_obj = self.__dict__[key].__dict__
            printable.append(key)
            printable.append("-" * len(key))
            for sub_key in sub_obj.keys():
                printable.append(f"{sub_key} : {sub_obj[sub_key]}")
            printable.append("\n")
        return "\n".join(printable)
class Antibody(BaseModel):
    """An antibody: a paired heavy and light receptor chain."""

    heavy_chain: ReceptorChain
    light_chain: ReceptorChain
class Antibodies(BaseModel):
    """A container model holding a collection of Antibody objects."""

    antibodies: List[Antibody]
class TCR(BaseModel):
heavy_chain: ReceptorChain
    light_chain: ReceptorChain
def _aa_positions(*bounds: int) -> dict:
    """Expand 14 region-boundary numbers into a flat amino-acid position map.

    ``bounds`` supplies the (start, end) pair for each of the seven antibody
    regions in order: FWR1, CDR1, FWR2, CDR2, FWR3, CDR3, FWR4.  Returns a
    mapping with keys of the form ``"<region>_aa_start"`` /
    ``"<region>_aa_end"``, matching the layout of the original literal.
    """
    regions = ("fwr1", "cdr1", "fwr2", "cdr2", "fwr3", "cdr3", "fwr4")
    mapping = {}
    for region, start, end in zip(regions, bounds[0::2], bounds[1::2]):
        mapping[f"{region}_aa_start"] = start
        mapping[f"{region}_aa_end"] = end
    return mapping


# scheme_numbering[<numbering scheme>][<chain>][<CDR definition>] -> 1-based,
# closed-interval amino-acid boundaries for FWR1..FWR4 under that scheme.
# Values are taken verbatim from the original hand-written table.
scheme_numbering = {
    "imgt": {
        "heavy": {
            "imgt": _aa_positions(1, 26, 27, 38, 39, 55, 56, 65, 66, 104, 105, 117, 118, 128),
            "chothia": _aa_positions(1, 26, 27, 37, 38, 56, 57, 64, 65, 106, 107, 117, 118, 128),
            "abm": _aa_positions(1, 26, 27, 40, 41, 54, 55, 66, 67, 106, 107, 117, 118, 128),
            "kabat": _aa_positions(1, 35, 36, 40, 41, 54, 55, 74, 75, 106, 107, 117, 118, 128),
            # NOTE(review): contact FWR1 ends at 30 but CDR1 starts at 35
            # (gap of 4 positions) -- copied from the original; confirm.
            "contact": _aa_positions(1, 30, 35, 40, 41, 51, 52, 66, 67, 104, 105, 116, 117, 128),
            "scdr": _aa_positions(1, 27, 28, 40, 41, 52, 53, 68, 69, 104, 105, 117, 118, 128),
        },
        "light": {
            "imgt": _aa_positions(1, 26, 27, 38, 39, 55, 56, 65, 66, 104, 105, 117, 118, 128),
            "scdr": _aa_positions(1, 23, 24, 40, 41, 55, 56, 69, 70, 104, 105, 117, 118, 128),
            "chothia": _aa_positions(1, 23, 24, 40, 41, 55, 56, 70, 71, 104, 105, 117, 118, 128),
            "abm": _aa_positions(1, 23, 24, 40, 41, 55, 56, 70, 71, 104, 105, 117, 118, 128),
            "kabat": _aa_positions(1, 23, 24, 40, 41, 55, 56, 70, 71, 104, 105, 117, 118, 128),
            "contact": _aa_positions(1, 29, 30, 42, 43, 51, 52, 69, 70, 104, 105, 116, 117, 128),
        },
    },
    "kabat": {
        "heavy": {
            "imgt": _aa_positions(1, 25, 26, 33, 34, 50, 51, 57, 58, 92, 93, 102, 103, 113),
            "chothia": _aa_positions(1, 25, 26, 32, 33, 51, 52, 56, 57, 94, 95, 102, 103, 113),
            "abm": _aa_positions(1, 25, 26, 35, 36, 49, 50, 58, 59, 94, 95, 102, 103, 113),
            "kabat": _aa_positions(1, 30, 31, 35, 36, 49, 50, 65, 66, 94, 95, 102, 103, 113),
            # NOTE(review): contact CDR2 ends at 58 but FWR3 starts at 69
            # (looks like a typo for 59 upstream) -- preserved verbatim.
            "contact": _aa_positions(1, 29, 30, 35, 36, 46, 47, 58, 69, 92, 93, 101, 102, 113),
            "scdr": _aa_positions(1, 26, 27, 35, 36, 47, 48, 60, 61, 92, 93, 102, 103, 113),
        },
        "light": {
            "imgt": _aa_positions(1, 26, 27, 32, 33, 49, 50, 51, 52, 88, 89, 97, 98, 107),
            "scdr": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            "chothia": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            "abm": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            "kabat": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            # NOTE(review): a "contact" entry was commented out upstream and
            # is intentionally omitted here.
        },
    },
    "chothia": {
        "heavy": {
            "imgt": _aa_positions(1, 25, 26, 33, 34, 50, 51, 57, 58, 92, 93, 102, 103, 113),
            "chothia": _aa_positions(1, 25, 26, 32, 33, 51, 52, 56, 57, 94, 95, 102, 103, 113),
            "abm": _aa_positions(1, 25, 26, 35, 36, 49, 50, 58, 59, 94, 95, 102, 103, 113),
            "kabat": _aa_positions(1, 30, 31, 35, 36, 49, 50, 65, 66, 94, 95, 102, 103, 113),
            # NOTE(review): same 58 -> 69 jump as the kabat/contact entry
            # above -- preserved verbatim.
            "contact": _aa_positions(1, 29, 30, 35, 36, 46, 47, 58, 69, 92, 93, 101, 102, 113),
            "scdr": _aa_positions(1, 26, 27, 35, 36, 47, 48, 60, 61, 92, 93, 102, 103, 113),
        },
        "light": {
            "imgt": _aa_positions(1, 26, 27, 32, 33, 49, 50, 51, 52, 88, 89, 97, 98, 107),
            "scdr": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            "chothia": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            "abm": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
            "kabat": _aa_positions(1, 23, 24, 34, 35, 49, 50, 56, 57, 88, 89, 97, 98, 107),
        },
    },
}
from __future__ import annotations
import logging
import re
from typing import Any, Iterable, List, Optional, Union
import numpy as np
import numpy.typing as npt
import pandas as pd
from Levenshtein import distance as lev_distance
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from sadie.airr import AirrTable, LinkedAirrTable
logger = logging.getLogger("Cluster")
class Cluster:
    """Main clustering class.

    Agglomeratively clusters the rows of an AirrTable or LinkedAirrTable using
    the summed Levenshtein distance over the ``lookup`` columns (the CDRs by
    default), optionally discounting the distance by the number of shared
    somatic mutations.
    """
    def __init__(
        self,
        airrtable: Union[AirrTable, LinkedAirrTable],
        linkage: str = "complete",
        groupby: Optional[str] = None,
        lookup: List[str] = ["cdr1_aa", "cdr2_aa", "cdr3_aa"],
        pad_somatic: bool = False,
        include_only_v_gene: bool = False,
    ):
        """Initialize the clustering class.
        Arguments
        ---------
        airrtable (AirrTable, LinkedAirrTable): The airrtable to cluster.
        linkage (str): The agglomerative linkage criterion. Default is complete.
        groupby (str): Optional column(s); clustering is run independently within each group.
        lookup (list): Columns whose pairwise Levenshtein distances are summed into the metric.
        pad_somatic (bool): Whether to decrease the distance by 1 for every common somatic mutation. Must run mutation analysis first.
        include_only_v_gene (bool): When padding, only count mutations whose position is inside the V gene.
        Raises
        ------
        TypeError
            No airrtable was provided.
        ValueError
            groupby columns must be in the airrtable.
        ValueError
            lookup columns must be in the airrtable
        """
        if not isinstance(airrtable, (AirrTable, LinkedAirrTable)):
            raise TypeError("airrtable table must be a AirrTable or LinkedAirrTable")
        self.include_only_v_gene = include_only_v_gene
        # for linked (paired) tables the default CDR lookup expands into the
        # heavy- and light-chain suffixed column names
        if lookup == ["cdr1_aa", "cdr2_aa", "cdr3_aa"] and isinstance(airrtable, LinkedAirrTable):
            lookup = [i + "_heavy" for i in lookup] + [i + "_light" for i in lookup]
        if groupby is not None:
            # NOTE(review): if groupby is a plain string, set(groupby) is a set
            # of its characters rather than of column names — presumably a list
            # of columns is expected here; confirm against callers.
            diff = set(groupby).difference(set(airrtable.columns))
            if diff:
                raise ValueError(f"groupby column(s) {diff} not found in airrtable")
        if pad_somatic:
            # somatic padding requires the mutation columns produced by a prior
            # mutational analysis run
            if isinstance(airrtable, LinkedAirrTable):
                if "mutations_heavy" not in airrtable.columns or "mutations_light" not in airrtable.columns:
                    raise ValueError(
                        "pad_somatic requires mutations_heavy and mutations_light in columns. Run mutational analysis first with sadie.arirr.methods"
                    )
                else:
                    self.pad_somatic_values = ["mutations_heavy", "mutations_light"]
            else:
                if "mutations" not in airrtable.columns:
                    raise ValueError(
                        "pad_somatic requires mutations_heavy and mutations_light in columns. Run mutational analysis first with sadie.arirr.methods"
                    )
                else:
                    self.pad_somatic_values = ["mutations"]
        diff = set(lookup).difference(set(airrtable.columns))
        if diff:
            raise ValueError(f"lookup column(s) {diff} not found in airrtable")
        self.airrtable = airrtable
        self.linkage = linkage
        self.groupby = groupby
        self.lookup = lookup
        self.key_column = airrtable.key_column
        self.distance_df = None  # N x N pairwise distance matrix, set by cluster()
        self.model = None  # fitted AgglomerativeClustering model, set by cluster()
        self.pad_somatic = pad_somatic
        # track whether we cluster paired (linked) or single-chain records
        if isinstance(self.airrtable, LinkedAirrTable):
            self._type = "linked"
        else:
            self._type = "unlinked"
    def _get_v_gene_only(self, row: Iterable[str]) -> List[str]:
        # Keep only mutations whose leading numeric position is < 94, i.e.
        # those that fall within the V gene segment.
        row = list(row)
        if row:
            return [i for i in row if int(re.findall(r"\d+", i)[0]) < 94]
        return row
    def _get_distance_df(self, df: pd.DataFrame) -> Any:
        """Given a dataframe, get the N x N pairwise distances using Levenshtein distance of the lookup"""
        if self.pad_somatic:
            _lookup = self.lookup + self.pad_somatic_values
            if self.include_only_v_gene:
                logger.info(f"Including only V genes for {len(df)} rows")
                # NOTE(review): this mutates the mutation columns of the passed
                # dataframe in place (self.airrtable in the ungrouped path).
                if self._type == "linked":
                    df["mutations_heavy"] = df["mutations_heavy"].apply(self._get_v_gene_only)
                    df["mutations_light"] = df["mutations_light"].apply(self._get_v_gene_only)
                else:
                    df["mutations"] = df["mutations"].apply(self._get_v_gene_only)
        else:
            _lookup = self.lookup
        # row-index -> {column -> value} lookup used by the metric callback below
        df_lookup = df[_lookup].to_dict(orient="index")
        def calc_lev(x: npt.ArrayLike, y: npt.ArrayLike) -> float:
            # x and y are 1-element arrays holding dataframe index labels
            dist = 0
            # base distance: sum of Levenshtein distances over the lookup columns
            for metric in self.lookup:
                dist += lev_distance(str(df_lookup[x[0]][metric]), str(df_lookup[y[0]][metric]))  # type: ignore[index]
            # off-diagonal entries only: subtract one unit per shared mutation
            if self.pad_somatic and x[0] != y[0]:  # type: ignore[index]
                if len(self.pad_somatic_values) == 2:
                    _mutations_1_heavy = df_lookup[x[0]][self.pad_somatic_values[0]]  # type: ignore[index]
                    _mutations_2_heavy = df_lookup[y[0]][self.pad_somatic_values[0]]  # type: ignore[index]
                    _mutations_1_light = df_lookup[x[0]][self.pad_somatic_values[1]]  # type: ignore[index]
                    _mutations_2_light = df_lookup[y[0]][self.pad_somatic_values[1]]  # type: ignore[index]
                    subtract_heavy = len(np.intersect1d(_mutations_1_heavy, _mutations_2_heavy))
                    subtract_light = len(np.intersect1d(_mutations_1_light, _mutations_2_light))
                    subtract_all = subtract_heavy + subtract_light
                else:
                    _mutations_1 = df_lookup[x[0]][self.pad_somatic_values[0]]  # type: ignore[index]
                    _mutations_2 = df_lookup[y[0]][self.pad_somatic_values[0]]  # type: ignore[index]
                    subtract_all = len(np.intersect1d(_mutations_1, _mutations_2))
                dist -= subtract_all
            # distances must stay non-negative for the clustering model
            return max(dist, 0)
        X: npt.ArrayLike = np.array(df.index).reshape(-1, 1)
        return pairwise_distances(X, metric=calc_lev, n_jobs=-1)
    def cluster(self, distance_threshold: int = 3) -> Union[AirrTable, LinkedAirrTable]:
        """Cluster the data.
        This method clusters the data using the specified linkage and affinity
        methods.
        Arguments
        ---------
        distance_threshold (int): The maximum distance between two points to be. Default is 3.
        Returns
        -------
        AirrTable or LinkedAirrTable
            the input table type with an added ``cluster`` column
        """
        if self.groupby is None:
            self.distance_df = self._get_distance_df(self.airrtable)
            # NOTE(review): `affinity=` was renamed `metric=` in newer
            # scikit-learn releases — confirm the pinned sklearn version.
            model = AgglomerativeClustering(
                linkage=self.linkage, affinity="precomputed", distance_threshold=distance_threshold, n_clusters=None
            )
            model.fit(self.distance_df)
            self.model = model
            # Create the data frame
            self.airrtable["cluster"] = model.labels_
        else:
            cluster_catcher = []
            for g, g_df in self.airrtable.groupby(self.groupby):
                logger.info(f"Clustering group {g}")
                # sub_df = g_df
                sub_df = g_df.copy()
                self.distance_df = self._get_distance_df(sub_df)
                # Calculate the linkage matrix
                model = AgglomerativeClustering(
                    linkage=self.linkage,
                    affinity="precomputed",
                    distance_threshold=distance_threshold,
                    n_clusters=None,
                )
                # a single-member group cannot be fit; it forms its own cluster
                if len(sub_df) == 1:
                    _labels = [0]
                else:
                    model.fit(self.distance_df)
                    _labels = model.labels_
                # Create the data frame; prefix labels with the group key so
                # cluster ids are unique across groups
                if isinstance(g, str):
                    labels = list(map(lambda x: f"{g}_{str(x)}", _labels))
                elif isinstance(g, (list, tuple)):
                    _sub_labels = "_".join([str(i) for i in g])
                    labels = list(map(lambda x: f"{_sub_labels}_{str(x)}", _labels))
                else:
                    raise ValueError("groupby must be a string or a list/tuple of strings")
                sub_df["cluster"] = labels
                cluster_catcher.append(sub_df)
            self.airrtable = pd.concat(cluster_catcher)
        # re-wrap the annotated frame in the appropriate table type
        if self._type == "unlinked":
            return AirrTable(self.airrtable, key_column=self.key_column)
        return LinkedAirrTable(self.airrtable, key_column=self.key_column)
import logging
from ast import literal_eval
import pandas as pd
from numpy import nan
from sadie.numbering.scheme_numbering import scheme_numbering
from .constants import NUMBERING_RESULTS
logger = logging.getLogger("NUMBERING")
class NumberingResults(pd.DataFrame):
    """DataFrame subclass holding antibody numbering results.

    Each row carries parallel ``Numbering``, ``Insertion`` and
    ``Numbered_Sequence`` lists that this class can pivot into alignment
    tables and slice into framework/CDR segments.
    """
    def __init__(self, *args, scheme="", region_definition="", allowed_chains=[], allowed_species=[], **kwargs):
        # use the __init__ method from DataFrame to ensure
        # that we're inheriting the correct behavior
        super(NumberingResults, self).__init__(*args, **kwargs)
        # self["scheme"] = scheme
        # self["region_definition"] = region_definition
        # self["allowed_species"] = ",".join(allowed_species)
        # self["allowed_chains"] = ",".join(allowed_chains)
        # self._add_segment_regions()
    @property
    def _constructor(self):
        # pandas hook: operations on this frame return NumberingResults,
        # not a plain DataFrame
        return NumberingResults
    def get_alignment_table(self) -> pd.DataFrame:
        """Get a numbered alignment table from the numbering and insertions
        Returns
        -------
        pd.DataFrame
            A dataframe with Id, chain_type, scheme and numbering. Values are the amino acid sequences
        """
        all_dataframes = []
        # I'm not sure if there is a more effiecient way to do this other than iterate through the df and pivot each row
        for index in range(len(self)):
            all_dataframes.append(self._pivot_alignment(self.iloc[index]))
        all_dataframes = pd.concat(all_dataframes)
        # all_dataframes = all_dataframes.sort_index(axis=1)
        # all_dataframes = all_dataframes.replace(nan, "-")
        # flatten the (numbering, insertion) MultiIndex into e.g. "100A"
        all_dataframes.columns = list(map(lambda x: str(x[0]) + x[1], all_dataframes.columns.values))
        all_dataframes = all_dataframes.reset_index()
        return self[["Id", "chain_type", "scheme"]].merge(all_dataframes, on="Id").copy()
    def _get_region(self, row, start: int, end: int, segment_name) -> pd.Series:
        """Extract one segment (e.g. fwr1_aa) of a numbered row.

        Zips the parallel numbering/insertion/residue lists, keeps positions in
        ``[start, end]`` and joins the residues; returns both the gapped and
        gap-stripped string for the segment.
        """
        with_segment = "".join(
            list(
                map(
                    lambda x: x[-1],
                    list(
                        filter(
                            lambda x: x[0] >= start and x[0] <= end,
                            list(
                                zip(
                                    row["Numbering"],
                                    row["Insertion"],
                                    row["Numbered_Sequence"],
                                )
                            ),
                        )
                    ),
                )
            )
        )
        without_segment = with_segment.replace("-", "")
        return pd.Series(
            {
                f"{segment_name}_gaps": with_segment,
                f"{segment_name}_no_gaps": without_segment,
            }
        )
    def _add_segment_regions(self) -> "NumberingResults":
        """Private method to delineate the framework and cdr boundaries from the numbering
        Returns
        -------
        NumberingResults
            Instance of NumberingResults
        """
        return_frames = []
        for group, sub_df in self.groupby(["scheme", "region_definition", "Chain"]):
            numbering = group[0]
            chain = {"H": "heavy", "KL": "light"}[group[-1]]
            boundaries = group[1]
            # scheme-specific start/end positions for each segment
            numbering_lookup = scheme_numbering[numbering][chain][boundaries]
            for region in [
                "fwr1_aa",
                "cdr1_aa",
                "fwr2_aa",
                "cdr2_aa",
                "fwr3_aa",
                "cdr3_aa",
                "fwr4_aa",
            ]:
                _start = numbering_lookup[f"{region}_start"]
                _end = numbering_lookup[f"{region}_end"]
                # NOTE(review): this applies over the whole frame (self) and
                # relies on join() restricting to sub_df's index — presumably
                # sub_df.apply was intended; verify before refactoring.
                sub_df = sub_df.join(self.apply(lambda x: self._get_region(x, _start, _end, region), axis=1))
            return_frames.append(sub_df)
        segmented_df = pd.concat(return_frames).reset_index(drop=True)
        # everything preceding the antibody
        segmented_df["leader"] = segmented_df[["sequence", "seqstart_index"]].apply(lambda x: x[0][: x[1]], axis=1)
        # everything following the antibody. keyword tail will clash with pandas
        segmented_df["follow"] = segmented_df[["sequence", "seqend_index"]].apply(lambda x: x[0][x[1] + 1 :], axis=1)
        return segmented_df
    def _pivot_alignment(self, row: pd.Series) -> pd.DataFrame:
        """Private method to pivot a segmented row into an alignment series
        Parameters
        ----------
        row : pd.Series
            indidual Numbering result row
        Returns
        -------
        pivoted dataframe
        """
        # NOTE(review): positional arguments to DataFrame.pivot were removed in
        # pandas 2.0 (index=/columns=/values= are keyword-only) — confirm the
        # pinned pandas version.
        pivoted_df = (
            pd.DataFrame(
                zip(row["Numbering"], row["Insertion"], row["Numbered_Sequence"]),
                columns=["numbering", "insertion", "sequence"],
            )
            .assign(Id=row["Id"])
            .pivot("Id", ["numbering", "insertion"], "sequence")
        )
        return pivoted_df
    @staticmethod
    def read_csv(*args, **kwargs):
        # The list-valued columns are serialized as python literals, so they
        # are re-hydrated with ast.literal_eval on load.
        return NumberingResults(
            pd.read_csv(
                *args,
                index_col=0,
                dtype=NUMBERING_RESULTS,
                converters={"Numbering": literal_eval, "Insertion": literal_eval, "Numbered_Sequence": literal_eval},
                **kwargs,
            )
        )
from collections import UserString
from typing import Callable, Generator
from pydantic.fields import ModelField
# TODO: go through and see which are viable to use; tests need to be fixed first in test_g3 to handle this
# Mapping of accepted species aliases (latin names, common names and legacy
# labels) to the canonical species name used throughout sadie. Keys are
# matched after normalization (strip / lowercase / spaces -> underscores).
SPECIES = {
    "rhesus": "macaque",
    "homo_sapiens": "human",
    "mus": "mouse",
    "rattus_norvegicus": "rat",
    "oryctolagus_cuniculus": "rabbit",
    "macaca_mulatta": "rhesus",
    "sus_scrofa": "pig",
    "vicugna_pacos": "alpaca",
    "bos_taurus": "cow",
    "alpaca": "alpaca",
    "human": "human",
    "macaque": "macaque",
    "mouse": "mouse",
    "rabbit": "rabbit",
    "dog": "dog",
    "cat": "cat",
    "rat": "rat",
    "pig": "pig",
    # Candidate entries kept for reference until test support lands (see TODO above):
    # 'amberjack': 'amberjack',
    # 'bass': 'bass',
    # 'boar': 'boar',
    # 'bull_shark': 'bull_shark',
    # 'camel': 'camel',
    # 'carp': 'carp',
    # 'catfish': 'catfish',
    # 'char': 'char',
    # 'chinese_perch': 'chinese_perch',
    # 'clearnose_skate': 'clearnose_skate',
    # 'cod': 'cod',
    # 'crab_eating_macaque': 'crab_eating_macaque',
    # 'dolphin': 'dolphin',
    # 'ferret': 'ferret',
    # 'flounder': 'flounder',
    # 'goat': 'goat',
    # 'goldfish': 'goldfish',
    # 'horn_shark': 'horn_shark',
    # 'horse': 'horse',
    # 'icefish': 'icefish',
    # 'junglefowl': 'junglefowl',
    # 'ladyfish': 'ladyfish',
    # 'little_skate': 'little_skate',
    # 'night_monkey': 'night_monkey',
    # 'nurse_shark': 'nurse_shark',
    # 'platypus': 'platypus',
    # 'pufferfish': 'pufferfish',
    # 'ratfish': 'ratfish',
    # 'rockcod': 'rockcod',
    # 'salmon': 'salmon',
    # 'sandbar_shark': 'sandbar_shark',
    # 'shark': 'shark',
    # 'sheep': 'sheep',
    # 'spotted_wolffish': 'spotted_wolffish',
    # 'trout': 'trout',
    # 'tubot': 'tubot',
    # 'wobbegong': 'wobbegong',
    # 'zebrafish': 'zebrafish',
}


class Species(UserString):
    """Pydantic-compatible string type that normalizes a species alias to its
    canonical name (e.g. ``"Homo Sapiens"`` -> ``"human"``)."""

    species = SPECIES

    @classmethod
    def __get_validators__(cls) -> Generator[Callable[[str, ModelField], str], None, None]:
        # pydantic v1 custom-type hook: expose the validator chain
        yield cls.validate

    @classmethod
    def validate(cls, value: str, field: ModelField) -> str:
        """Normalize *value* and map it to the canonical species name.

        Raises ValueError when *value* is not a string or not a known alias.
        """
        if not isinstance(value, str):
            raise ValueError(f"{field} [{value}] must be a string")
        normalized = value.strip().lower().replace(" ", "_")
        if normalized not in SPECIES:
            raise ValueError(f"{field} [{normalized}] must be in {SPECIES.keys()}")
        return SPECIES[normalized]
from __future__ import annotations
import warnings
from pathlib import Path
from typing import Optional, Set
from sadie.airr.igblast.igblast import ensure_prefix_to
# package/module level
from sadie.reference import YamlRef
class GermlineData:
    """
    The germline data paths are extremely cumbersome to workwith. This class will abstract away their paths to make it easier to fold into IgBLAST
    Examples
    --------
    >>> gd = GermlineData('human')
    >>> gd.base_dir
    /Users/jwillis/repos/sadie/airr/data/germlines
    >>> gd.v_gene_dir
    /Users/jwillis/repos/sadie/airr/data/germlines/blastdb/Ig/human/human_V'
    >>> gd.aux_path
    /Users/jwillis/repos/sadie/airr/data/germlines/aux_data/human_gl.aux
    """
    def __init__(
        self,
        name: str,
        receptor: str = "Ig",
        database_dir: Optional[str | Path] = None,
        scheme: str = "imgt",
    ):
        """
        Parameters
        ----------
        name : str
            The dataset/species name of interest, e.g. human
        receptor : str, optional
            the receptor type, by default "Ig"
        database_dir : str or Path, optional
            override for the bundled germline data directory
        scheme : str, optional
            numbering scheme used to select the auxiliary file, by default "imgt"
        """
        self.name = name
        if database_dir:
            self.base_dir = Path(database_dir).absolute()
        else:
            # fall back to the germline data shipped with the package
            self.base_dir = Path(__file__).absolute().parent / "../data/germlines/"
        # blast_dir is a file-name prefix (e.g. .../human/human_), not a real path
        self.blast_dir = Path(str(self.base_dir) + f"/{receptor}/blastdb/{name}/{name}_")
        self.v_gene_dir = Path(self.blast_dir.__str__() + "V")
        self.d_gene_dir = Path(self.blast_dir.__str__() + "D")
        self.j_gene_dir = Path(self.blast_dir.__str__() + "J")
        self.c_gene_dir = Path(self.blast_dir.__str__() + "C")
        self.aux_path = self.base_dir / f"aux_db/{scheme}/{name}_gl.aux"
        # the literal 'internal_data/{name}` must be discovered by IgBLAST
        self.igdata = self.base_dir / f"{receptor}/"
    @property
    def base_dir(self) -> Path:
        """The base dir
        Returns
        -------
        Path
            The base directory path that contains all the germline data
        """
        return self._base_dir
    @base_dir.setter
    def base_dir(self, directory: str | Path) -> None:
        _path = Path(directory)
        if not _path.exists():
            raise FileNotFoundError(f"Base directory, {directory} not found")
        self._base_dir = _path
    @property
    def blast_dir(self) -> Path:
        """The blast database file-name prefix (not an existing path itself)."""
        return self._blast_dir
    @blast_dir.setter
    def blast_dir(self, directory: str | Path) -> None:
        # Must be a parent since this is not a valid path yet
        if not Path(directory).parent.exists():
            raise FileNotFoundError(f"Blast directory, {directory} not found")
        self._blast_dir = Path(directory)
    @property
    def v_gene_dir(self) -> Path:
        """The V gene directory prefix for the species of interest
        Returns
        -------
        str
            this is not a qualified path but a glob path.
            human_V does not exists but it's the prefix to human_V.nod and other files used by blast
        """
        return self._v_gene_dir
    @v_gene_dir.setter
    def v_gene_dir(self, directory: str | Path) -> None:
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            raise FileNotFoundError(f"V gene directory glob, {directory} not found")
        self._v_gene_dir = _path
    @property
    def d_gene_dir(self) -> Path:
        """The D gene directory prefix for the species of interest
        Returns
        -------
        str
            this is not a qualified path but a glob path.
            ex: human_D does not exists but it's the prefix to human_D.nod and other files used by blast
        """
        return self._d_gene_dir
    @d_gene_dir.setter
    def d_gene_dir(self, directory: str | Path) -> None:
        _path = Path(directory)
        # D genes are optional (e.g. light chains), so only warn
        if not ensure_prefix_to(_path):
            warnings.warn(f"D gene directory not found for {self.name}", UserWarning)
        self._d_gene_dir = _path
    @property
    def j_gene_dir(self) -> Path:
        """The J gene directory prefix for the species of interest
        Returns
        -------
        str
            this is not a qualified path but a glob path.
            ex: human_J does not exists but it's the prefix to human_j.nod and other files used by blast
        """
        return self._j_gene_dir
    @j_gene_dir.setter
    def j_gene_dir(self, directory: str | Path) -> None:
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            raise FileNotFoundError(f"J gene directory glob, {directory} not found")
        self._j_gene_dir = _path
    @property
    def c_gene_dir(self) -> Path:
        """The C gene directory prefix for the species of interest
        Returns
        -------
        str
            this is not a qualified path but a glob path.
            ex: human_C does not exists but it's the prefix to human_C.nod and other files used by blast
        """
        return self._c_gene_dir
    @c_gene_dir.setter
    def c_gene_dir(self, directory: str | Path) -> None:
        _path = Path(directory)
        # C genes are optional, so only warn
        if not ensure_prefix_to(_path):
            warnings.warn(f"C gene directory not found for {self.name}", UserWarning)
        self._c_gene_dir = _path
    @property
    def aux_path(self) -> Path:
        """The auxillary data path used to reconstruct CDR3 regions.
        Returns
        -------
        Path
            the fully qualified path to the species auxilary data
            ex:/Users/jwillis/repos/sadie/airr/data/germlines/aux_data/{scheme}/human_gl.aux
        """
        return self._aux_path
    @aux_path.setter
    def aux_path(self, directory: str | Path) -> None:
        _path = Path(directory)
        if not _path.exists():
            # BUG FIX: message previously said "J gene directory glob" (copy-paste)
            raise FileNotFoundError(f"Aux path, {directory} not found")
        self._aux_path = _path
    @property
    def igdata(self) -> Path:
        """The IGDATA directory IgBLAST reads its internal_data from."""
        return self._igdata
    @igdata.setter
    def igdata(self, directory: Path) -> None:
        _path = Path(directory)
        if not _path.exists():
            raise FileNotFoundError(f"IGDATA, {directory} not found")
        self._igdata = _path
    @staticmethod
    def get_available_datasets() -> Set[str]:
        """A static non-instantiated method to get a list of avaialble species with the builtin data
        Returns
        -------
        set
            the set of dataset names available in the bundled reference yaml
        """
        y = YamlRef()
        return y.get_names()
import datetime
from typing import Dict, List, Union
from Bio.Seq import Seq
from Bio.SeqFeature import FeatureLocation, SeqFeature
from Bio.SeqRecord import SeqRecord
# Qualifier Dictionary
# Example qualifier dictionary for a GenBankFeature
example_qualifiers_dict = {
    "gene": "gene",
    "latin": "latin",
    "organism": "species",
    "functional": "functional",
    "scheme": "IMGT",
}
# Example record-level annotations
annotations = {
    "organism": "latin",
    "source": "species",
    "date": (datetime.date.today().strftime("%d-%b-%Y")),
}
# Closed set of feature types accepted by GenBankFeature
feature_types = [
    "FWR1",
    "FWR2",
    "FWR3",
    "FWR4",
    "CDR1",
    "CDR2",
    "CDR3",
    "VGene",
    "JGene",
    "DGene",
    "IGK",
    "IGK_Non-Productive",
    "IGH",
    "IGL",
]


class GenBankFeature:
    """Validated wrapper around a Biopython ``SeqFeature`` spanning
    ``[start, end)`` with a type restricted to ``feature_types``."""

    def __init__(
        self,
        start: int,
        end: int,
        feature_type: str,
        id: Union[str, None] = None,
        qualifier_dict: Union[Dict[str, str], None] = None,
    ):
        self.start = start
        self.end = end
        self.id = id
        # validated through the feature_type property setter below
        self.feature_type = feature_type
        # free-form extra metadata attached to the feature
        self.qualifier_dict = qualifier_dict
        self.location = FeatureLocation(self.start, self.end)
        # the underlying Biopython feature object
        self._feature = SeqFeature(self.location, type=self.feature_type, qualifiers=self.qualifier_dict)

    @property
    def feature_type(self) -> str:
        return self._feature_type

    @feature_type.setter
    def feature_type(self, t: str) -> None:
        # guard clause: only the known feature types are accepted
        if t not in feature_types:
            raise TypeError(f"{t} must be in {feature_types}")
        self._feature_type = t

    @property
    def feature(self) -> SeqFeature:
        """The wrapped Biopython SeqFeature."""
        return self._feature
class GenBank:
def __init__(
self, sequence: Union[str, Seq], id: str, name: Union[str, None] = None, description: Union[str, None] = None
):
self.sequence = sequence
self.id = id
if name:
self.name = name
else:
self.name = id[0:16]
self.description = description or "<unknown description>"
# Our main GB record
self._record = SeqRecord(self.sequence, id=self.id, name=self.name, description=self.description)
@property
def record(self) -> SeqRecord:
return self._record
@property
def features(self) -> List[SeqFeature]:
_a: List[SeqFeature] = self.record.features
return _a
@property
def sequence(self) -> Seq:
return self._sequence
@sequence.setter
def sequence(self, seq: Union[str, Seq]) -> None:
if isinstance(seq, str):
self._sequence = Seq(seq)
elif isinstance(seq, Seq):
self._sequence = seq
else:
raise TypeError(f"{type(str)} must be instance of str or Bio.Seq")
def add_feature(self, feature: GenBankFeature) -> None:
if not isinstance(feature, GenBankFeature):
raise TypeError(f"{feature} must be of type {GenBankFeature}")
else:
self.features.append(feature.feature) | /sadie_antibody-1.0.2.tar.gz/sadie_antibody-1.0.2/src/sadie/airr/airrtable/genbank.py | 0.827932 | 0.335052 | genbank.py | pypi |
from typing import Any, Dict
# Pandas dtype maps for AIRR-format columns. Positions/lengths use nullable
# Int16, identities/scores float32, supports (e-values) float64, and free
# text "object".
IGBLAST_AIRR: Dict[Any, str] = {
    "sequence_id": "object",
    "sequence": "object",
    "locus": "object",
    "stop_codon": "object",
    "vj_in_frame": "object",
    "v_frameshift": "object",
    "productive": "object",
    "rev_comp": "object",
    "complete_vdj": "object",
    "sequence_alignment": "object",
    "sequence_alignment_aa": "object",
    "germline_alignment": "object",
    "germline_alignment_aa": "object",
    "fwr1": "object",
    "fwr1_aa": "object",
    "fwr1_start": "Int16",
    "fwr1_end": "Int16",
    "cdr1": "object",
    "cdr1_aa": "object",
    "cdr1_start": "Int16",
    "cdr1_end": "Int16",
    "fwr2": "object",
    "fwr2_aa": "object",
    "fwr2_start": "Int16",
    "fwr2_end": "Int16",
    "cdr2": "object",
    "cdr2_aa": "object",
    "cdr2_start": "Int16",
    "cdr2_end": "Int16",
    "fwr3": "object",
    "fwr3_aa": "object",
    "fwr3_end": "Int16",
    "fwr3_start": "Int16",
    "cdr3": "object",
    "cdr3_aa": "object",
    "cdr3_end": "Int16",
    "cdr3_start": "Int16",
    "fwr4": "object",
    "fwr4_aa": "object",
    "fwr4_end": "Int16",
    "fwr4_start": "Int16",
    "v_alignment_start": "Int16",
    "v_alignment_end": "Int16",
    "v_call": "object",
    "v_cigar": "object",
    "v_germline_alignment": "object",
    "v_germline_alignment_aa": "object",
    "v_germline_start": "Int16",
    "v_germline_end": "Int16",
    "v_identity": "float32",
    "v_score": "float32",
    "v_sequence_alignment": "object",
    "v_sequence_alignment_aa": "object",
    "v_sequence_start": "Int16",
    "v_sequence_end": "Int16",
    "v_support": "float64",
    "d_alignment_start": "Int16",
    "d_alignment_end": "Int16",
    "d_call": "object",
    "d_cigar": "object",
    "d_germline_alignment": "object",
    "d_germline_alignment_aa": "object",
    "d_germline_start": "Int16",
    "d_germline_end": "Int16",
    "d_identity": "float32",
    "d_score": "float32",
    "d_sequence_alignment": "object",
    "d_sequence_alignment_aa": "object",
    "d_sequence_start": "Int16",
    "d_sequence_end": "Int16",
    "d_support": "float64",
    "j_alignment_start": "Int16",
    "j_alignment_end": "Int16",
    "j_call": "object",
    "j_cigar": "object",
    "j_germline_alignment": "object",
    "j_germline_alignment_aa": "object",
    "j_germline_start": "Int16",
    "j_germline_end": "Int16",
    "j_identity": "float32",
    "j_score": "float32",
    "j_sequence_alignment": "object",
    "j_sequence_alignment_aa": "object",
    "j_sequence_start": "Int16",
    "j_sequence_end": "Int16",
    "j_support": "float64",
    "junction": "object",
    "junction_aa": "object",
    "junction_aa_length": "Int16",
    "junction_length": "Int16",
    "np1": "object",
    "np1_length": "Int16",
    "np2": "object",
    "np2_length": "Int16",
}
# Constant-region (C gene) columns produced by the constant-region alignment
CONSTANTS_AIRR = {
    "c_call": "object",
    "c_cigar": "object",
    "c_germline_alignment": "object",
    "c_germline_alignment_aa": "object",
    "c_germline_start": "Int16",
    "c_germline_end": "Int16",
    "c_identity": "float32",
    "c_score": "float32",
    "c_sequence_alignment": "object",
    "c_sequence_alignment_aa": "object",
    "c_sequence_start": "Int16",
    "c_sequence_end": "Int16",
    "c_support": "float64",
}
# Columns added by sadie's post-processing on top of the AIRR standard
OTHER_COLS = {
    "d_call_top": "object",
    "d_mutation": "float32",
    "d_mutation_aa": "float32",
    "d_penalty": "Int16",
    # FIX: was the bare builtin `bool`; use the "bool" dtype string for
    # consistency with every other entry (pandas accepts both)
    "germline_alignment_aa_corrected": "bool",
    "iGL": "object",
    "iGL_aa": "object",
    "j_call_top": "object",
    "j_mutation": "float32",
    "j_mutation_aa": "float32",
    "j_penalty": "Int16",
    "liable": "bool",
    "padded_five_prime": "bool",
    "padded_three_prime": "bool",
    "reference_name": "object",
    "v_call_top": "object",
    "v_germline_alignment_aa_corrected": "bool",
    "v_mutation": "float32",
    "v_mutation_aa": "float32",
    "v_penalty": "Int16",
    # FIX: "vdj_aa" was listed twice with the same value; duplicate removed
    "vdj_aa": "object",
}
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Set, Type
import pandas as pd
from yaml import load
try:
from yaml import CLoader, Loader
cload: Type[CLoader] | Type[Loader] = CLoader
except ImportError:
from yaml import Loader
cload = Loader
class YamlRef:
    """Load sadie's germline reference yaml (name -> source -> species ->
    genes) and expose convenience lookups plus a normalized, validated
    DataFrame view."""
    def __init__(self, filepath: None | Path | str = None):
        if not filepath:
            # default to the reference file bundled with the package
            filepath = Path(__file__).parent.joinpath("data/reference.yml")
        self.ref_path = filepath
        # FIX: use `cload` (the C-accelerated loader selected at import time,
        # falling back to the pure-python Loader) — previously the fallback
        # machinery was dead code — and close the file handle deterministically
        # instead of leaking it.
        with open(self.ref_path) as ymlfile:
            self.yaml = load(ymlfile, Loader=cload)
        self.yaml_df = self._normalize_and_verify_yaml()
    def get_names(self) -> Set[str]:
        """Return a list of all names whcih can be annotated
        Example
        -------
        yaml.get_names
        >>> ['human','mouse','macque']
        Returns
        -------
        set
            unique types
        """
        return set(self.yaml_df["name"].to_list())
    def get_genes(self, name: str, source: str, species: str) -> List[str]:
        """Get the genes associated with a name, source, and species
        Parameters
        ----------
        name : str
            ex. 'human'
        source: str
            ex. 'imgt'
        species : str
            ex.'human'
        Returns
        -------
        list
            list of genes
        Examples
        --------
        # get all annotated class from a human imgt genes
        object.get_genes('human','imgt','human')
        >>> ['IGHV1-2*01','IGHV1-69*01'....]
        """
        _a: List[str] = self.yaml.get(name).get(source).get(species)
        return _a
    def get_gene_segment(self, name: str, source: str, species: str, gene_segment: str) -> List[str]:
        """Get the genes associated with these keys
        Parameters
        ----------
        name : str
            ex. 'human'
        source: str
            ex. 'imgt'
        species : str
            ex.'human'
        gene_segment: str
            ex. V
        Returns
        -------
        list
            list of genes of the gene segment
        Examples
        --------
        object.get_gene_segment('human','imgt','human','V')
        >>> ['IGHV1-2*01','IGHV1-69*01'....]
        """
        # the segment letter (V/D/J/C) is always the 4th character, e.g. IGHV1-2*01
        return list(filter(lambda x: x[3] == gene_segment, self.get_genes(name, source, species)))
    def get_yaml_as_dataframe(self) -> pd.DataFrame:
        """Return yaml as a normalized dataframe"""
        return self.yaml_df
    def __repr__(self) -> Any:
        return self.yaml.__repr__()
    def __iter__(self) -> Any:
        """Iter method will step through the yaml file"""
        return self.yaml.__iter__()
    def __getitem__(self, key: str) -> Any:
        return self.yaml[key]
    def __len__(self) -> int:
        return len(self.yaml_df)
    def _normalize_and_verify_yaml(self) -> pd.DataFrame:
        """Flatten the nested yaml into one row per gene and fail loudly on
        (name, species, gene) entries that appear more than once."""
        dataframe_loader: List[Dict[str, str]] = []
        data = self.yaml
        for name in data:
            for source in data.get(name):
                for species in data.get(name).get(source):
                    dataframe_loader.append(
                        {
                            "name": name,
                            "source": source,
                            "species": species,
                            "genes": data.get(name).get(source).get(species),
                        }
                    )
        _df = pd.DataFrame(dataframe_loader).explode("genes").reset_index(drop=True)
        lookup: List[str] = ["name", "species", "genes"]
        duplicated = _df.set_index(lookup).loc[_df.groupby(lookup).size() > 1]  # type: ignore
        if not duplicated.empty:
            # distinguish a same-source duplicate from a cross-source clash
            if len(duplicated["source"].unique()) == 1:
                raise ValueError(f"{duplicated}\nappears twice")
            else:
                raise ValueError(f"{duplicated}\nappears twice from two difference sources")
        return _df
from typing import Dict, List
from pydantic import BaseModel, validator
class Species(BaseModel):
    """What species to retrieve"""
    # free-form species name; validated by the models below when used as a field
    species: str
class Source(BaseModel):
    """What database to retrieve"""
    # database source identifier, e.g. "imgt" or "custom"
    source: str
class GeneEntry(BaseModel):
    """A single V, D or J gene entry (e.g. ``IGHV1-2*01``) with validation"""
    species: str
    gene: str
    source: str
    # values: a dict containing the name-to-value mapping of any previously-validated fields
    @validator("species")
    def check_species(cls, v: str) -> str:
        # pylint: disable=no-self-argument
        # delegate to the Species model so alias handling lives in one place
        return Species(**{"species": v}).species
    @validator("gene")
    def check_vgene(cls, v: str, values: Dict[str, str]) -> str:
        # pylint: disable=no-self-argument
        # the segment letter is always the 4th character, e.g. IGHV1-2*01
        if v[3] not in ["V", "D", "J"]:
            raise ValueError(f"gene must contain V,D or J at 3rd index, current have {v[3]} in {v} ")
        return v
    @validator("source")
    def check_source(cls, v: str) -> str:
        # pylint: disable=no-self-argument
        if v not in ["imgt", "custom"]:
            # FIX: corrected typo "chocies" in the user-facing error message
            raise ValueError(f"{v} is not a valid source, choices are 'imgt' or 'custom'")
        return v
class GeneEntries(BaseModel):
    """A list of V, D, J or constant-region gene entries with validation"""
    species: str
    genes: List[str]
    source: str
    @validator("species")
    def check_species(cls, v: str) -> str:
        # pylint: disable=no-self-argument
        # delegate to the Species model so alias handling lives in one place
        return Species(**{"species": v}).species
    @validator("genes", each_item=True)
    def check_vgene(cls, v: str, values: Dict[str, str]) -> str:
        # pylint: disable=no-self-argument
        # segment letter (4th char) may also be a constant-region isotype letter
        if v[3] not in ["V", "D", "J", "C", "A", "G", "M", "E"]:
            raise ValueError(f"gene must contain V,D,J or C at 3rd index, current have {v[3]} in {v} ")
        return v
    @validator("source")
    def check_source(cls, v: str) -> str:
        # pylint: disable=no-self-argument
        if v not in ["imgt", "custom"]:
            # FIX: corrected typo "chocies" in the user-facing error message
            raise ValueError(f"{v} is not a valid source, choices are 'imgt' or 'custom'")
        return v
from collections import deque
from unittest.mock import Mock
class MockTmpFile(object):
    """No-op stand-in for a temporary file.

    Only records the path the file would have had; every file operation is a
    no-op. Usable as a context manager. Note the path is exposed via the
    ``name()`` method (unlike tempfile objects, where ``name`` is an attribute).
    """
    _fn = None

    def __init__(self, suffix=None, prefix=None, dir=None, remove=False):
        # assemble "<dir>/<prefix><suffix>" using the same defaults as before
        parts = [
            '/tmp' if dir is None else dir,
            (__name__ if prefix is None else prefix) + ('.mock' if suffix is None else suffix),
        ]
        self._fn = '/'.join(parts)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass

    def close(self):
        pass

    def unlink(self):
        pass

    def write(self, data):
        pass

    def name(self):
        """Return the simulated file path."""
        return self._fn
class MockShUtil(object):
_expect = None
_return = None
_default = None
def __init__(self, cfg):
self._expect = []
self._return = {}
self._default = {}
self._mock = Mock()
self.mktmp = self._mock.mock_mktmp
self.mktmpdir = self._mock.mock_mktmpdir
self.getcwd = self._mock.mock_getcwd
self.makedirs = self._mock.mock_makedirs
self.chmod = self._mock.mock_chmod
self.chown = self._mock.mock_chown
self.chdir = self._mock.mock_chdir
self.getuid = self._mock.mock_getuid
self.getgid = self._mock.mock_getgid
self.lockd = self._mock.mock_lockd
self.rmtree = self._mock.mock_rmtree
self.unpack_archive = self._mock.mock_unpack_archive
self._configure(cfg)
def _configure(self, cfg):
self.mktmp.side_effect = self._mktmp
if cfg is None:
return
self._utilsDefault()
self._parseConfig(cfg)
self.mktmpdir.side_effect = self._mktmpdir
self.getcwd.side_effect = self._sideEffect('getcwd')
self.makedirs.side_effect = self._sideEffect('makedirs')
self.getuid.side_effect = self._sideEffect('getuid')
self.getgid.side_effect = self._sideEffect('getgid')
self.rmtree.side_effect = self._sideEffect('rmtree')
self.rmtree.side_effect = self._sideEffect('unpack_archive')
def _utilsDefault(self):
self._default['getcwd'] = '/testing/workdir'
self._default['getuid'] = 3000
self._default['getgid'] = 3000
def _parseConfig(self, cfg):
data = cfg.get('utils.sh', fallback = None)
if data is None:
data = cfg.get('shutil', fallback = '')
if data != '':
for l in data.splitlines():
l = l.strip()
if l != '':
x = l.split(';')
rtrn = x[0].strip()
cmdline = ';'.join(x[1:]).strip()
self._expect.append(cmdline)
util = cmdline.split(' ')[0].strip()
self._utilReturn(util, rtrn)
def _utilReturn(self, name, data):
if name == '':
raise RuntimeError('mock shutil: util name is empty')
if self._return.get(name, None) is None:
self._return[name] = deque()
self._return[name].appendleft(data)
def _sideEffect(self, util):
def wrapper(*args, **kwargs):
rtrn = self._return.get(util, None)
if rtrn is None:
return self._default.get(util, None)
try:
data = rtrn.pop()
except IndexError:
return self._default.get(util, None)
if data == '':
return self._default.get(util, None)
if util in ('getuid', 'getgid'):
return int(data)
return data
return wrapper
def _mktmp(self, suffix = None, prefix = None, dir = None, remove = False):
return MockTmpFile(suffix = suffix, prefix = prefix, dir = dir, remove = remove)
def _mktmpdir(self, suffix = None, prefix = None):
    """Replacement for mktmpdir: return a deterministic fake path string
    (module name + '.mock' by default) instead of creating a directory."""
    if suffix is None:
        suffix = '.mock'
    if prefix is None:
        prefix = __name__
    return prefix + suffix
def check(self):
    """Assert that the mock's recorded calls match the expected command lines."""
    got = []
    for x in self._mock.mock_calls:
        # mock_calls entries are (name, args, kwargs) triples; drop the mock_ prefix
        xname = x[0].replace('mock_', '', 1)
        xargs = x[1]
        cmdline = xname
        if len(xargs) > 0:
            cmdline = "%s %s" % (xname, ' '.join([str(i) for i in xargs]))
        xkwargs = x[2]
        for k, v in xkwargs.items():
            v = str(v)
            cmdline = "%s, %s=%s" % (cmdline, k, v)
        got.append(cmdline)
    assert got == self._expect, \
"mock shutil\n*** GOT:\n%s\n*** EXPECT:\n%s" % ('\n'.join(got), '\n'.join(self._expect)) | /sadm-0.21.tar.gz/sadm-0.21/tlib/_sadmtest/mock/utils/sh.py | 0.461988 | 0.172939 | sh.py | pypi |
import matplotlib.pyplot as plt
import numpy as np
# fixed: 'import .plotHelper as pH' is a SyntaxError; relative imports require this form
from . import plotHelper as pH
try:
    plt.style.use('scientific_grid_no_space')
except Exception:
    # custom style may not be installed; fall back to matplotlib's default style
    pass
# -------------------------------------------------------------------------
# Modular plotting functions
# -------------------------------------------------------------------------
def plot_data(ax, x_data, y_data, x_error_data=None, y_error_data=None,
              color='k', ls='-',
              xlabel=None, ylabel=None,
              xscale='linear', yscale='linear'):
    """Plot y_data vs x_data on *ax*, with error bars when error data is given.

    A new figure/axes pair is created when ax is None.
    Returns (fig, ax); fig is None when the caller supplied ax.
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    # Compare against None explicitly: truth-testing a numpy array or pandas
    # Series (as passed in by plot_df_key) raises "truth value is ambiguous".
    if x_error_data is not None or y_error_data is not None:
        ax.errorbar(x_data, y_data, xerr=x_error_data, yerr=y_error_data,
                    color=color, ls=ls)
    else:
        ax.plot(x_data, y_data, color=color, ls=ls)
    # Labels
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)
    # Set scales
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)
    return fig, ax  # 'fig' will be None if 'ax' was provided
def plot_histogram(ax, data, color='gray', bins=None,
                   xlabel=None, ylabel=None, xscale="linear", yscale="linear"):
    """Plot a density histogram of *data* with vertical mean (red) and
    median (blue) dashed lines.

    A new figure/axes pair is created when ax is None.
    Returns (fig, ax); fig is None when the caller supplied ax.
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    # nanmedian tolerates NaNs in the data; the previously computed std-dev
    # was never used and has been dropped
    mean, median = np.mean(data), np.nanmedian(data)
    ax.hist(data, bins=bins, density=True, color=color)
    # Vertical lines for mean, median
    ax.axvline(mean, c='r', ls='--')
    ax.axvline(median, c='b', ls='--')
    # Set scales
    ax.set_xscale(xscale)
    ax.set_yscale(yscale)
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)
    return fig, ax  # 'fig' will be None if 'ax' was provided
def plot_data_correlations(ax, correlations_array,
                           cmap='jet', add_text=True, xlabels=[], ylabels=[]):
    """Render a square correlation matrix as a heatmap, optionally annotating
    each cell with its coefficient to three decimals.

    A new figure/axes pair is created when ax is None.
    Returns (fig, ax); fig is None when the caller supplied ax.
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    # Plot heatmap
    ax.imshow(correlations_array, cmap=cmap)
    # Axes ticks: one per matrix row/column
    ticks = np.arange(len(correlations_array))
    ax.set_xticks(ticks)
    ax.set_yticks(ticks)
    # Axes tick labels
    ax.set_xticklabels(xlabels)
    ax.set_yticklabels(ylabels)
    # Rotate the tick labels and set their alignment so long names stay readable.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Add text annotations of correlation coefficients to heatmap
    if add_text:
        for i in range(len(correlations_array)):
            for j in range(len(correlations_array)):
                ax.text(j, i, f"{correlations_array[i, j]:.3f}", ha="center", va="center", color="w")
    return fig, ax  # 'fig' will be None if 'ax' was provided
# -------------------------------------------------------------------------
# Dataframe plotting functions
# -------------------------------------------------------------------------
def plot_df_key(ax, df, x_key, y_key, x_error_key=None, y_error_key=None,
                color='k', ls='-',
                xlabel=None, ylabel=None,
                xscale="linear", yscale="linear"):
    """Plot df[x_key] vs df[y_key] (with optional error columns) via plot_data.

    Returns (fig, ax); fig is None when the caller supplied ax.
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    # Copy DF so downstream use of the extracted columns cannot mutate the caller's frame
    plot_df = df.copy()
    x_data = plot_df[x_key]
    y_data = plot_df[y_key]
    x_error_data = plot_df[x_error_key] if x_error_key else None
    y_error_data = plot_df[y_error_key] if y_error_key else None
    # Keep the figure created above: plot_data returns fig=None once ax exists,
    # and the original code overwrote (and lost) the new figure with that None.
    _, ax = plot_data(ax, x_data, y_data, x_error_data=x_error_data, y_error_data=y_error_data,
                      color=color, ls=ls, xlabel=xlabel, ylabel=ylabel, xscale=xscale, yscale=yscale)
    return fig, ax  # 'fig' will be None if 'ax' was provided
def plot_df_key_histogram(ax, df, key, color='gray', bins=None,
                          xlabel=None, ylabel=None, xscale="linear", yscale="linear"):
    """Plot a histogram of the DataFrame column df[key] via plot_histogram.

    Returns (fig, ax); fig is None when the caller supplied ax.
    """
    fig = None
    if ax is None:
        fig, ax = plt.subplots()
    # Copy DF so the plotted column is detached from the caller's frame
    plot_df = df.copy()
    data = plot_df[key]
    # Keep the figure created above: plot_histogram returns fig=None once ax
    # exists; the original code overwrote (and lost) the new figure with it.
    _, ax = plot_histogram(ax, data, color=color, bins=bins,
                           xlabel=xlabel, ylabel=ylabel, xscale=xscale, yscale=yscale)
    return fig, ax  # 'fig' will be None if 'ax' was provided
def plot_df_correlations(ax, df, keys, cmap='jet', add_text=True):
    """Compute correlations between *keys* of *df* and plot them as a heatmap."""
    # project-local helper: builds the correlation matrix for the chosen columns
    import utilities.dataframeUtilities as dfut
    fig = None
    if ax == None:
        fig, ax = plt.subplots()
    plot_df = df.copy()
    correlations = dfut.get_correlation(plot_df, columns=keys)
    # NOTE(review): when ax was None, the fig created above is overwritten here by
    # the None returned from plot_data_correlations, so the new figure is lost.
    fig, ax = plot_data_correlations(ax, correlations,
                                     cmap=cmap, add_text=add_text, xlabels=keys, ylabels=keys)
return fig, ax # 'fig' will be None if 'ax' was provided | /sadtools-0.0.2-py3-none-any.whl/analysis/plotting.py | 0.76856 | 0.63324 | plotting.py | pypi |
import numpy as np
from scipy.interpolate import splrep, splev
import math
# -------------------------------------------------------------------------
# Grid functions
# -------------------------------------------------------------------------
def create_grid(x_range, y_range, num_x_points=50, num_y_points=50):
    """Build a 2-D mesh grid spanning x_range by y_range.

    x_range / y_range are (start, stop) pairs fed straight into np.linspace.
    Returns the list produced by np.meshgrid.
    """
    xs = np.linspace(*x_range, num=num_x_points)
    ys = np.linspace(*y_range, num=num_y_points)
    return np.meshgrid(xs, ys)
# -------------------------------------------------------------------------
# Interpolation functions
# -------------------------------------------------------------------------
def determine_fits(x_list, data_list):
    """Fit a B-spline representation (scipy splrep) to each y-series in
    *data_list* against the shared x-axis *x_list*; returns one fit per series.
    """
    return [splrep(x_list, series) for series in data_list]
def evaluate_fits(x_list, fits_list):
    """Evaluate each spline fit in *fits_list* at every x in *x_list*.

    Returns one list of interpolated y-values per fit, ordered like x_list.
    """
    return [[float(splev(x, fit)) for x in x_list] for fit in fits_list]
# -------------------------------------------------------------------------
# Arithmetic utility functions
# -------------------------------------------------------------------------
def even_to_odd(number, direction='up'):
    """
    Casts an even number to an odd number. Used mainly to centre axis labels
    for plots by having an odd number of rows and columns.

    Arguments:
        number {int} -- Even number to be cast to an odd number.

    Keyword Arguments:
        direction {str} -- If 'up', add 1 to the even number. If 'down',
            subtract 1 from the even number. (default: {'up'})

    Returns:
        {int} -- Odd number
    """
    number = int(number)
    if number % 2 == 0:
        # '==' instead of 'is': string identity comparison is an interpreter
        # implementation detail and silently fails for non-interned strings
        if direction == 'up':
            number += 1
        elif direction == 'down':
            number -= 1
        else:
            # the f-prefix was missing, so the literal '{direction}' was printed
            print(f"Invalid option {direction}. Valid options are 'up' or 'down'. Choosing 'up'.")
            number += 1
    return number
def is_square(number: int) -> bool:
    """Return True when *number* is a perfect square (integer-sqrt round-trip)."""
    root = math.isqrt(number)
return number == root ** 2 | /sadtools-0.0.2-py3-none-any.whl/utilities/numerics.py | 0.709824 | 0.517205 | numerics.py | pypi |
import numpy as np
import utilities as util
import astropy.units as u
# -------------------------------------------------------------------------
# Derived Units
# -------------------------------------------------------------------------
cm3 = u.cm**3
dm3 = u.dm**3
m3 = u.m**3
g_cm3 = (u.g / cm3)
molar = u.mol / dm3
# -------------------------------------------------------------------------
# Constants
# -------------------------------------------------------------------------
avogadro_constant = 6.0221415E+23 / u.mol # atoms [mol^-1]
hydrogen_mass_amu = 1.008 # a.m.u., standard atomic weight
hydrogen_mass_cgs = 1.6733E-24 * u.g # [g]
# -------------------------------------------------------------------------
# Conversion functions for physical quantities
# -------------------------------------------------------------------------
def gas_density_to_hydrogen_number_density(gas_density, percentage_hydrogen=1, log=False):
    """Convert a gas mass density (assumed [g cm^-3]) to a hydrogen number
    density by dividing by the hydrogen atom mass.

    Despite the name, percentage_hydrogen is used as a multiplicative fraction
    (default 1 == 100% hydrogen). When log=True, returns log10 of the bare
    numeric value — assumes gas_density is an astropy Quantity with .value;
    TODO confirm.
    """
    hydrogen_number_density = (gas_density / hydrogen_mass_cgs) * percentage_hydrogen
    if log:
        return np.log10(hydrogen_number_density.value)
    else:
        return hydrogen_number_density
def log_abundance_to_number_density(log_abundance, log_hydrogen_number_density, log_hydrogen_abundance=12):
    """Convert a log stellar photospheric abundance to a number density.

    Uses the A(X) abundance scale where hydrogen is 12 by default (solar
    physics convention): n_X = 10 ** (A(X) - A(H) + log n_H).
    """
    exponent = log_abundance - log_hydrogen_abundance + log_hydrogen_number_density
    return 10 ** exponent
def number_density_to_concentration(number_density):
    """Number density [cm^-n] -> molar concentration [mol cm^-n].

    Divides by the module-level avogadro_constant (an astropy quantity, [mol^-1]).
    """
    concentration = number_density / avogadro_constant
    return concentration
def concentration_to_number_density(concentration):
    """Molar concentration [mol cm^-3] -> number density [cm^-3].

    Multiplies by the module-level avogadro_constant (an astropy quantity, [mol^-1]).
    """
    number_density = concentration * avogadro_constant
    return number_density
# -------------------------------------------------------------------------
# Stoichiometry functions
# -------------------------------------------------------------------------
def calculate_stoichiometry(reactants_list, products_list, return_dicts=False):
    """Count species occurrences on each side of a reaction via the project
    helper util.list_instances_to_dict.

    NOTE(review): '.values' is returned without being called; if the helper
    returns a plain dict this yields the bound method, not the counts —
    confirm the helper's return type (pandas-like objects expose .values as a
    property, which would make this correct).
    """
    reactant_stoichiometry = util.list_instances_to_dict(reactants_list)
    product_stoichiometry = util.list_instances_to_dict(products_list)
    if return_dicts:
        return reactant_stoichiometry, product_stoichiometry
    else:
        return reactant_stoichiometry.values, product_stoichiometry.values
# -------------------------------------------------------------------------
# Reaction rate and timescale functions
# -------------------------------------------------------------------------
def modified_to_arrhenius_prefactor(alpha, beta, temperature):
    """Collapse the modified-Arrhenius temperature factor into a plain
    Arrhenius prefactor: A = alpha * (T / 300)^beta."""
    temperature_factor = (temperature / 300) ** beta
    return alpha * temperature_factor
def modified_arrhenius_rate(alpha=1, beta=0, gamma=0, temperature=300):
    """Modified Arrhenius rate: k = alpha * (T/300)^beta * exp(-gamma / T).

    Reduces to the standard Arrhenius form when beta == 0.
    """
    temperature_factor = (temperature / 300) ** beta
    exponential_factor = np.exp(-gamma / temperature)
    return alpha * temperature_factor * exponential_factor
def arrhenius_rate(prefactor, activation_energy, temperature):
    """Standard Arrhenius rate: k = A * exp(-Ea / T).

    The exponent is Ea / T directly, so activation_energy must be expressed
    in the same (temperature) units as T.
    """
    exponential_factor = np.exp(-activation_energy / temperature)
    return prefactor * exponential_factor
def calculate_unitless_timescale(forward_rate, reverse_rate=None, reactant_stoichiometry=(1, 1)):
    """Timescale tau = 1 / (total rate) for a reaction such as A + B -> C.

    Only valid when the rates are dimensionless; otherwise forward and
    reverse rates must first be related through the reactant/product number
    densities. Rates are expected to carry a .value attribute (astropy-style).

    The stoichiometry default is now an immutable tuple (was a mutable list),
    and the None check uses 'is not None' instead of '!='.
    """
    total_rate = forward_rate.value * np.sum(reactant_stoichiometry)
    if reverse_rate is not None:
        total_rate += reverse_rate.value
    timescale = 1 / total_rate
    return timescale
# -------------------------------------------------------------------------
# Functions for determining equilibrium quantities
# -------------------------------------------------------------------------
def equilibrium_constant_dimensional(forward_rate_coefficient,
                                     reverse_rate_coefficient):
    """Dimensional equilibrium constant K_eq = k_forward / k_reverse."""
    return forward_rate_coefficient / reverse_rate_coefficient
# -------------------------------------------------------------------------
# Utility functions for rates and timescales
# -------------------------------------------------------------------------
def scale_timescale_with_number_density(timescale, reactant_number_density):
    """Scale a reaction timescale by the primary reactant number density
    (cf. Wedemeyer-Boehm 2005)."""
    return timescale / reactant_number_density
def convert_rate_to_molar(rate, order):
    """Convert a cgs rate coefficient [cm^3n s^-1] (n = reaction *order*) to
    molar units [mol^n dm^-3n s^-1].

    Converts cm^3 -> dm^3 and multiplies by Avogadro's number (per factor of
    volume) to move from per-particle to per-mole volumes. Assumes rate and
    the module-level constants are astropy quantities — TODO confirm.
    """
    volume_conversion = cm3.to(dm3)
    conversion_factor = (volume_conversion * avogadro_constant)**order
    molar_rate = rate * conversion_factor  # units of [mol^n dm^-3n s^-1]
    return molar_rate
# -------------------------------------------------------------------------
# Unit conversion functions
# -------------------------------------------------------------------------
def get_rate_unit_from_order(order, unit_system='cgs'):
    """Astropy unit for a rate coefficient of a reaction of the given order.

    'cgs'   -> cm^(3*order) / s
    'molar' -> M^(1-order) / s
    NOTE(review): an unknown unit_system leaves unit=None, and the trailing
    'unit /= u.s' then raises TypeError — confirm whether a fallback or an
    explicit error is intended.
    """
    unit = None
    if unit_system == 'cgs':
        # [cm^3*n s^-1]
        unit = (cm3)**order
    elif unit_system == 'molar':
        # [M^(1-n) s^-1]
        unit = (u.M)**(1-order)
    unit /= u.s
return unit | /sadtools-0.0.2-py3-none-any.whl/utilities/chemistryUtilities.py | 0.803868 | 0.427994 | chemistryUtilities.py | pypi |
from argparse import ArgumentParser, BooleanOptionalAction, Namespace
from os.path import expanduser
from sys import stdout
from uuid import UUID
from .cli import CLI
from .version import VERSION
def parse_args() -> Namespace:
    """Build the argument parser and parse sys.argv.

    Each sub-command stores its handler name in args.command; fixed here: the
    'edit' subparser previously set command="move", so edit requests were
    dispatched to CLI.move (which then failed on the missing --to attribute).
    """
    parser = ArgumentParser(
        prog="Safari Bookmarks CLI",
        description="A utility to help manage Safari bookmarks.",
    )
    parser.set_defaults(command=None)
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s v{version}".format(version=VERSION),
    )
    parser.add_argument(
        "--file", "-f",
        type=expanduser,
        default="~/Library/Safari/Bookmarks.plist",
        help="The path to the Safari bookmarks.",
    )
    parser.add_argument(
        "--json",
        action=BooleanOptionalAction,
        default=False,
        help="Render output as JSON",
    )
    parser.add_argument(
        "--format", "-F",
        required=False,
        help="Customize the output format. Available placeholders: {title}, {url}, {id}, {type}, {prefix}, {suffix}."
    )
    subparsers = parser.add_subparsers(
        title="commands",
        required=True,
    )
    parser_list = subparsers.add_parser(
        "list",
        aliases=["ls", "show"],
        description="List bookmarks and folders.",
    )
    parser_list.add_argument(
        "target",
        nargs="?",
        help="The UUID or title of the bookmark or folder to show. Default shows all.",
    )
    parser_list.set_defaults(command="list")
    parser_add = subparsers.add_parser(
        "add",
        aliases=["a", "create"],
        description="Add a bookmark or folder.",
    )
    parser_add.add_argument(
        "title",
        help="The title of the bookmark or folder.",
    )
    parser_add.add_argument(
        "--uuid",
        type=UUID,
        required=False,
        help="The UUID to use. Default is to generate a new UUID.",
    )
    parser_add.add_argument(
        "--to",
        help="The target folder.",
    )
    # a new node is either a bookmark (--url) or a folder (--folder), never both
    group = parser_add.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "--url",
        help="The URL for the bookmark."
    )
    group.add_argument(
        "--folder",
        dest="list",
        action=BooleanOptionalAction,
        default=False,
        help="Add a folder instead of a bookmark.",
    )
    parser_add.set_defaults(command="add")
    parser_remove = subparsers.add_parser(
        "remove",
        aliases=["rm", "delete", "del"],
        description="Remove a bookmark or folder.",
    )
    parser_remove.add_argument(
        "target",
        help="The UUID or title of the bookmark or folder to remove."
    )
    parser_remove.set_defaults(command="remove")
    parser_move = subparsers.add_parser(
        "move",
        aliases=["mv"],
        description="Move a bookmark or folder.",
    )
    parser_move.add_argument(
        "target",
        help="The UUID or title of the bookmark or folder to move."
    )
    parser_move.add_argument(
        "--to",
        help="The destination folder.",
    )
    parser_move.set_defaults(command="move")
    # previously this parser reused the 'parser_move' variable name and — worse —
    # registered itself with command="move"; both are fixed below
    parser_edit = subparsers.add_parser(
        "edit",
        aliases=["e", "update", "change"],
        description="Edit a bookmark or folder.",
    )
    parser_edit.add_argument(
        "target",
        help="The UUID or title of the bookmark or folder to change.",
    )
    parser_edit.add_argument(
        "--title",
        help="The new title to change.",
    )
    parser_edit.add_argument(
        "--url",
        help="The new URL to change. Only used for bookmarks.",
    )
    parser_edit.set_defaults(command="edit")
    return parser.parse_args()
def main():
    """Console entry point: parse CLI arguments and dispatch to the chosen command."""
    args = parse_args()
CLI(args.file, stdout).run(args.command, args) | /safari_bookmarks_cli-0.2.0-py3-none-any.whl/safaribookmarks/main.py | 0.490968 | 0.228641 | main.py | pypi |
from contextlib import contextmanager
import plistlib
from typing import Generator, IO, Optional
import uuid
from .helpers import load, dump
from .models import WebBookmarkType, WebBookmarkTypeList, WebBookmarkTypeLeaf, WebBookmarkTypeProxy
DEFAULT_LIST_FORMAT = "{prefix: <{depth}}{title: <50}{type: <6}{id: <38}{url}"
class CLI():
def __init__(self, path: str, out: IO) -> None:
    """Store the bookmarks plist path and the stream used for rendered output."""
    self.path = path
    self.output = out
def run(self, command, args) -> None:
    """Dispatch *command* to the CLI method of the same name.

    Raises ValueError for a missing or unknown command. Previously
    getattr(self, command) had no default, so unknown commands raised
    AttributeError and the 'Invalid command' branch was unreachable.
    """
    if command is None:
        raise ValueError("No command specified")
    handler = getattr(self, command, None)
    if handler is None:
        raise ValueError(f"Invalid command: {command}")
    handler(args)
@contextmanager
def with_bookmarks(self, update: bool = False) -> Generator[WebBookmarkTypeList, None, None]:
    """Context manager yielding the parsed bookmarks plist.

    When update is truthy, the (possibly mutated) tree is serialized back to
    disk after the with-block body finishes. Note: ANY truthy positional
    argument enables the write-back.
    """
    with open(self.path, "rb") as file:
        bookmarks = load(file, plistlib.FMT_BINARY)
    yield bookmarks
    if update:
        with open(self.path, "wb") as file:
            dump(bookmarks, file, plistlib.FMT_BINARY)
def lookup(self, title: str, root: WebBookmarkType) -> Optional[WebBookmarkType]:
    """Depth-first search for the node whose UUID or title matches *title*.

    UUID comparison is case-insensitive; title comparison is exact. Leaf
    bookmarks keep their title in uri_dictionary['title']. Returns None when
    nothing matches.
    """
    if title.lower() == str(root.web_bookmark_uuid).lower():
        return root
    elif isinstance(root, WebBookmarkTypeLeaf):
        if root.uri_dictionary.get("title") == title:
            return root
    elif root.title == title:
        return root
    elif isinstance(root, WebBookmarkTypeList):
        # recurse into the list's children, returning the first match
        for child in root:
            if result := self.lookup(title, child):
                return result
    return None
def parent(self, target: WebBookmarkType, root: WebBookmarkType) -> Optional[WebBookmarkTypeList]:
    """Find the list that directly contains *target*, or None if absent.

    NOTE(review): when target IS root, root itself is returned rather than a
    container — callers hitting that case get the node back, not its parent.
    """
    if target == root:
        return root
    elif isinstance(root, WebBookmarkTypeList):
        if target in root:
            return root
        for child in root:
            if result := self.parent(target, child):
                return result
    return None
def get_info(self, item: WebBookmarkType) -> tuple[str, str, str]:
    """Return a (type-tag, title, url) triple describing *item*.

    url is '' for proxy and list nodes; unrecognized node types yield
    ('unknown', '', '').
    """
    if isinstance(item, WebBookmarkTypeLeaf):
        return (
            "leaf",
            item.uri_dictionary.get("title", ""),
            item.url_string,
        )
    elif isinstance(item, WebBookmarkTypeProxy):
        return (
            "proxy",
            item.title,
            ""
        )
    elif isinstance(item, WebBookmarkTypeList):
        return (
            "list",
            item.title,
            "",
        )
    else:
        return ("unknown", "", "")
def render_item(self, item: WebBookmarkType, format: str, depth: int = 0):
    """Write one node using *format*, then recurse into list children.

    Newlines are stripped from titles so each node stays on one output line;
    a blank line is written after each list's children.
    """
    id = item.web_bookmark_uuid
    type_, title, url = self.get_info(item)
    self.output.write(
        # the f-string only appends the newline; placeholder substitution
        # happens in the .format() call below
        f"{format}\n".format(
            depth=depth,
            prefix="",
            suffix="",
            title=title.replace("\n", ""),
            type=type_,
            url=url,
            id=str(id),
        )
    )
    if isinstance(item, WebBookmarkTypeList):
        self.render_children(item, format=format, depth=depth+1)
        self.output.write("\n")
def render_children(self, item: WebBookmarkType, format: str, depth: int = 0):
    """Render each child of *item* in order at indentation *depth*."""
    for child in item.children:
        self.render_item(child, format, depth=depth)
def render(self, root: WebBookmarkType, args, only_children=False):
    """Render *root* as JSON (--json) or as line-formatted text.

    only_children skips the root node itself (used when listing the whole tree).
    """
    if args.json:
        self.output.write(root.model_dump_json(by_alias=True))
    else:
        format = args.format if args.format else DEFAULT_LIST_FORMAT
        if only_children:
            self.render_children(root, format=format)
        else:
            self.render_item(root, format=format)
def list(self, args):
    """Handle the 'list' command: render a bookmark/folder (or the whole tree).

    Opens the bookmarks file read-only. Fixed here: the previous call was
    with_bookmarks("rb"), and any truthy argument is treated as update=True,
    so a plain listing rewrote the bookmarks plist on exit.
    """
    with self.with_bookmarks() as target:
        if args.target:
            target = self.lookup(args.target, target)
            if target is None:
                raise ValueError("Target not found")
        self.render(target, args, True)
def add(self, args):
    """Handle the 'add' command: create a bookmark (leaf) or folder (list).

    A fresh UUID is generated unless --uuid is given; UUIDs are stored
    uppercase. With --to the node is appended to that folder, otherwise to
    the root list. The file is written back on context exit (update=True).
    """
    id = str(args.uuid or uuid.uuid4()).upper()
    if args.list:
        # folder: requires a title, must not carry a URL
        if not args.title:
            raise ValueError("Title is required")
        web_bookmark = WebBookmarkTypeList(
            web_bookmark_uuid=id,
            title=args.title,
        )
        if args.url:
            raise ValueError("URL is not supported by lists")
    else:
        web_bookmark = WebBookmarkTypeLeaf(
            web_bookmark_uuid=id,
            url_string=args.url,
        )
        if args.title:
            # leaf titles live in the uri dictionary, not on the model itself
            web_bookmark.uri_dictionary["title"] = args.title
    with self.with_bookmarks(True) as target:
        if args.to:
            target = self.lookup(args.to, target)
        # also catches a failed lookup (None is not a WebBookmarkTypeList)
        if not isinstance(target, WebBookmarkTypeList):
            raise ValueError("Invalid destination")
        target.children.append(web_bookmark)
        self.render(web_bookmark, args)
def remove(self, args):
    """Handle the 'remove' command: delete a bookmark/folder, then render it.

    The file is written back on context exit (update=True).
    """
    with self.with_bookmarks(True) as parent:
        target = self.lookup(args.target, parent)
        if target is None:
            raise ValueError("Target not found")
        # re-bind 'parent' from the root tree to the node's direct container
        parent = self.parent(target, parent)
        parent.remove(target)
        self.render(target, args)
def move(self, args):
    """Handle the 'move' command: re-parent a bookmark/folder under --to.

    Raises ValueError when the target or destination cannot be resolved.
    Fixed here: a missing --to previously fell through to
    self.render(dest, args) with 'dest' never assigned, crashing with
    NameError instead of reporting the problem.
    """
    with self.with_bookmarks(True) as bookmarks:
        target = self.lookup(args.target, bookmarks)
        if target is None:
            raise ValueError("Target not found")
        if not args.to:
            raise ValueError("No destination specified")
        dest = self.lookup(args.to, bookmarks)
        if not isinstance(dest, WebBookmarkTypeList):
            raise ValueError("Invalid destination")
        parent = self.parent(target, bookmarks)
        parent.remove(target)
        # NOTE(review): 'add' appends via target.children.append while this uses
        # dest.append — confirm WebBookmarkTypeList exposes both entry points.
        dest.append(target)
        self.render(dest, args)
def edit(self, args):
    """Handle the 'edit' command: change a node's title and/or URL in place.

    The file is written back on context exit (update=True).
    """
    with self.with_bookmarks(True) as bookmarks:
        target = self.lookup(args.target, bookmarks)
        if target is None:
            raise ValueError("Target not found")
        if title := args.title:
            if isinstance(target, WebBookmarkTypeList):
                target.title = title
            elif isinstance(target, WebBookmarkTypeLeaf):
                # leaf titles live in the uri dictionary, not on the model
                target.uri_dictionary["title"] = title
            else:
                raise ValueError("Cannot update target title")
        if url := args.url:
            if isinstance(target, WebBookmarkTypeLeaf):
                target.url_string = url
            else:
                raise ValueError("Cannot update target url")
self.render(target, args) | /safari_bookmarks_cli-0.2.0-py3-none-any.whl/safaribookmarks/cli.py | 0.708313 | 0.185947 | cli.py | pypi |
import abc
from typing import Tuple, Union
import numpy as np
import scipy.integrate
import scipy.spatial
from pydantic import BaseModel
class BaseEntityValidator(BaseModel):
    """
    Validator for BaseEntity's config member.

    Parameters
    ----------
    name : str
        Name of entity
    """
    # pydantic field: BaseEntity kwargs are validated against this schema
    name: str
class BaseEntity(abc.ABC):
    """
    Base implementation of a dynamics controlled entity within the saferl sim.

    Parameters
    ----------
    dynamics : BaseDynamics
        Dynamics object for computing state transitions
    control_default: np.ndarray
        Default control vector used when no action is passed to step(). Typically 0 or neutral for each actuator.
    control_min: np.ndarray
        Optional minimum allowable control vector values. Control vectors that exceed this limit are clipped.
    control_max: np.ndarray
        Optional maximum allowable control vector values. Control vectors that exceed this limit are clipped.
    control_map: dict
        Optional mapping for actuator names to their indices in the state vector.
        Allows dictionary action inputs in step().
    """

    def __init__(self, dynamics, control_default, control_min=-np.inf, control_max=np.inf, control_map=None, **kwargs):
        # remaining kwargs (e.g. 'name') are validated by the pydantic config model
        self.config = self._get_config_validator()(**kwargs)
        self.name = self.config.name
        self.dynamics = dynamics
        self.control_default = control_default
        self.control_min = control_min
        self.control_max = control_max
        self.control_map = control_map
        self._state = self._build_state()
        # derivative from the most recent step(); zeros before any step is taken
        self.state_dot = np.zeros_like(self._state)

    @classmethod
    def _get_config_validator(cls):
        # subclasses override this to extend the validated config schema
        return BaseEntityValidator

    @abc.abstractmethod
    def _build_state(self) -> np.ndarray:
        """Construct and return the entity's initial state vector."""
        raise NotImplementedError

    def step(self, step_size, action=None):
        """
        Executes a state transition simulation step for the entity.

        Parameters
        ----------
        step_size : float
            Duration of simulation step in seconds
        action : Union(dict, list, np.ndarray), optional
            Control action taken by entity, by default None resulting in a control of control_default
            When list or ndarray, directly used and control vector for dynamics model
            When dict, unpacked into control vector. Requires control_map to be defined.

        Raises
        ------
        KeyError
            Raised when action dict key not found in control map
        ValueError
            Raised when action is not one of the required types
        """
        if action is None:
            control = self.control_default.copy()
        else:
            if isinstance(action, dict):
                assert self.control_map is not None, "Cannot use dict-type action without a control_map (see BaseEntity __init__())"
                # start from the default vector and overwrite only the named actuators
                control = self.control_default.copy()
                for action_name, action_value in action.items():
                    if action_name not in self.control_map:
                        raise KeyError(
                            f"action '{action_name}' not found in entity's control_map, "
                            f"please use one of: {self.control_map.keys()}"
                        )
                    control[self.control_map[action_name]] = action_value
            elif isinstance(action, list):
                control = np.array(action, dtype=np.float32)
            elif isinstance(action, np.ndarray):
                control = action.copy()
            else:
                raise ValueError("action must be type dict, list, or np.ndarray")

        # enforce control bounds
        control = np.clip(control, self.control_min, self.control_max)

        # compute new state if dynamics were applied
        self.state, self.state_dot = self.dynamics.step(step_size, self.state, control)

    @property
    def state(self) -> np.ndarray:
        """
        Returns copy of entity's state vector.

        Returns
        -------
        np.ndarray
            copy of state vector
        """
        return self._state.copy()

    @state.setter
    def state(self, value: np.ndarray):
        # store a copy so external mutation of 'value' cannot alter internal state
        self._state = value.copy()

    @property
    @abc.abstractmethod
    def x(self):
        """get x"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def y(self):
        """get y"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def z(self):
        """get z"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def position(self) -> np.ndarray:
        """get 3d position vector"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def orientation(self) -> scipy.spatial.transform.Rotation:
        """
        Get orientation of entity.

        Returns
        -------
        scipy.spatial.transform.Rotation
            Rotation transformation of the entity's local reference frame basis vectors in the global reference frame.
            i.e. applying this rotation to [1, 0, 0] yields the entity's local x-axis in the global frame.
        """
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def velocity(self):
        """Get 3d velocity vector"""
        raise NotImplementedError
class BaseRotationEntity(BaseEntity):
    """
    Base implementation of a base entity with rotational states within the saferl sim.

    Subclasses must expose a quaternion attitude (q4 is the scalar element,
    i.e. a scalar-last convention) and a 3d angular velocity.

    Parameters
    ----------
    dynamics : BaseDynamics
        Dynamics object for computing state transitions
    control_default: np.ndarray
        Default control vector used when no action is passed to step(). Typically 0 or neutral for each actuator.
    control_min: np.ndarray
        Optional minimum allowable control vector values. Control vectors that exceed this limit are clipped.
    control_max: np.ndarray
        Optional maximum allowable control vector values. Control vectors that exceed this limit are clipped.
    control_map: dict
        Optional mapping for actuator names to their indices in the state vector.
        Allows dictionary action inputs in step().
    """

    def __init__(self, dynamics, control_default, control_min=-np.inf, control_max=np.inf, control_map=None, **kwargs):
        # no rotation-specific construction; simply forward to BaseEntity
        super().__init__(
            dynamics=dynamics,
            control_default=control_default,
            control_min=control_min,
            control_max=control_max,
            control_map=control_map,
            **kwargs
        )

    @property
    @abc.abstractmethod
    def q1(self):
        """get first element of quaternion"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def q2(self):
        """get second element of quaternion"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def q3(self):
        """get third element of quaternion"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def q4(self):
        """get fourth element of quaternion (scalar)"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def quaternion(self) -> np.ndarray:
        """get 4d quaternion"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def wx(self):
        """get wx"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def wy(self):
        """get wy"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def wz(self):
        """get wz"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def angular_velocity(self) -> np.ndarray:
        """get 3d angular velocity vector"""
        raise NotImplementedError
class BaseDynamics(abc.ABC):
    """
    State transition implementation for a physics dynamics model. Used by entities to compute their next state when
    their step() method is called.

    Parameters
    ----------
    state_min : float or np.ndarray
        Minimum allowable value for the next state. State values that exceed this are clipped.
        When a float, represents single limit applied to entire state vector.
        When an ndarray, each element represents the limit to the corresponding state vector element.
    state_max : float or np.ndarray
        Maximum allowable value for the next state. State values that exceed this are clipped.
        When a float, represents single limit applied to entire state vector.
        When an ndarray, each element represents the limit to the corresponding state vector element.
    angle_wrap_centers: np.ndarray
        Enables circular wrapping of angles. Defines the center of circular wrap such that angles are within
        [center - pi, center + pi].
        When None, no angle wrapping applied.
        When ndarray, each element defines the angle wrap center of the corresponding state element.
        Wrapping not applied when element is NaN.
    """

    def __init__(
        self,
        state_min: Union[float, np.ndarray] = -np.inf,
        state_max: Union[float, np.ndarray] = np.inf,
        angle_wrap_centers: np.ndarray = None,
    ):
        self.state_min = state_min
        self.state_max = state_max
        self.angle_wrap_centers = angle_wrap_centers

    def step(self, step_size: float, state: np.ndarray, control: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Computes the dynamics state transition from the current state and control input.

        Parameters
        ----------
        step_size : float
            Duration of the simulation step in seconds.
        state : np.ndarray
            Current state of the system at the beginning of the simulation step.
        control : np.ndarray
            Control vector of the dynamics model.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            Tuple of the system's next state and the state's instantaneous time derivative at the end of the step
        """
        next_state, state_dot = self._step(step_size, state, control)
        # clip first, then wrap angle states into their configured interval
        next_state = np.clip(next_state, self.state_min, self.state_max)
        next_state = self._wrap_angles(next_state)
        return next_state, state_dot

    def _wrap_angles(self, state):
        """Wrap flagged angle elements into [center - pi, center + pi)."""
        wrapped_state = state.copy()
        if self.angle_wrap_centers is not None:
            # NaN centers mean "do not wrap this element"
            wrap_idxs = np.logical_not(np.isnan(self.angle_wrap_centers))
            centers = self.angle_wrap_centers[wrap_idxs]
            # Wrap while preserving the angle modulo 2*pi. The previous formula,
            # ((x + pi) % 2pi) - pi + center, is congruent to x + center (mod 2pi)
            # and therefore SHIFTED wrapped angles by the center instead of
            # wrapping them; subtracting the center inside the modulo fixes this.
            wrapped_state[wrap_idxs] = \
                ((wrapped_state[wrap_idxs] - centers + np.pi) % (2 * np.pi)) - np.pi + centers
        return wrapped_state

    @abc.abstractmethod
    def _step(self, step_size: float, state: np.ndarray, control: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        raise NotImplementedError
class BaseODESolverDynamics(BaseDynamics):
    """
    State transition implementation for generic Ordinary Differential Equation dynamics models.
    Computes next state through numerical integration of differential equation.

    Parameters
    ----------
    integration_method : string
        Numerical integration method used by dynamics solver. One of ['RK45', 'Euler'].
        'RK45' is slow but very accurate.
        'Euler' is fast but very inaccurate.
    kwargs
        Additional keyword arguments passed to parent BaseDynamics constructor.
    """

    def __init__(self, integration_method="RK45", **kwargs):
        self.integration_method = integration_method
        super().__init__(**kwargs)

    def compute_state_dot(self, t: float, state: np.ndarray, control: np.ndarray) -> np.ndarray:
        """
        Computes the instantaneous time derivative of the state vector

        Parameters
        ----------
        t : float
            Time in seconds since the beginning of the simulation step.
            Note, this is NOT the total simulation time but the time within the individual step.
        state : np.ndarray
            Current state vector at time t.
        control : np.ndarray
            Control vector.

        Returns
        -------
        np.ndarray
            Instantaneous time derivative of the state vector.
        """
        state_dot = self._compute_state_dot(t, state, control)
        state_dot = self._clip_state_dot_by_state_limits(state, state_dot)
        return state_dot

    @abc.abstractmethod
    def _compute_state_dot(self, t: float, state: np.ndarray, control: np.ndarray) -> np.ndarray:
        # subclasses implement the raw (unclipped) ODE right-hand side
        raise NotImplementedError

    def _clip_state_dot_by_state_limits(self, state, state_dot):
        """At a saturated state bound, only permit derivative components
        pointing back into the valid range (mutates state_dot in place)."""
        lower_bounded_states = state <= self.state_min
        upper_bounded_state = state >= self.state_max

        state_dot[lower_bounded_states] = np.clip(state_dot[lower_bounded_states], 0, np.inf)
        state_dot[upper_bounded_state] = np.clip(state_dot[upper_bounded_state], -np.inf, 0)

        return state_dot

    def _step(self, step_size, state, control):
        if self.integration_method == "RK45":
            sol = scipy.integrate.solve_ivp(self.compute_state_dot, (0, step_size), state, args=(control, ))

            next_state = sol.y[:, -1]  # save last timestep of integration solution
            state_dot = self.compute_state_dot(step_size, next_state, control)
        elif self.integration_method == "Euler":
            # single explicit Euler step across the whole step_size
            state_dot = self.compute_state_dot(0, state, control)
            next_state = state + step_size * state_dot
        else:
            raise ValueError(f"invalid integration method '{self.integration_method}'")

        return next_state, state_dot
class BaseVectorizedODESolverDynamics(BaseODESolverDynamics):
    """
    State transition implementation for ODE dynamics models of the form
    dx/dt = f(x)x + g(x)u.

    At each point in the numerical integration process, A = f(x) and B = g(x)
    are evaluated at the integration point to form the matrix product
    dx/dt = Ax + Bu. The next state is computed by numerically integrating
    this differential equation.

    Parameters
    ----------
    kwargs
        Additional keyword arguments passed to parent BaseODESolverDynamics constructor.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @abc.abstractmethod
    def _gen_dynamics_matrices(self, state) -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute the vectorized ODE matrices A, B from the current system state.

        Allows non-linear dynamics models to be linearized at each numerical
        integration interval.

        Parameters
        ----------
        state : np.ndarray
            Current state vector of the system.

        Returns
        -------
        np.ndarray
            A of dx/dt = Ax + Bu
        np.ndarray
            B of dx/dt = Ax + Bu
        """
        raise NotImplementedError

    def _compute_state_dot(self, t: float, state: np.ndarray, control: np.ndarray):
        # Linearize at the current integration point, then apply dx/dt = Ax + Bu.
        dyn_matrix, ctrl_matrix = self._gen_dynamics_matrices(state)
        return dyn_matrix @ state + ctrl_matrix @ control
class BaseLinearODESolverDynamics(BaseVectorizedODESolverDynamics):
    """
    State transition implementation for generic Linear Ordinary Differential Equation dynamics models of the form dx/dt = Ax+Bu.
    Computes next state through numerical integration of differential equation.

    Parameters
    ----------
    A : np.ndarray
        State transition matrix. A of dx/dt = Ax + Bu. Should be dimension len(n) x len(n)
    B : np.ndarray
        Control input matrix. B of dx/dt = Ax + Bu. Should be dimension len(n) x len(u)
    kwargs
        Additional keyword arguments passed to parent BaseVectorizedODESolverDynamics constructor.
    """

    def __init__(self, A: np.ndarray, B: np.ndarray, **kwargs):
        # Validate matrix dimensions up front so a misshapen model fails loudly.
        # (Fixed assertion messages: the 2-D checks previously claimed "square",
        # and the B check referred to A.)
        assert len(A.shape) == 2, f"A must be a 2D matrix. Instead got shape {A.shape}"
        assert len(B.shape) == 2, f"B must be a 2D matrix. Instead got shape {B.shape}"
        assert A.shape[0] == A.shape[1], f"A must be a square matrix, not dimension {A.shape}"
        assert A.shape[1] == B.shape[0], (
            "number of columns in A must match the number of rows in B." + f" However, got shapes {A.shape} for A and {B.shape} for B"
        )
        # Defensive copies: external mutation of A/B must not alter the dynamics.
        self.A = np.copy(A)
        self.B = np.copy(B)
        super().__init__(**kwargs)

    def _gen_dynamics_matrices(self, state) -> Tuple[np.ndarray, np.ndarray]:
        """Return the constant (A, B) pair; linear dynamics do not depend on state."""
        return self.A, self.B
import abc
from typing import Tuple
import numpy as np
from safe_autonomy_dynamics.base_models import BaseEntity, BaseEntityValidator, BaseLinearODESolverDynamics
# Default mass of the integrator entity.
M_DEFAULT = 1
# Default linear velocity damping coefficient (zero: no damping).
DAMPING_DEFAULT = 0
class BaseIntegratorValidator(BaseEntityValidator):
    """
    Validator for Integrator kwargs.

    Parameters
    ----------
    x, y, z : float
        Initial position components, by default 0.
    xdot, ydot, zdot : float
        Initial velocity components, by default 0.
    """
    x: float = 0
    y: float = 0
    z: float = 0
    xdot: float = 0
    ydot: float = 0
    zdot: float = 0
class BaseIntegrator(BaseEntity):
    """
    Base interface for Integrator entities.

    Subclasses expose ``position`` and ``velocity`` vectors over their state.
    """

    def __init__(self, dynamics, control_default, control_min, control_max, control_map, **kwargs):
        super().__init__(
            dynamics=dynamics,
            control_default=control_default,
            control_min=control_min,
            control_max=control_max,
            control_map=control_map,
            **kwargs
        )

    @classmethod
    def _get_config_validator(cls):
        return BaseIntegratorValidator

    def __eq__(self, other):
        # Equality is elementwise agreement of velocity and position.
        if not isinstance(other, BaseIntegrator):
            return False
        same = (self.velocity == other.velocity).all()
        return same and (self.position == other.position).all()

    @property
    @abc.abstractmethod
    def position(self):
        """get position vector"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def velocity(self):
        """Get velocity vector"""
        raise NotImplementedError
class Integrator1d(BaseIntegrator):
    """
    1d integrator simulation entity.

    States
        x
        x_dot

    Controls
        thrust_x
            default range = [-1, 1]

    Parameters
    ----------
    m: float
        Mass of integrator, by default 1.
    damping: float
        Linear velocity damping coefficient, by default 0.
    integration_method: str
        Numerical integration method passed to dynamics model. See BaseODESolverDynamics.
    kwargs:
        Additional keyword arguments passed to BaseIntegrator.
    """

    def __init__(self, m=M_DEFAULT, damping=DAMPING_DEFAULT, integration_method="RK45", **kwargs):
        self._state = np.array([])
        super().__init__(
            IntegratorDynamics(m=m, damping=damping, mode='1d', integration_method=integration_method),
            control_default=np.zeros((1, )),
            control_min=-1,
            control_max=1,
            control_map={'thrust_x': 0},
            **kwargs
        )

    def __eq__(self, other):
        if not isinstance(other, Integrator1d):
            return False
        same = (self.velocity == other.velocity).all()
        return same and (self.position == other.position).all()

    def _build_state(self):
        # Assemble the [x, x_dot] state vector from the validated config.
        return np.array([self.config.x, self.config.xdot], dtype=np.float32)

    @property
    def x(self):
        """get x"""
        return self._state[0]

    @property
    def y(self):
        """get y (always 0 for a 1d integrator)"""
        return 0

    @property
    def z(self):
        """get z (always 0 for a 1d integrator)"""
        return 0

    @property
    def x_dot(self):
        """get x_dot, the velocity component in the x direction"""
        return self._state[1]

    @property
    def y_dot(self):
        """get y_dot (always 0 for a 1d integrator)"""
        return 0

    @property
    def z_dot(self):
        """get z_dot (always 0 for a 1d integrator)"""
        return 0

    @property
    def position(self):
        """get position (scalar x state)"""
        return self._state[0].copy()

    @property
    def velocity(self):
        """Get velocity (scalar x_dot state)"""
        return self._state[1].copy()
class Integrator2d(BaseIntegrator):
    """
    2d integrator simulation entity.

    States
        x
        y
        x_dot
        y_dot

    Controls
        thrust_x
            default range = [-1, 1]
        thrust_y
            default range = [-1, 1]

    Parameters
    ----------
    m: float
        Mass of integrator, by default 1.
    damping: float
        Linear velocity damping coefficient, by default 0.
    integration_method: str
        Numerical integration method passed to dynamics model. See BaseODESolverDynamics.
    kwargs:
        Additional keyword arguments passed to BaseIntegrator.
    """

    def __init__(self, m=M_DEFAULT, damping=DAMPING_DEFAULT, integration_method="RK45", **kwargs):
        dynamics = IntegratorDynamics(m=m, damping=damping, mode='2d', integration_method=integration_method)
        self._state = np.array([])
        # BUGFIX: thrust_y previously mapped to control index 0, aliasing thrust_x.
        control_map = {
            'thrust_x': 0,
            'thrust_y': 1,
        }
        super().__init__(dynamics, control_default=np.zeros((2, )), control_min=-1, control_max=1, control_map=control_map, **kwargs)

    def __eq__(self, other):
        # BUGFIX: previously checked isinstance(other, Integrator1d).
        if isinstance(other, Integrator2d):
            eq = (self.velocity == other.velocity).all()
            eq = eq and (self.position == other.position).all()
            return eq
        return False

    def _build_state(self):
        # Assemble the [x, y, x_dot, y_dot] state vector from the validated config.
        state = np.array([self.config.x, self.config.y] + [self.config.xdot, self.config.ydot], dtype=np.float32)
        return state

    @property
    def x(self):
        """get x"""
        return self._state[0]

    @property
    def y(self):
        """get y"""
        return self._state[1]

    @property
    def z(self):
        """get z (always 0 for a 2d integrator)"""
        return 0

    @property
    def x_dot(self):
        """get x_dot, the velocity component in the x direction"""
        return self._state[2]

    @property
    def y_dot(self):
        """get y_dot, the velocity component in the y direction"""
        return self._state[3]

    @property
    def z_dot(self):
        """get z_dot (always 0 for a 2d integrator)"""
        return 0

    @property
    def position(self):
        """get position vector [x, y]"""
        return self._state[0:2].copy()

    @property
    def velocity(self):
        """Get velocity vector [x_dot, y_dot]"""
        return self._state[2:4].copy()
class Integrator3d(BaseIntegrator):
    """
    3d integrator simulation entity.

    States
        x
        y
        z
        x_dot
        y_dot
        z_dot

    Controls
        thrust_x
            default range = [-1, 1]
        thrust_y
            default range = [-1, 1]
        thrust_z
            default range = [-1, 1]

    Parameters
    ----------
    m: float
        Mass of integrator, by default 1.
    damping: float
        Linear velocity damping coefficient, by default 0.
    integration_method: str
        Numerical integration method passed to dynamics model. See BaseODESolverDynamics.
    kwargs:
        Additional keyword arguments passed to BaseIntegrator.
    """

    def __init__(self, m=M_DEFAULT, damping=DAMPING_DEFAULT, integration_method="RK45", **kwargs):
        dynamics = IntegratorDynamics(m=m, damping=damping, mode='3d', integration_method=integration_method)
        self._state = np.array([])
        # BUGFIX: thrust_y/thrust_z previously mapped to control index 0, aliasing thrust_x.
        control_map = {
            'thrust_x': 0,
            'thrust_y': 1,
            'thrust_z': 2,
        }
        super().__init__(dynamics, control_default=np.zeros((3, )), control_min=-1, control_max=1, control_map=control_map, **kwargs)

    def __eq__(self, other):
        # BUGFIX: previously checked isinstance(other, Integrator1d).
        if isinstance(other, Integrator3d):
            eq = (self.velocity == other.velocity).all()
            eq = eq and (self.position == other.position).all()
            return eq
        return False

    def _build_state(self):
        # Assemble the [x, y, z, x_dot, y_dot, z_dot] state vector from the validated config.
        state = np.array(
            [self.config.x, self.config.y, self.config.z] + [self.config.xdot, self.config.ydot, self.config.zdot], dtype=np.float32
        )
        return state

    @property
    def x(self):
        """get x"""
        return self._state[0]

    @property
    def y(self):
        """get y"""
        return self._state[1]

    @property
    def z(self):
        """get z"""
        return self._state[2]

    @property
    def x_dot(self):
        """get x_dot, the velocity component in the x direction"""
        return self._state[3]

    @property
    def y_dot(self):
        """get y_dot, the velocity component in the y direction"""
        return self._state[4]

    @property
    def z_dot(self):
        """get z_dot, the velocity component in the z direction"""
        return self._state[5]

    @property
    def position(self):
        """get position vector [x, y, z]"""
        return self._state[0:3].copy()

    @property
    def velocity(self):
        """Get velocity vector [x_dot, y_dot, z_dot]"""
        return self._state[3:6].copy()
class IntegratorDynamics(BaseLinearODESolverDynamics):
    """
    State transition implementation of the point-mass integrator dynamics model.

    Parameters
    ----------
    m: float
        Mass of object, by default 1
    damping: float
        linear velocity damper. Default is zero
    mode : str, optional
        dimensionality of dynamics matrices. '1d', '2d', or '3d', by default '1d'
    kwargs:
        Additional keyword arguments passed to parent class BaseLinearODESolverDynamics constructor
    """

    def __init__(self, m=M_DEFAULT, damping=DAMPING_DEFAULT, mode='1d', **kwargs):
        self.m = m
        self.damping = damping
        # The dynamics are linear, so A and B are built once from the
        # configuration and handed to the linear ODE solver base class.
        matrices = generate_dynamics_matrices(self.m, self.damping, mode)
        super().__init__(A=matrices[0], B=matrices[1], **kwargs)
def generate_dynamics_matrices(m: float, damping: float = 0, mode: str = '1d') -> Tuple[np.ndarray, np.ndarray]:
    """Generates A and B Matrices for linearized dynamics of dx/dt = Ax + Bu

    Parameters
    ----------
    m : float
        mass of object
    damping : float, optional
        linear velocity damper. Default is zero
    mode : str, optional
        dimensionality of dynamics matrices. '1d', '2d', or '3d', by default '1d'

    Returns
    -------
    np.ndarray
        A dynamics matrix
    np.ndarray
        B dynamics matrix

    Raises
    ------
    ValueError
        If mode is not one of '1d', '2d', '3d'.
    """
    if mode not in ('1d', '2d', '3d'):
        # raise instead of assert: input validation must survive `python -O`
        raise ValueError("mode must be one of ['1d', '2d', '3d']")
    dims = {'1d': 1, '2d': 2, '3d': 3}[mode]
    # A: upper-right identity maps velocity into the position derivative;
    # lower-right -damping block applies linear velocity damping.
    A = np.zeros((2 * dims, 2 * dims), dtype=np.float64)
    A[:dims, dims:] = np.eye(dims)
    A[dims:, dims:] = -damping * np.eye(dims)
    # B: thrust enters the velocity derivative scaled by 1/m.
    B = np.zeros((2 * dims, dims), dtype=np.float64)
    B[dims:, :] = np.eye(dims) / m
    return A, B
import math
from typing import Union
import numpy as np
from scipy.spatial.transform import Rotation
from safe_autonomy_dynamics.base_models import BaseEntityValidator, BaseODESolverDynamics, BaseRotationEntity
from safe_autonomy_dynamics.cwh import M_DEFAULT, N_DEFAULT, generate_cwh_matrices
# Default spacecraft inertia about the z axis (kg*m^2).
INERTIA_DEFAULT = 0.0573
# Default reaction wheel inertia (kg*m^2).
INERTIA_WHEEL_DEFAULT = 4.1e-5
# Default body angular acceleration limit (rad/s^2).
ANG_ACC_LIMIT_DEFAULT = 0.017453
# Default body angular velocity limit (rad/s).
ANG_VEL_LIMIT_DEFAULT = 0.034907
# Default reaction wheel acceleration limit (rad/s^2).
ACC_LIMIT_WHEEL_DEFAULT = 181.3
# Default reaction wheel velocity limit (rad/s).
VEL_LIMIT_WHEEL_DEFAULT = 576
class CWHRotation2dSpacecraftValidator(BaseEntityValidator):
    """
    Validator for CWHRotation2dSpacecraft kwargs.

    Parameters
    ----------
    x: float
        x position value. m
    y: float
        y position value. m
    theta: float
        rotation angle value. rad
    x_dot: float
        x velocity value. m/s
    y_dot: float
        y velocity value. m/s
    wz: float
        rotation rate value. rad/s
    """
    x: float = 0
    y: float = 0
    theta: float = 0
    x_dot: float = 0
    y_dot: float = 0
    wz: float = 0
class CWHRotation2dSpacecraft(BaseRotationEntity):
    """
    Spacecraft with 2D translational Clohessy-Wiltshire dynamics in Hill's reference frame.
    In-plane motion (x,y) using +/- x thruster rotated to desired direction
    1D rotational dynamics (about z) using a +/- z reaction wheel

    States
        x
        y
        theta
        x_dot
        y_dot
        theta_dot

    Controls
        thrust_x
            range = [-1, 1] Newtons
        thrust_y
            range = [-1, 1] Newtons
        moment_z
            range = [-0.001, 0.001] Newton-Meters

    Parameters
    ----------
    m: float
        Mass of spacecraft in kilograms, by default 12.
    inertia: float
        Inertia of spacecraft in kg*m^2
    ang_acc_limit: float
        Angular acceleration limit in rad/s^2
    ang_vel_limit: float
        Angular velocity limit in rad/s
    inertia_wheel: float
        Inertia of reaction wheel in kg*m^2
    acc_limit_wheel: float
        Acceleration limit of reaction wheel in rad/s^2
    vel_limit_wheel: float
        Velocity limit of reaction wheel in rad/s
    n: float
        Orbital mean motion of Hill's reference frame's circular orbit in rad/s, by default 0.001027.
    integration_method: str
        Numerical integration method passed to dynamics model. See BaseODESolverDynamics.
    kwargs:
        Additional keyword arguments passed to parent class BaseRotationEntity.
    """

    def __init__(
        self,
        m=M_DEFAULT,
        inertia=INERTIA_DEFAULT,
        ang_acc_limit=ANG_ACC_LIMIT_DEFAULT,
        ang_vel_limit=ANG_VEL_LIMIT_DEFAULT,
        inertia_wheel=INERTIA_WHEEL_DEFAULT,
        acc_limit_wheel=ACC_LIMIT_WHEEL_DEFAULT,
        vel_limit_wheel=VEL_LIMIT_WHEEL_DEFAULT,
        n=N_DEFAULT,
        integration_method="RK45",
        **kwargs
    ):
        self._state = np.array([])
        self.partner = {}
        self.m = m  # kg
        self.inertia = inertia  # kg*m^2
        self.ang_acc_limit = ang_acc_limit  # rad/s^2
        self.ang_vel_limit = ang_vel_limit  # rad/s
        self.inertia_wheel = inertia_wheel  # kg*m^2
        self.acc_limit_wheel = acc_limit_wheel  # rad/s^2
        self.vel_limit_wheel = vel_limit_wheel  # rad/s
        self.n = n  # rads/s
        # Effective angular limits: the tighter of the body limit and what the
        # reaction wheel can impart (wheel inertia * wheel limit / body inertia).
        ang_acc_limit = min(self.ang_acc_limit, self.inertia_wheel * self.acc_limit_wheel / self.inertia)
        ang_vel_limit = min(self.ang_vel_limit, self.inertia_wheel * self.vel_limit_wheel / self.inertia)
        control_default = np.zeros((3, ))
        control_min = np.array([-1, -1, -ang_acc_limit * self.inertia])
        control_max = np.array([1, 1, ang_acc_limit * self.inertia])
        control_map = {
            'thrust_x': 0,
            'thrust_y': 1,
            'moment_z': 2,
        }
        # Create instance of dynamics class
        dynamics = CWHRotation2dDynamics(
            m=m, inertia=inertia, ang_acc_limit=ang_acc_limit, ang_vel_limit=ang_vel_limit, n=n, integration_method=integration_method
        )
        super().__init__(
            dynamics, control_default=control_default, control_min=control_min, control_max=control_max, control_map=control_map, **kwargs
        )

    @classmethod
    def _get_config_validator(cls):
        return CWHRotation2dSpacecraftValidator

    def __eq__(self, other):
        if isinstance(other, CWHRotation2dSpacecraft):
            eq = (self.velocity == other.velocity).all()
            eq = eq and (self.position == other.position).all()
            # BUGFIX: as_quat must be *called* -- comparing the bound methods
            # compared method objects instead of quaternion values, and the
            # resulting plain bool has no .all(), raising AttributeError.
            eq = eq and (self.orientation.as_quat() == other.orientation.as_quat()).all()
            eq = eq and (self.angular_velocity == other.angular_velocity).all()
            return eq
        return False

    def register_partner(self, partner: BaseRotationEntity):
        """
        Register another entity as this entity's partner. Defines line of communication between entities.

        Parameters
        ----------
        partner: BaseEntity
            Entity with line of communication to this entity.

        Returns
        -------
        None
        """
        self.partner[partner.name] = partner

    def _build_state(self):
        """form state vector [x, y, theta, x_dot, y_dot, wz] from the validated config"""
        state = np.array(
            [self.config.x, self.config.y, self.config.theta] + [self.config.x_dot, self.config.y_dot, self.config.wz], dtype=np.float32
        )
        return state

    @property
    def x(self):
        """get x"""
        return self._state[0]

    @property
    def y(self):
        """get y"""
        return self._state[1]

    @property
    def z(self):
        """get z (always 0 for planar motion)"""
        return 0

    @property
    def q1(self):
        """get first element of quaternion"""
        return self.quaternion[0]

    @property
    def q2(self):
        """get second element of quaternion"""
        return self.quaternion[1]

    @property
    def q3(self):
        """get third element of quaternion"""
        return self.quaternion[2]

    @property
    def q4(self):
        """get fourth element of quaternion (scalar)"""
        return self.quaternion[3]

    @property
    def theta(self):
        """get theta"""
        return self._state[2]

    @property
    def x_dot(self):
        """get x_dot, the velocity component in the x direction"""
        return self._state[3]

    @property
    def y_dot(self):
        """get y_dot, the velocity component in the y direction"""
        return self._state[4]

    @property
    def z_dot(self):
        """get z_dot, the velocity component in the z axis (always 0 for planar motion)"""
        return 0

    @property
    def wx(self):
        """get wx, the angular velocity component about the x axis (always 0 for planar motion)"""
        return 0

    @property
    def wy(self):
        """get wy, the angular velocity component about the y axis (always 0 for planar motion)"""
        return 0

    @property
    def wz(self):
        """get wz, the angular velocity component about the z axis"""
        return self._state[5]

    @property
    def position(self):
        """get 3d position vector"""
        position = np.array([self.x, self.y, self.z])
        return position

    @property
    def orientation(self):
        """
        Get orientation of CWHRotationSpacecraft

        Returns
        -------
        scipy.spatial.transform.Rotation
            Rotation transformation of the entity's local reference frame basis vectors in the global reference frame.
            i.e. applying this rotation to [1, 0, 0] yields the entity's local x-axis in the global frame.
        """
        return Rotation.from_euler("ZYX", [self.theta, 0, 0])

    @property
    def quaternion(self):
        """get 4d quaternion

        Quaternion order (scalar in 4th element) matches scipy convention of [x,y,z,w]
        """
        return self.orientation.as_quat()

    @property
    def velocity(self):
        """Get 3d velocity vector"""
        return np.array([self.x_dot, self.y_dot, self.z_dot])

    @property
    def angular_velocity(self):
        """Get 3d angular velocity vector"""
        return np.array([self.wx, self.wy, self.wz])
class CWHRotation2dDynamics(BaseODESolverDynamics):
    """
    State transition implementation of the 2D (in-plane) Clohessy-Wiltshire
    dynamics model with 1D rotational dynamics about z.

    Parameters
    ----------
    m: float
        Mass of spacecraft in kilograms, by default 12
    inertia: float
        Inertia of spacecraft in kg*m^2
    ang_acc_limit: float
        Angular acceleration limit in rad/s^2
    ang_vel_limit: float
        Angular velocity limit in rad/s
    n: float
        Orbital mean motion of Hill's reference frame's circular orbit in rad/s, by default 0.001027
    kwargs:
        Additional keyword arguments passed to parent class BaseODESolverDynamics constructor
    """

    def __init__(
        self,
        m=M_DEFAULT,
        inertia=INERTIA_DEFAULT,
        ang_acc_limit=ANG_ACC_LIMIT_DEFAULT,
        ang_vel_limit=ANG_VEL_LIMIT_DEFAULT,
        n=N_DEFAULT,
        state_max: Union[float, np.ndarray] = None,
        state_min: Union[float, np.ndarray] = None,
        angle_wrap_centers: np.ndarray = None,
        **kwargs
    ):
        self.m = m  # kg
        self.inertia = inertia  # kg*m^2
        self.ang_acc_limit = ang_acc_limit  # rad/s^2
        self.ang_vel_limit = ang_vel_limit  # rad/s
        self.n = n  # rads/s
        # Linear CWH matrices for the translational (x, y, x_dot, y_dot) substate
        A, B = generate_cwh_matrices(self.m, self.n, '2d')
        assert len(A.shape) == 2, f"A must be square matrix. Instead got shape {A.shape}"
        assert len(B.shape) == 2, f"A must be square matrix. Instead got shape {B.shape}"
        assert A.shape[0] == A.shape[1], f"A must be a square matrix, not dimension {A.shape}"
        assert A.shape[1] == B.shape[0], (
            "number of columns in A must match the number of rows in B." + f" However, got shapes {A.shape} for A and {B.shape} for B"
        )
        # defensive copies so external mutation of A/B doesn't alter the dynamics
        self.A = np.copy(A)
        self.B = np.copy(B)
        # Default state bounds: translation unbounded, angular rate limited
        if state_min is None:
            state_min = np.array([-np.inf, -np.inf, -np.inf, -np.inf, -np.inf, -self.ang_vel_limit])
        if state_max is None:
            state_max = np.array([np.inf, np.inf, np.inf, np.inf, np.inf, self.ang_vel_limit])
        # theta (state index 2) wraps about center 0
        if angle_wrap_centers is None:
            angle_wrap_centers = np.array([None, None, 0, None, None, None], dtype=float)
        super().__init__(state_min=state_min, state_max=state_max, angle_wrap_centers=angle_wrap_centers, **kwargs)

    def _compute_state_dot(self, t: float, state: np.ndarray, control: np.ndarray) -> np.ndarray:
        x, y, theta, x_dot, y_dot, theta_dot = state
        # Form separate state vector for translational state
        pos_vel_state_vec = np.array([x, y, x_dot, y_dot], dtype=np.float64)
        # Compute the rotated thrust vector: rotate the (thrust_x, thrust_y)
        # control by theta before applying it to the CWH equations
        thrust_vector = (
            control[0] * np.array([math.cos(theta), math.sin(theta)]) + control[1] * np.array([-math.sin(theta), math.cos(theta)])
        )
        # Compute derivatives
        pos_vel_derivative = np.matmul(self.A, pos_vel_state_vec) + np.matmul(self.B, thrust_vector)
        theta_dot_dot = control[2] / self.inertia
        # check angular velocity limit: saturate the reported rate and only
        # permit accelerations that drive the rate back inside the limit
        if theta_dot >= self.ang_vel_limit:
            theta_dot_dot = min(0, theta_dot_dot)
            theta_dot = self.ang_vel_limit
        elif theta_dot <= -self.ang_vel_limit:
            theta_dot_dot = max(0, theta_dot_dot)
            theta_dot = -self.ang_vel_limit
        # Form array of state derivatives
        state_derivative = np.array(
            [pos_vel_derivative[0], pos_vel_derivative[1], theta_dot, pos_vel_derivative[2], pos_vel_derivative[3], theta_dot_dot],
            dtype=np.float32
        )
        return state_derivative
from typing import Union
import numpy as np
from scipy.spatial.transform import Rotation
from safe_autonomy_dynamics.base_models import BaseEntityValidator, BaseODESolverDynamics, BaseRotationEntity
from safe_autonomy_dynamics.cwh import generate_cwh_matrices
from safe_autonomy_dynamics.utils import number_list_to_np
# Default spacecraft mass (kg).
M_DEFAULT = 12
# Default 3x3 spacecraft inertia matrix (kg*m^2).
# NOTE(review): np.matrix is deprecated in NumPy; consider np.array -- confirm
# no caller relies on np.matrix semantics before changing.
INERTIA_MATRIX_DEFAULT = np.matrix([[0.0573, 0.0, 0.0], [0.0, 0.0573, 0.0], [0.0, 0.0, 0.0573]])
# Default reaction wheel inertia (kg*m^2).
INERTIA_WHEEL_DEFAULT = 4.1e-5
# Default body angular acceleration limit (rad/s^2).
ANG_ACC_LIMIT_DEFAULT = 0.017453
# Default body angular velocity limit (rad/s).
ANG_VEL_LIMIT_DEFAULT = 0.034907
# Default reaction wheel acceleration limit (rad/s^2).
ACC_LIMIT_WHEEL_DEFAULT = 181.3
# Default reaction wheel velocity limit (rad/s).
VEL_LIMIT_WHEEL_DEFAULT = 576
# Default thrust control limit (N).
THRUST_CONTROL_LIMIT_DEFAULT = 1.0
# Default orbital mean motion of Hill's frame circular orbit (rad/s).
N_DEFAULT = 0.001027
class SixDOFSpacecraftValidator(BaseEntityValidator):
    """
    Validator for SixDOFSpacecraft kwargs.

    Parameters
    ----------
    x: float
        x position value
    y: float
        y position value
    z: float
        z position value
    x_dot: float
        x velocity value
    y_dot: float
        y velocity value
    z_dot: float
        z velocity value
    q1: float
        first element of quaternion - rotation from body to Hill frame
    q2: float
        second element of quaternion - rotation from body to Hill frame
    q3: float
        third element of quaternion - rotation from body to Hill frame
    q4: float
        fourth element of quaternion (scalar) - rotation from body to Hill frame.
        Placing the scalar as the 4th element matches the convention used by scipy.
    wx: float
        x axis angular rate value
    wy: float
        y axis angular rate value
    wz: float
        z axis angular rate value
    """
    x: float = 0
    y: float = 0
    z: float = 0
    x_dot: float = 0
    y_dot: float = 0
    z_dot: float = 0
    q1: float = 0
    q2: float = 0
    q3: float = 0
    q4: float = 0
    wx: float = 0
    wy: float = 0
    wz: float = 0
class SixDOFSpacecraft(BaseRotationEntity):
    """
    Spacecraft with 3D Clohessy-Wiltshire translational dynamics, in Hill's frame and 3D rotational dynamics

    States
        x, y, z
        q1, q2, q3, q4
        x_dot, y_dot, z_dot
        wx, wy, wz

    Controls
        thrust_x
            range = [-1, 1] Newtons
        thrust_y
            range = [-1, 1] Newtons
        thrust_z
            range = [-1, 1] Newtons
        moment_x
            range = [-0.001, 0.001] Newton-Meters
        moment_y
            range = [-0.001, 0.001] Newton-Meters
        moment_z
            range = [-0.001, 0.001] Newton-Meters

    Parameters
    ----------
    m: float
        Mass of spacecraft in kilograms, by default 12
    inertia_matrix: np.ndarray
        Inertia matrix of spacecraft (3x3) in kg*m^2
    ang_acc_limit: float
        Angular acceleration limit in rad/s^2
    ang_vel_limit: float
        Angular velocity limit in rad/s
    inertia_wheel: float
        Inertia of reaction wheel in kg*m^2
    acc_limit_wheel: float
        Acceleration limit of reaction wheel in rad/s^2
    vel_limit_wheel: float
        Velocity limit of reaction wheel in rad/s
    thrust_control_limit: float
        Thrust control limit in N
    body_frame_thrust: bool
        Flag indicating the reference frame for the control thrust vector: True- Body frame, False - Hill's frame
        by default, True
    n: float
        Orbital mean motion of Hill's reference frame's circular orbit in rad/s, by default 0.001027.
    integration_method: str
        Numerical integration method passed to dynamics model. See BaseODESolverDynamics.
    kwargs:
        Additional keyword arguments passed to parent class BaseRotationEntity.
    """

    def __init__(
        self,
        m=M_DEFAULT,
        inertia_matrix=INERTIA_MATRIX_DEFAULT,
        ang_acc_limit=ANG_ACC_LIMIT_DEFAULT,
        ang_vel_limit=ANG_VEL_LIMIT_DEFAULT,
        inertia_wheel=INERTIA_WHEEL_DEFAULT,
        acc_limit_wheel=ACC_LIMIT_WHEEL_DEFAULT,
        vel_limit_wheel=VEL_LIMIT_WHEEL_DEFAULT,
        thrust_control_limit=THRUST_CONTROL_LIMIT_DEFAULT,
        body_frame_thrust=True,
        n=N_DEFAULT,
        integration_method="RK45",
        **kwargs
    ):
        self._state = np.array([])
        # Define limits for angular acceleration, angular velocity, and control inputs:
        # per axis, the tighter of the body limit and what the reaction wheel can
        # impart (wheel inertia * wheel limit / body inertia about that axis).
        ang_acc_limit = number_list_to_np(ang_acc_limit, shape=(3, ))  # rad/s^2
        ang_vel_limit = number_list_to_np(ang_vel_limit, shape=(3, ))  # rad/s
        acc_limit_combined = np.zeros((3, ))
        vel_limit_combined = np.zeros((3, ))
        control_limit = np.zeros((6, ))
        for i in range(3):
            acc_limit_combined[i] = min(ang_acc_limit[i], inertia_wheel * acc_limit_wheel / inertia_matrix[i, i])
            vel_limit_combined[i] = min(ang_vel_limit[i], inertia_wheel * vel_limit_wheel / inertia_matrix[i, i])
            control_limit[i] = thrust_control_limit
            control_limit[i + 3] = acc_limit_combined[i] * inertia_matrix[i, i]
        control_default = np.zeros((6, ))
        control_min = -1 * control_limit
        control_max = control_limit
        control_map = {
            'thrust_x': 0,
            'thrust_y': 1,
            'thrust_z': 2,
            'moment_x': 3,
            'moment_y': 4,
            'moment_z': 5,
        }
        # Create instance of dynamics class
        dynamics = SixDOFDynamics(
            m=m,
            inertia_matrix=inertia_matrix,
            ang_acc_limit=acc_limit_combined,
            ang_vel_limit=vel_limit_combined,
            n=n,
            body_frame_thrust=body_frame_thrust,
            integration_method=integration_method
        )
        self.lead = None
        self.partner = {}
        super().__init__(
            dynamics, control_default=control_default, control_min=control_min, control_max=control_max, control_map=control_map, **kwargs
        )

    @classmethod
    def _get_config_validator(cls):
        return SixDOFSpacecraftValidator

    def __eq__(self, other):
        if isinstance(other, SixDOFSpacecraft):
            eq = (self.velocity == other.velocity).all()
            eq = eq and (self.position == other.position).all()
            # BUGFIX: as_quat must be *called* -- comparing the bound methods
            # compared method objects instead of quaternion values, and the
            # resulting plain bool has no .all(), raising AttributeError.
            eq = eq and (self.orientation.as_quat() == other.orientation.as_quat()).all()
            eq = eq and (self.angular_velocity == other.angular_velocity).all()
            return eq
        return False

    def register_lead(self, lead: BaseRotationEntity):
        """
        Register another entity as this entity's lead. Defines line of communication between entities.

        Parameters
        ----------
        lead: BaseRotationEntity
            Entity with line of communication to this entity.

        Returns
        -------
        None
        """
        self.lead = lead

    def register_partner(self, partner: BaseRotationEntity):
        """
        Register another entity as this entity's partner. Defines line of communication between entities.

        Parameters
        ----------
        partner: BaseRotationEntity
            Entity with line of communication to this entity.

        Returns
        -------
        None
        """
        self.partner[partner.name] = partner

    def _build_state(self):
        """form state vector [x, y, z, q1..q4, x_dot, y_dot, z_dot, wx, wy, wz] from the validated config"""
        state = np.array(
            [self.config.x, self.config.y, self.config.z] + [self.config.q1, self.config.q2, self.config.q3, self.config.q4] +
            [self.config.x_dot, self.config.y_dot, self.config.z_dot] + [self.config.wx, self.config.wy, self.config.wz],
            dtype=np.float32
        )
        return state

    @property
    def x(self):
        """get x"""
        return self._state[0]

    @property
    def y(self):
        """get y"""
        return self._state[1]

    @property
    def z(self):
        """get z"""
        return self._state[2]

    @property
    def q1(self):
        """get first element of quaternion"""
        return self._state[3]

    @property
    def q2(self):
        """get second element of quaternion"""
        return self._state[4]

    @property
    def q3(self):
        """get third element of quaternion"""
        return self._state[5]

    @property
    def q4(self):
        """get fourth element of quaternion (scalar)"""
        return self._state[6]

    @property
    def x_dot(self):
        """get x_dot, the velocity component in the x direction"""
        return self._state[7]

    @property
    def y_dot(self):
        """get y_dot, the velocity component in the y direction"""
        return self._state[8]

    @property
    def z_dot(self):
        """get z_dot, the velocity component in the z axis"""
        return self._state[9]

    @property
    def wx(self):
        """get wx, the angular velocity component about the x axis"""
        return self._state[10]

    @property
    def wy(self):
        """get wy, the angular velocity component about the y axis"""
        return self._state[11]

    @property
    def wz(self):
        """get wz, the angular velocity component about the z axis"""
        return self._state[12]

    @property
    def position(self):
        """get 3d position vector"""
        position = np.array([self.x, self.y, self.z])
        return position

    @property
    def orientation(self):
        """
        Get orientation of CWHRotationSpacecraft

        Returns
        -------
        scipy.spatial.transform.Rotation
            Rotation transformation of the entity's local reference frame basis vectors in the global reference frame.
            i.e. applying this rotation to [1, 0, 0] yields the entity's local x-axis in the global frame.
            In this implementation local frame is body, global frame is Hill's frame
            Quaternion order (scalar in 4th element) matches scipy convention of [x,y,z,w]
        """
        return Rotation.from_quat([self.q1, self.q2, self.q3, self.q4])

    @property
    def quaternion(self):
        """get 4d quaternion"""
        return self.orientation.as_quat()

    @property
    def velocity(self):
        """Get 3d velocity vector"""
        return np.array([self.x_dot, self.y_dot, self.z_dot])

    @property
    def angular_velocity(self):
        """Get 3d angular velocity vector"""
        return np.array([self.wx, self.wy, self.wz])
class SixDOFDynamics(BaseODESolverDynamics):
"""
State transition implementation of 3D Clohessy-Wiltshire dynamics model and 3D rotational dynamics model.
Parameters
----------
m: float
Mass of spacecraft in kilograms, by default 12
inertia_matrix: float
Inertia matrix of spacecraft (3x3) in kg*m^2
ang_acc_limit: float, list, np.ndarray
Angular acceleration limit in rad/s^2. If array_like, applied to x, y, z elementwise
ang_vel_limit: float, list, np.ndarray
Angular velocity limit in rad/s. If array_like, applied to x, y, z elementwise
thrust_control_limit: float
Thrust control limit in N
n: float
Orbital mean motion of Hill's reference frame's circular orbit in rad/s, by default 0.001027
body_frame_thrust: bool
Flag indicating the reference frame for the control thrust vector: True- Body frame, False - Hill's frame
by default, True
kwargs:
Additional keyword arguments passed to parent class BaseLinearODESolverDynamics constructor
"""
def __init__(
    self,
    m,
    inertia_matrix,
    ang_acc_limit,
    ang_vel_limit,
    n,
    body_frame_thrust=True,
    state_max: Union[float, np.ndarray] = None,
    state_min: Union[float, np.ndarray] = None,
    angle_wrap_centers: np.ndarray = None,
    **kwargs
):
    """Construct the 6DOF CWH translational + rotational dynamics model.

    Parameters are described in the class docstring. When not provided,
    ``state_min``/``state_max`` default to unbounded position/quaternion/velocity
    components with angular velocity clamped elementwise to +/- ``ang_vel_limit``.
    """
    self.m = m  # kg
    self.inertia_matrix = inertia_matrix  # kg*m^2
    self.n = n  # rads/s
    self.body_frame_thrust = body_frame_thrust
    self.control_thrust_Hill = np.zeros(3, )
    # NOTE(review): ang_acc_limit is normalized but never stored or used below --
    # confirm whether it should be retained as an attribute.
    ang_acc_limit = number_list_to_np(ang_acc_limit, shape=(3, ))  # rad/s^2
    ang_vel_limit = number_list_to_np(ang_vel_limit, shape=(3, ))  # rad/s
    A, B = generate_cwh_matrices(self.m, self.n, '3d')
    # Sanity-check the CWH matrices before storing them.
    assert len(A.shape) == 2, f"A must be a 2d matrix. Instead got shape {A.shape}"
    # fixed: this message previously referenced A instead of B
    assert len(B.shape) == 2, f"B must be a 2d matrix. Instead got shape {B.shape}"
    assert A.shape[0] == A.shape[1], f"A must be a square matrix, not dimension {A.shape}"
    assert A.shape[1] == B.shape[0], (
        "number of columns in A must match the number of rows in B." + f" However, got shapes {A.shape} for A and {B.shape} for B"
    )
    self.A = np.copy(A)
    self.B = np.copy(B)
    # Default bounds: first 10 state elements (position, quaternion, velocity)
    # unbounded, last 3 (angular velocity) limited elementwise.
    if state_min is None:
        state_min = np.concatenate((np.full(10, -np.inf), -ang_vel_limit))
    if state_max is None:
        state_max = np.concatenate((np.full(10, np.inf), ang_vel_limit))
    super().__init__(state_min=state_min, state_max=state_max, angle_wrap_centers=angle_wrap_centers, **kwargs)
def _compute_state_dot(self, t: float, state: np.ndarray, control: np.ndarray) -> np.ndarray:
    """Compute the state time-derivative for the 6DOF CWH + rotational model.

    Parameters
    ----------
    t : float
        Integration time (unused; the dynamics are time-invariant).
    state : np.ndarray
        13-element state [x, y, z, q1, q2, q3, q4, x_dot, y_dot, z_dot, wx, wy, wz].
    control : np.ndarray
        6-element control: thrust vector in elements 0-2, elements 3-5 feed the
        angular-rate equations (presumably applied moments -- confirm with caller).

    Returns
    -------
    np.ndarray
        13-element state derivative, downcast to float32.
    """
    x, y, z, q1, q2, q3, q4, x_dot, y_dot, z_dot, wx, wy, wz = state
    # Convert the control thrust to Hill's frame prior to application in the CWH equations
    if self.body_frame_thrust:
        rotationObj = Rotation.from_quat([q1, q2, q3, q4])
        self.control_thrust_Hill = rotationObj.apply(control[0:3])
    else:
        self.control_thrust_Hill = control[0:3]
    # Compute translational derivatives
    # Use Clohessey-Wiltshire A and B matrices
    pos_vel_state_vec = np.array([x, y, z, x_dot, y_dot, z_dot], dtype=np.float64)
    pos_vel_derivative = np.matmul(self.A, pos_vel_state_vec) + np.matmul(self.B, self.control_thrust_Hill)
    # Compute rotational derivatives
    # Quaternion kinematics (scalar-last convention): q_dot = 0.5 * Omega(w) * q
    q_derivative = np.zeros((4, ))
    w_derivative = np.zeros((3, ))
    q_derivative[0] = 0.5 * (q4 * wx - q3 * wy + q2 * wz)
    q_derivative[1] = 0.5 * (q3 * wx + q4 * wy - q1 * wz)
    q_derivative[2] = 0.5 * (-q2 * wx + q1 * wy + q4 * wz)
    q_derivative[3] = 0.5 * (-q1 * wx - q2 * wy - q3 * wz)
    # Angular rates use only the diagonal inertia entries (Euler equations for a
    # diagonal inertia matrix).
    w_derivative[0] = 1 / self.inertia_matrix[0, 0] * ((self.inertia_matrix[1, 1] - self.inertia_matrix[2, 2]) * wy * wz + control[3])
    w_derivative[1] = 1 / self.inertia_matrix[1, 1] * ((self.inertia_matrix[2, 2] - self.inertia_matrix[0, 0]) * wx * wz + control[4])
    w_derivative[2] = 1 / self.inertia_matrix[2, 2] * ((self.inertia_matrix[0, 0] - self.inertia_matrix[1, 1]) * wx * wy + control[5])
    # Form derivative array
    # NOTE(review): intermediates are float64 but the result is downcast to
    # float32 -- confirm the precision loss is intended.
    state_derivative = np.array(
        [
            pos_vel_derivative[0],
            pos_vel_derivative[1],
            pos_vel_derivative[2],
            q_derivative[0],
            q_derivative[1],
            q_derivative[2],
            q_derivative[3],
            pos_vel_derivative[3],
            pos_vel_derivative[4],
            pos_vel_derivative[5],
            w_derivative[0],
            w_derivative[1],
            w_derivative[2]
        ],
        dtype=np.float32
    )
    return state_derivative | /safe-autonomy-dynamics-1.0.1.tar.gz/safe-autonomy-dynamics-1.0.1/safe_autonomy_dynamics/cwh/sixdof_model.py | 0.935678 | 0.568775 | sixdof_model.py | pypi |
from typing import Tuple
import numpy as np
from scipy.spatial.transform import Rotation
from safe_autonomy_dynamics.base_models import BaseEntity, BaseEntityValidator, BaseLinearODESolverDynamics
M_DEFAULT = 12
N_DEFAULT = 0.001027
class CWHSpacecraftValidator(BaseEntityValidator):
    """
    Validator for CWHSpacecraft kwargs.

    Parameters
    ----------
    x: float
        x position value, by default 0
    y: float
        y position value, by default 0
    z: float
        z position value, by default 0
    x_dot: float
        x velocity value, by default 0
    y_dot: float
        y velocity value, by default 0
    z_dot: float
        z velocity value, by default 0

    NOTE(review): a previous version of this docstring described length-1 list
    parameters and a ValueError on improper list lengths; the declared fields
    below are scalar floats.
    """
    # Initial state defaults: spacecraft starts at the origin, at rest.
    x: float = 0
    y: float = 0
    z: float = 0
    x_dot: float = 0
    y_dot: float = 0
    z_dot: float = 0
class CWHSpacecraft(BaseEntity):
    """
    3D point mass spacecraft with +/- xyz thrusters and Clohessy-Wiltshire dynamics in Hill's reference frame.

    States
        x, y, z
        x_dot, y_dot, z_dot

    Controls
        thrust_x, thrust_y, thrust_z
            range = [-1, 1] Newtons

    Parameters
    ----------
    m: float
        Mass of spacecraft in kilograms, by default 12.
    n: float
        Orbital mean motion of Hill's reference frame's circular orbit in rad/s, by default 0.001027.
    integration_method: str
        Numerical integration method passed to dynamics model. See BaseODESolverDynamics.
    kwargs:
        Additional keyword arguments passed to CWHSpacecraftValidator.
    """

    def __init__(self, m=M_DEFAULT, n=N_DEFAULT, integration_method="RK45", **kwargs):
        self._state = np.array([])
        dynamics = CWHDynamics(m=m, n=n, integration_method=integration_method)
        super().__init__(
            dynamics,
            control_default=np.zeros((3, )),
            control_min=-1,
            control_max=1,
            control_map={'thrust_x': 0, 'thrust_y': 1, 'thrust_z': 2},
            **kwargs
        )

    @classmethod
    def _get_config_validator(cls):
        # Config kwargs are validated by the CWHSpacecraft-specific validator.
        return CWHSpacecraftValidator

    def __eq__(self, other):
        if not isinstance(other, CWHSpacecraft):
            return False
        # Spacecraft are equal when position, velocity and orientation all match.
        same = (self.position == other.position).all()
        same = same and (self.velocity == other.velocity).all()
        same = same and (self.orientation.as_euler("zyx") == other.orientation.as_euler("zyx")).all()
        return same

    def _build_state(self):
        """Assemble the initial 6-element state vector from the validated config."""
        cfg = self.config
        return np.array(
            [cfg.x, cfg.y, cfg.z, cfg.x_dot, cfg.y_dot, cfg.z_dot],
            dtype=np.float32,
        )

    @property
    def x(self):
        """get x"""
        return self._state[0]

    @property
    def y(self):
        """get y"""
        return self._state[1]

    @property
    def z(self):
        """get z"""
        return self._state[2]

    @property
    def x_dot(self):
        """get x_dot, the velocity component in the x direction"""
        return self._state[3]

    @property
    def y_dot(self):
        """get y_dot, the velocity component in the y direction"""
        return self._state[4]

    @property
    def z_dot(self):
        """get z_dot, the velocity component in the z direction"""
        return self._state[5]

    @property
    def position(self):
        """get 3d position vector (copy of the state slice)"""
        return np.array(self._state[0:3])

    @property
    def orientation(self):
        """
        Get orientation of CWHSpacecraft, which is always an identity rotation as a point mass model doesn't rotate.

        Returns
        -------
        scipy.spatial.transform.Rotation
            Identity rotation: a point mass carries no attitude information.
        """
        # Identity quaternion in scipy's scalar-last [x, y, z, w] convention
        return Rotation.from_quat([0, 0, 0, 1])

    @property
    def velocity(self):
        """Get 3d velocity vector (copy of the state slice)"""
        return np.array(self._state[3:6])
class CWHDynamics(BaseLinearODESolverDynamics):
    """
    State transition implementation of 3D Clohessy-Wiltshire dynamics model.

    Parameters
    ----------
    m: float
        Mass of spacecraft in kilograms, by default 12
    n: float
        Orbital mean motion of Hill's reference frame's circular orbit in rad/s, by default 0.001027
    kwargs:
        Additional keyword arguments passed to parent class BaseLinearODESolverDynamics constructor
    """

    def __init__(self, m=M_DEFAULT, n=N_DEFAULT, **kwargs):
        self.m = m  # spacecraft mass, kg
        self.n = n  # orbital mean motion, rad/s
        a_matrix, b_matrix = generate_cwh_matrices(self.m, self.n, mode='3d')
        super().__init__(A=a_matrix, B=b_matrix, **kwargs)
def generate_cwh_matrices(m: float, n: float, mode: str = '2d') -> Tuple[np.ndarray, np.ndarray]:
    """Generates A and B Matrices from Clohessy-Wiltshire linearized dynamics of dx/dt = Ax + Bu

    Parameters
    ----------
    m : float
        mass in kg of spacecraft
    n : float
        orbital mean motion in rad/s of current Hill's reference frame
    mode : str, optional
        dimensionality of dynamics matrices. '2d' or '3d', by default '2d'

    Returns
    -------
    np.ndarray
        A dynamics matrix
    np.ndarray
        B dynamics matrix
    """
    # fixed typo in message: "on of" -> "one of"
    assert mode in ['2d', '3d'], "mode must be one of ['2d', '3d']"
    if mode == '2d':
        # Planar CWH dynamics: state [x, y, x_dot, y_dot], control [Fx, Fy]
        A = np.array([
            [0, 0, 1, 0],
            [0, 0, 0, 1],
            [3 * n**2, 0, 0, 2 * n],
            [0, 0, -2 * n, 0],
        ], dtype=np.float64)
        B = np.array([
            [0, 0],
            [0, 0],
            [1 / m, 0],
            [0, 1 / m],
        ], dtype=np.float64)
    else:
        # Full 3d CWH dynamics: state [x, y, z, x_dot, y_dot, z_dot],
        # control [Fx, Fy, Fz]
        A = np.array(
            [
                [0, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 1],
                [3 * n**2, 0, 0, 0, 2 * n, 0],
                [0, 0, 0, -2 * n, 0, 0],
                [0, 0, -n**2, 0, 0, 0],
            ],
            dtype=np.float64
        )
        B = np.array([
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            [1 / m, 0, 0],
            [0, 1 / m, 0],
            [0, 0, 1 / m],
        ], dtype=np.float64)
    return A, B
__title__ = 'safe-cast'
__version__ = '0.3.4'
__version_info__ = tuple(__version__.split('.'))
__author__ = 'jefft@tune.com'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2018 TUNE, Inc.'
import numpy
def safe_cast(val, to_type, default=None):
    """Safely cast a value to type, and if failed, returned default if exists.

    Optional: Pass default value. Returned if casting fails.

    :param val: Value to be cast.
    :param to_type: Safely cast to a specific type.
    :param default: Default if casting fails.
    :return: Return casted value or default.
    :raises ValueError, TypeError: re-raised with a descriptive message when
        casting fails and no default was provided.
    """
    if val is None:
        return default
    try:
        return to_type(val)
    except (ValueError, TypeError) as ex:
        if default is None:
            # Build one message for both failure modes and re-raise with the
            # same exception type the cast produced (DRYs the former duplicated
            # except blocks).
            error_message = "Error: {0}, Value: {1}, Cast: {2} to {3}".format(
                str(ex).capitalize(),
                str(val),
                type(val).__name__,
                str(to_type.__name__)
            )
            if isinstance(ex, ValueError):
                raise ValueError(error_message)
            raise TypeError(error_message)
        return default
def safe_str(val, default=None):
    """Safely cast a value to a string.

    Optional: Pass default value. Returned if casting fails.

    :param val: Value to be cast to string.
    :param default: Default if casting fails.
    :return: Return string casted value or default.
    """
    if val is None:
        # Fall back to the empty string when no explicit default was given.
        return '' if default is None else default
    return safe_cast(val, str, default)
def safe_float(val, ndigits=2, default=None):
    """Safely cast a value to float rounded to *ndigits* digits.

    Commas are stripped from strings so "1,234.5" becomes 1234.5.
    Optional: Pass default value. Returned if casting fails.

    :param val: Value to be cast to float.
    :param ndigits: Number of digits to round to.
    :param default: Default if casting fails.
    :return: Rounded float (numpy scalar) or default.
    """
    if not val:  # None or '' or 0
        return default if default is not None else 0.0
    # isinstance (instead of the former exact type(val) == str check) also
    # handles str subclasses correctly.
    cleaned = val.replace(',', '') if isinstance(val, str) else val
    return numpy.around(safe_cast(cleaned, float, default), ndigits)
def safe_int(val, default=None):
    """Safely cast a value to an integer.

    Optional: Pass default value. Returned if casting fails.

    :param val: Value to be cast to int.
    :param default: Default if casting fails.
    :return: Return int casted value or default.
    """
    if not val:  # None or '' or 0
        return 0 if default is None else default
    # Round through safe_float first so strings like "1,234.5" are handled.
    rounded = safe_float(val, ndigits=0, default=default)
    return safe_cast(rounded, int, default)
def safe_dict(val, default=None):
    """Safely cast a value to a dictionary.

    Optional: Pass default value. Returned if casting fails.

    :param val: Value to be cast to dictionary.
    :param default: Default if casting fails.
    :return: Return dictionary casted value or default.
    """
    if not val:  # None or '' or empty container
        return {} if default is None else default
    return safe_cast(val, dict, default)
def safe_fraction(fraction, ndigits=2, default=None):
    """Safely cast a fraction string (e.g. "3/4" or "1 2/3") to a rounded float.

    NOTE(review): when *default* is provided, an unparsable value returns
    *default* from safe_float before fraction parsing is attempted -- confirm
    this short-circuit is intended for callers passing defaults.

    :param fraction: Plain number or fraction string to parse.
    :param ndigits: Number of digits to round to.
    :param default: Default if casting fails.
    :return: Parsed float, default, or None when the value cannot be parsed.
    """
    try:
        return safe_float(fraction, ndigits, default)
    except ValueError:
        pass
    # Not a plain number: try "num/denom" or "leading num/denom" forms.
    try:
        num, denom = fraction.split('/')
    except ValueError:
        # Equivalent to the former `return None`: this branch is only reached
        # when default is None (safe_float raises only in that case).
        return default
    try:
        leading, num = num.split(' ')
    except ValueError:
        return safe_float(float(num) / float(denom), ndigits, default)
    # Derive the sign from the text so "-0 1/2" parses as negative;
    # the former `float(leading) < 0` check was False for -0.0.
    sign_mult = -1 if leading.lstrip().startswith('-') else 1
    return safe_float(float(leading) + sign_mult * (float(num) / float(denom)), ndigits, default)
def safe_smart_cast(val):
    """Safely cast a value using the safe_* caster matching its exact type.

    :param val: Value to be smartly cast.
    :return: Typed value; falls back to the string representation.
    """
    # Dispatch on the exact type (not isinstance) so subclasses such as bool
    # keep the original fallback-to-string behavior; this replaces the former
    # chain of `type(val) == T` comparisons.
    dispatch = {
        str: safe_str,
        dict: safe_dict,
        int: safe_int,
        float: safe_float,
    }
    caster = dispatch.get(type(val))
    if caster is not None:
        return caster(val)
    return safe_str(str(val))
def safe_cost(val):
    """Safety cast value to a cost value which is a floating value with 4 digits.

    :param val: Value to be cast to cost of type float.
    :return: float rounded to 4 digits (0.0 when val is falsy).
    """
    return safe_float(val, ndigits=4) | /safe_cast-0.3.4-py3-none-any.whl/safe_cast/__init__.py | 0.858021 | 0.354964 | __init__.py | pypi |
[](https://badge.fury.io/py/safe-cli)
[](https://github.com/gnosis/safe-cli/actions/workflows/python.yml)
[](https://coveralls.io/github/gnosis/safe-cli?branch=master)


# Safe-CLI
Command line utility for **Safe** contracts. Use it to manage your **Safe** easily from the command line
## Using with docker
If you have **Docker** installed on your system, you can just run:
```bash
docker run -it safeglobal/safe-cli safe-creator
```
for creating Safes
or
```bash
docker run -it safeglobal/safe-cli safe-cli
```
to run the actual **safe-cli**
## Installing
**Python >= 3.7** is required. **Python 3.10** is recommended.
```bash
pip3 install -U safe-cli
```
## Using
```bash
safe-cli <checksummed_safe_address> <ethereum_node_url>
```
Then you should be on the prompt and see information about the Safe, like the owners, version, etc.
Next step would be loading some owners for the Safe. At least `threshold` owners need to be loaded to do operations
on the Safe and at least one of them should have funds for sending transactions.
There're 3 operation modes:
- **blockchain**: The default mode, transactions are sent to blockchain.
- **tx-service**: Use `tx-service` command to enable it. Transactions are sent to the Gnosis Transaction Service (if available on the network), so you will be able to see it on the Safe web interface/mobile apps. At least one signer is needed to send transactions to the service. Txs are **not executed**.
- **relay-service**: Use `relay-service [optional-gas-token]` to enable it. Sends transactions through the Gnosis Relay Service (if available on the network). If an optional gas token is set, it will be used to send transactions.
Loading owners is not needed if you just want to do `read-only` operations.
To load owners:
```
> load_cli_owners <account_private_key>
Loaded account 0xab...cd with balance=123 ether
Set account 0xab..cd as default sender of txs
```
You can also load owners from an environment variable. Before running the `safe-cli`:
```bash
export MY_PRIVATE_KEY=YOUR_EOA_PRIVATE_KEY
```
Then:
```
> load_cli_owners MY_PRIVATE_KEY
Loaded account 0xab...cd with balance=123 ether
Set account 0xab..cd as default sender of txs
```
To check the loaded owners:
```
> show_cli_owners
```
To unload an owner:
```
> unload_cli_owners <ethereum_checksummed_address>
```
Operations currently supported:
- `send_custom <address> <value-wei> <data-hex-str> [--delegate] [--safe-nonce <int>]`:
Sends a custom transaction from the Safe to a contract. If `--delegate` is set a `delegatecall`
will be triggered.
- `send_ether <address> <value-wei> [--safe-nonce <int>]`:
Sends ether from the Safe to another account
- `send_erc20 <address> <token_address> <value> [--safe-nonce <int>]`:
Send ERC20 token from the Safe to another account
- `approve_hash <keccak-hexstr-hash> <sender-address>`: Approves a `safe-tx-hash` for the provided sender address.
Sender private key must be loaded first.
- `add_owner <address>`: Adds a new owner `address` to the Safe.
- `remove_owner <address>`: Removes an owner `address` from the Safe.
- `change_threshold <integer>`: Changes the `threshold` of the Safe.
- `enable_module <address>`: Enable module `address`
- `disable_module <address>`: Disable module `address`
- `change_fallback_handler <address>`: Updates the fallback handler to be `address`. Supported by Safes with `version >= v1.1.0`. **WARNING: DON'T USE
THIS IF YOU DON'T KNOW WHAT YOU ARE DOING. ALL YOUR FUNDS COULD BE LOST**
- `change_guard <address>`: Updates the guard to be `address`. Supported by Safes with `version >= v1.3.0`. **WARNING: DON'T USE
THIS IF YOU DON'T KNOW WHAT YOU ARE DOING. ALL YOUR FUNDS COULD BE LOST**
- `change_master_copy <address>`: Updates the master copy to be `address`. It's used to update the Safe. **WARNING: DON'T USE
THIS IF YOU DON'T KNOW WHAT YOU ARE DOING. ALL YOUR FUNDS COULD BE LOST**
- `update`: Updates the Safe to the latest version (if you are on a known network like `Goerli` or `Mainnet`).
**WARNING: DON'T USE THIS IF YOU DON'T KNOW WHAT YOU ARE DOING. ALL YOUR FUNDS COULD BE LOST**
Operations on `tx-service` mode, requires a Safe Transaction Service working on the network
(Mainnet, Gnosis Chain, Goerli, Polygon...):
- `balances`: Returns a list of balances for ERC20 tokens and ether.
- `history`: History of multisig transactions (including pending).
- `execute-tx <safe-tx-hash>`: Execute a pending tx with enough signatures.
- `sign-tx <safe-tx-hash>`: Sign a tx with the loaded owners for the provided `SafeTxHash`.
- `batch-txs <safe-nonce> <safe-tx-hash> [ <safe-tx-hash> ... ]`: Batch transactions into one Multisig
Transaction using the provided `safe-nonce`. **Any safe-tx can be used**: transactions from other Safes, transactions
already executed, transactions pending for execution... The only limitation is that
**transactions from other networks cannot be used**. Batching order will follow the same order of the
`safe-tx-hashes` provided.
- `get_delegates`: Returns a list of delegates for the Safe.
- `add_delegate <address> <label> <signer-address>`: Adds a new delegate `address` to the Safe.
- `remove_delegate <address> <signer-address>`: Removes a delegate `address` from the Safe.
- `drain <address>`: Sends all ether and ERC20 funds to the provided account.
If the information in the information bar is outdated or there's any problem you can force the `safe-cli` to update
the information about the Safe using:
```
> refresh
```
## Creating a new Safe
Use `safe-creator <node_url> <private_key> --owners <checksummed_address_1> <checksummed_address_2> --threshold <uint> --salt-nonce <uint256>`.
Example:
```
safe-creator https://goerli.infura.io/v3/token $PRIVATE_KEY --owners 0x848EF06Bb9d1bc79Bb3B04b7Ea0e251C6E788d7c --threshold 1
```
## Demo
For this demo, `PRIVATE_KEY` environment variable was set to an _EOA_ private key (owner of a previously created and outdated Safe)
and `ETHEREUM_NODE_URL` to a http goerli node.
At first, Safe is updated to the last version and then `123 Wei` are sent to the owner of the Safe (it could be any other address).
**Be careful when using `update` command, as it can leave your Safe funds stuck. Safe CLI is still a beta**
[](https://asciinema.org/a/346692)
## Use custom contracts
**Safe-cli** comes with the official Safe contract addresses deployed on Mainnet, Rinkeby, Kovan and Goerli
configured by default. If you want to use your own you can edit the file `safe_cli/safe_addresses.py`
Be careful when modifying these addresses, the funds in a Safe can get stuck if an invalid address is used when updating
to an invalid Safe Master Copy.
## Safe contracts
- [Safe contracts](https://github.com/gnosis/safe-contracts)
- [Safe contracts deployment info and addresses](https://github.com/gnosis/safe-deployments/tree/main/src/assets)
## Setting up for developing
If you miss something and want to send us a PR:
```bash
git clone https://github.com/gnosis/safe-cli.git
cd safe-cli
stat venv 2>/dev/null || python3 -m venv venv
source venv/bin/activate && pip install -r requirements-dev.txt
pre-commit install -f
```
## Contributors
- [Pedro Arias Ruiz](https://github.com/AsiganTheSunk)
- [Uxío Fuentefría](https://github.com/uxio0) (uxio@gnosis.io)
| /safe_cli-0.7.1.tar.gz/safe_cli-0.7.1/README.md | 0.432063 | 0.942507 | README.md | pypi |
from prompt_toolkit.formatted_text import HTML
# Prompt-toolkit style classes used to colorize completer argument hints.
SAFE_ARGUMENT_COLOR = "em"
SAFE_EMPTY_ARGUMENT_COLOR = "ansimagenta"
# Maps each CLI command name to the short usage string shown by the completer.
safe_commands_arguments = {
    "add_delegate": "<address> <label> <signer-address>",
    "add_owner": "<address> [--threshold <int>]",
    "approve_hash": "<safe-tx-hash> <address>",
    "balances": "(read-only)",
    "batch-txs": "<safe-nonce> <safe-tx-hash> [ <safe-tx-hash> ... ]",
    "change_fallback_handler": "<address>",
    "change_guard": "<address>",
    "change_master_copy": "<address>",
    "change_threshold": "<address>",
    "disable_module": "<address>",
    "enable_module": "<address>",
    "execute-tx": "<safe-tx-hash>",
    "get_delegates": "(read-only)",
    "get_nonce": "(read-only)",
    "get_owners": "(read-only)",
    "get_threshold": "(read-only)",
    "history": "(read-only)",
    "info": "(read-only)",
    "load_cli_owners": "<account-private-key> [<account-private-key>...]",
    "load_cli_owners_from_words": "<word_1> <word_2> ... <word_12>",
    "refresh": "",
    "remove_delegate": "<address> <signer-address>",
    "remove_owner": "<address> [--threshold <int>]",
    "send_custom": "<address> <value-wei> <data> [--delegate] [--safe-nonce <int>]",
    "send_erc20": "<address> <token-address> <value-wei> [--safe-nonce <int>]",
    "send_erc721": "<address> <token-address> <token-id> [--safe-nonce <int>]",
    "send_ether": "<address> <value-wei> [--safe-nonce <int>]",
    "show_cli_owners": "(read-only)",
    "sign-tx": "<safe-tx-hash>",
    "unload_cli_owners": "<address> [<address>...]",
    "update": "",
    "blockchain": "",
    "relay-service": "[<token-address>]",
    "tx-service": "",
    "drain": "<address>",
}
# All known command names, in declaration order.
safe_commands = list(safe_commands_arguments.keys())
# Color class applied to each argument placeholder in the completer display.
safe_color_arguments = {
    "(read-only)": SAFE_ARGUMENT_COLOR,
    "<account-private-key>": SAFE_ARGUMENT_COLOR,
    "<address>": SAFE_ARGUMENT_COLOR,
    "<hex-str>": SAFE_ARGUMENT_COLOR,
    "<integer>": SAFE_ARGUMENT_COLOR,
    "<safe-tx-hash>": SAFE_ARGUMENT_COLOR,
    "<token-address>": SAFE_ARGUMENT_COLOR,
    "<token-id>": SAFE_ARGUMENT_COLOR,
    "<value-wei>": SAFE_ARGUMENT_COLOR,
}
meta = {
"approve_hash": HTML(
"<b>approve_hash</b> will approve a safe-tx-hash for the provided sender address. "
"Sender private key must be loaded first"
),
"balances": HTML(
"<b>balances</b> will return the balance of Ether and ERC20 tokens of the Safe "
"(if tx service available for the network)"
),
"history": HTML(
"<b>history</b> will return information of last transactions for the Safe "
"(if tx service available for the network)"
),
"batch-txs": HTML(
"<b>batch-txs</b> will take pending or executed transactions by safe tx hash and will create a new"
"transaction using the provided safe nonce"
),
"execute-tx": HTML(
"Take a pending transaction from Safe Tx Service and execute it using a loaded sender"
),
"sign-tx": HTML(
"<b>sign-tx</b> will sign the provided safeTxHash using the owners loaded on the CLI"
),
"info": HTML(
"<b>info</b> will return all the information available for a Safe, with Gnosis Tx Service and "
"Etherscan links if the network is supported"
),
"show_cli_owners": HTML(
"Command <b>show_cli_owners</b> will return a list of loaded <u><address></u> "
"account owners."
),
"get_owners": HTML(
"Command <b>get_owners</b> will return a list of check-summed <u><address></u> "
"account owners."
),
"get_delegates": HTML(
"Command <b>get_delegates</b> will return information about the current delegates."
),
"change_owner": HTML(
"Command <b>change_owner</b> will change an old account <u><address></u> for the new "
"check-summed <u><address></u> account."
),
"add_owner": HTML(
"Command <b>add_owner</b> will add a check-summed <u><address></u> owner account."
),
"remove_owner": HTML(
"Command <b>remove_owner</b> will remove an old account <u><address></u> from the "
"current loaded safe."
),
"add_delegate": HTML(
"Command <b>add_delegate</b> will add a check-summed <u><address></u> delegate account."
),
"remove_delegate": HTML(
"Command <b>remove_delegate</b> will remove a delegate <u><address></u> from the "
"current loaded safe."
),
"enable_module": HTML(
"Command <b>enable_module</b> will enable a check-summed <u><address></u> module."
),
"disable_module": HTML(
"Command <b>disable_module</b> will disable a check-summed <u><address></u> module."
),
"get_threshold": HTML(
"Command <b>get_threshold</b> will return the threshold <u><value></u> for"
" the current loaded safe."
),
"get_nonce": HTML(
"Command <b>get_nonce</b> will return the nonce <u><value></u> for "
"the current loaded safe."
),
"change_threshold": HTML(
"Command <b>change_threshold</b> will change the current threshold <u><integer></u> "
"value for the loaded safe."
),
"send_custom": HTML(
"Command <b>send_custom</b> will try to send a custom tx to a check-summed account. Set value "
"to 0 if you don't want to send ether. <b>--delegate</b> can be added to send a DELEGATECALL"
),
"send_ether": HTML(
"Command <b>send_ether</b> will try to send Wei <u><value></u> to a check-summed account"
" <u><address></u> if enough funds are found, withing the current loaded safe."
),
"send_erc20": HTML(
"Command <b>send_erc20</b> will try to send a Token <u><value></u> from a check-summed "
"<u><token-address></u>, to a check-summed account <u><address></u> if enough funds"
" are found, withing the current loaded safe."
),
"send_erc721": HTML(
"Command <b>send_erc721</b> will try to send a ERC 721 Token <u><value></u>"
"from a check-summed <u><token-address></u>, to a check-summed account "
"<u><address></u>."
),
"unload_cli_owners": HTML(
"Command <b>unload_cli_owners</b> will unload a check-summed <u><address></u> "
"from the current loaded account owners."
),
"load_cli_owners": HTML(
"Command <b>load_cli_owners</b> will try to load a new owner via "
"<u><account-private-key></u>."
),
"load_cli_owners_from_words": HTML(
"Command <b>load_cli_owners_from_words</b> will try to load owners via"
"<u>seed_words</u>. Only relevant accounts(owners) will be loaded"
),
"refresh": HTML(
"Command <b>refresh</b> will refresh the information for the current loaded safe."
),
"change_master_copy": HTML(
"Command <b>change_master_copy</b> will change the current MasterCopy of the "
"Safe Contract <b>[DO NOT CALL THIS FUNCTION, UNLESS YOU KNOW WHAT YOU ARE DOING. "
"ALL YOUR FUNDS COULD BE LOST]</b>."
),
"change_fallback_handler": HTML(
"Command <b>change_fallback_handler</b> will change the current "
"fallbackHandler for Safes with version >= 1.1.0 "
"<b>[DO NOT CALL THIS FUNCTION, UNLESS YOU KNOW WHAT YOU ARE DOING. "
"ALL YOUR FUNDS COULD BE LOST]</b>."
),
"change_guard": HTML(
"Command <b>change_guard</b> will change the current "
"guard for Safes with version >= 1.3.0 "
"<b>[DO NOT CALL THIS FUNCTION, UNLESS YOU KNOW WHAT YOU ARE DOING. "
"ALL YOUR FUNDS COULD BE LOST]</b>."
),
"update": HTML(
"Command <b>update</b> will upgrade the Safe master copy to the latest version"
),
"blockchain": HTML(
"<b>blockchain</b> sets the default mode for tx service. Transactions will be "
"sent to blockchain"
),
"relay-service": HTML(
"<b>relay-service</b> enables relay-service integration. Transactions will be sent to the "
"relay-service so fees will be deducted from the Safe instead of from the sender. "
"A payment token can be provided to be used instead of Ether (stable coins, WETH and OWL"
),
"tx-service": HTML(
"<b>tx-service</b> enables tx-service integration. Transactions will be sent to the tx-service "
"instead of blockchain, so they will show up on the interface"
),
"drain": HTML(
"Command <b>drain</b> will try to send all assets ether and ERC20 to a check-summed account"
),
} | /safe_cli-0.7.1.tar.gz/safe_cli-0.7.1/safe_cli/safe_completer_constants.py | 0.4436 | 0.252565 | safe_completer_constants.py | pypi |
from typing import Optional
from hexbytes import HexBytes
from prompt_toolkit import HTML, print_formatted_text
from gnosis.eth.constants import NULL_ADDRESS
from gnosis.safe import InvalidInternalTx, SafeOperation, SafeTx
from safe_cli.utils import yes_or_no_question
from .safe_operator import SafeOperator, SafeServiceNotAvailable
class SafeRelayOperator(SafeOperator):
    """Safe operator that routes transactions through the Safe Relay Service
    instead of sending them directly to the blockchain."""

    def __init__(self, address: str, node_url: str, gas_token: Optional[str] = None):
        super().__init__(address, node_url)
        # NULL_ADDRESS presumably signals fee payment in ether rather than an
        # ERC20 gas token -- confirm against the relay service API.
        self.gas_token = gas_token or NULL_ADDRESS
        if not self.safe_relay_service:
            raise SafeServiceNotAvailable(
                f"Cannot configure relay service for network {self.network.name}"
            )

    def approve_hash(self, hash_to_approve: HexBytes, sender: str) -> bool:
        """Hash approval is not supported in relay mode."""
        raise NotImplementedError("Not supported when using relay")

    def prepare_and_execute_safe_transaction(
        self,
        to: str,
        value: int,
        data: bytes,
        operation: SafeOperation = SafeOperation.CALL,
        safe_nonce: Optional[int] = None,
    ) -> bool:
        """Build a Safe transaction and submit it through the relay service."""
        safe_tx = self.prepare_safe_transaction(
            to, value, data, operation, safe_nonce=safe_nonce
        )
        return self.post_transaction_to_relay_service(safe_tx)

    def post_transaction_to_relay_service(self, safe_tx: SafeTx) -> bool:
        """Estimate fees for safe_tx, re-sign it, and send it to the relay.

        The transaction's gas fields, nonce and signatures are overwritten with
        relay-provided values before signing.

        :param safe_tx: transaction to submit (mutated in place).
        :return: True if the relay queued the transaction, False on InvalidInternalTx.
        """
        safe_tx.gas_token = self.gas_token
        # The relay decides the gas parameters and the nonce to use.
        estimation = self.safe_relay_service.get_estimation(self.address, safe_tx)
        safe_tx.base_gas = estimation["baseGas"]
        safe_tx.safe_tx_gas = estimation["safeTxGas"]
        safe_tx.gas_price = estimation["gasPrice"]
        last_used_nonce: Optional[int] = estimation["lastUsedNonce"]
        safe_tx.safe_nonce = 0 if last_used_nonce is None else last_used_nonce + 1
        safe_tx.refund_receiver = estimation["refundReceiver"] or NULL_ADDRESS
        safe_tx.signatures = b""  # Sign transaction again
        self.sign_transaction(safe_tx)
        # NOTE(review): if the user declines the prompt, this method implicitly
        # returns None despite the declared bool return type -- confirm intended.
        if yes_or_no_question("Do you want to execute tx " + str(safe_tx)):
            try:
                # Dry-run the transaction before submitting it to the relay.
                call_result = safe_tx.call(self.default_sender.address)
                print_formatted_text(
                    HTML(f"Result: <ansigreen>{call_result}</ansigreen>")
                )
                transaction_data = self.safe_relay_service.send_transaction(
                    self.address, safe_tx
                )
                tx_hash = transaction_data["txHash"]
                print_formatted_text(
                    HTML(
                        f"<ansigreen>Safe Relay has queued transaction with "
                        f"transaction-hash <b>{tx_hash}</b></ansigreen>"
                    )
                )
                return True
            except InvalidInternalTx as invalid_internal_tx:
                print_formatted_text(
                    HTML(
                        f"Result: <ansired>InvalidTx - {invalid_internal_tx}</ansired>"
                    )
                )
                return False | /safe_cli-0.7.1.tar.gz/safe_cli-0.7.1/safe_cli/operators/safe_relay_operator.py | 0.8874 | 0.162979 | safe_relay_operator.py | pypi |
from typing import TypedDict
from urllib.parse import urljoin
import requests
from eth_typing import ChecksumAddress, HexStr
from gnosis.eth.ethereum_client import EthereumNetwork
from gnosis.safe import SafeTx
from gnosis.safe.signatures import signature_split
from .base_api import BaseAPI, BaseAPIException
class RelayEstimation(TypedDict):
    """Fee/gas estimation payload returned by the relay service."""
    safeTxGas: int
    baseGas: int
    gasPrice: int
    lastUsedNonce: int  # may be None per callers' handling -- confirm API contract
    gasToken: ChecksumAddress
    refundReceiver: ChecksumAddress
class RelaySentTransaction(TypedDict):
    """Response payload returned by the relay after queuing a transaction."""
    safeTxHash: HexStr
    txHash: HexStr
class RelayServiceApi(BaseAPI):
    """HTTP client for the Safe Relay Service REST API."""

    # Known relay service deployments per Ethereum network.
    URL_BY_NETWORK = {
        EthereumNetwork.MAINNET: "https://safe-relay.gnosis.io",
        EthereumNetwork.RINKEBY: "https://safe-relay.rinkeby.gnosis.io",
        EthereumNetwork.GOERLI: "https://safe-relay.goerli.gnosis.io",
    }

    def send_transaction(
        self, safe_address: str, safe_tx: SafeTx
    ) -> RelaySentTransaction:
        """Post a signed Safe transaction to the relay for execution.

        :param safe_address: address of the Safe.
        :param safe_tx: signed Safe transaction to relay.
        :return: RelaySentTransaction with the relay's response.
        :raises BaseAPIException: if the relay rejects the request.
        """
        url = urljoin(self.base_url, f"/api/v1/safes/{safe_address}/transactions/")
        # Signatures are packed 65 bytes each; split them into the
        # {v, r, s} dicts the relay API expects.
        signatures = []
        for i in range(len(safe_tx.signatures) // 65):
            v, r, s = signature_split(safe_tx.signatures, i)
            signatures.append(
                {
                    "v": v,
                    "r": r,
                    "s": s,
                }
            )
        data = {
            "to": safe_tx.to,
            "value": safe_tx.value,
            "data": safe_tx.data.hex() if safe_tx.data else None,
            "operation": safe_tx.operation,
            "gasToken": safe_tx.gas_token,
            "safeTxGas": safe_tx.safe_tx_gas,
            "dataGas": safe_tx.base_gas,
            "gasPrice": safe_tx.gas_price,
            "refundReceiver": safe_tx.refund_receiver,
            "nonce": safe_tx.safe_nonce,
            "signatures": signatures,
        }
        response = requests.post(url, json=data)
        if not response.ok:
            raise BaseAPIException(f"Error posting transaction: {response.content}")
        else:
            return RelaySentTransaction(response.json())

    def get_estimation(self, safe_address: str, safe_tx: SafeTx) -> RelayEstimation:
        """Request a fee/gas estimation for *safe_tx* from the relay.

        :param safe_address: address of the Safe.
        :param safe_tx: transaction to estimate (signatures not required).
        :return: RelayEstimation
        :raises BaseAPIException: if the relay rejects the request.
        """
        url = urljoin(
            self.base_url, f"/api/v2/safes/{safe_address}/transactions/estimate/"
        )
        data = {
            "to": safe_tx.to,
            "value": safe_tx.value,
            "data": safe_tx.data.hex() if safe_tx.data else None,
            "operation": safe_tx.operation,
            "gasToken": safe_tx.gas_token,
        }
        response = requests.post(url, json=data)
        if not response.ok:
            raise BaseAPIException(f"Error posting transaction: {response.content}")
        else:
            response_json = response.json()
            # Convert values to int
            for key in ("safeTxGas", "baseGas", "gasPrice"):
                response_json[key] = int(response_json[key])
            return RelayEstimation(response_json) | /safe_cli-0.7.1.tar.gz/safe_cli-0.7.1/safe_cli/api/relay_service_api.py | 0.741206 | 0.225204 | relay_service_api.py | pypi |
import time
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urljoin
import requests
from eth_account.signers.local import LocalAccount
from hexbytes import HexBytes
from web3 import Web3
from gnosis.eth.ethereum_client import EthereumNetwork
from gnosis.safe import SafeTx
from .base_api import BaseAPI, BaseAPIException
class TransactionServiceApi(BaseAPI):
    """Client for the Safe transaction service REST API.

    Offers read access (balances, multisig transactions, delegates) and write
    access (proposing transactions, posting confirmations, managing delegates)
    for a Safe on any of the networks listed in ``URL_BY_NETWORK``.
    """

    URL_BY_NETWORK = {
        EthereumNetwork.MAINNET: "https://safe-transaction-mainnet.safe.global",
        EthereumNetwork.ARBITRUM_ONE: "https://safe-transaction-arbitrum.safe.global",
        EthereumNetwork.AURORA_MAINNET: "https://safe-transaction-aurora.safe.global",
        EthereumNetwork.AVALANCHE_C_CHAIN: "https://safe-transaction-avalanche.safe.global",
        EthereumNetwork.BINANCE_SMART_CHAIN_MAINNET: "https://safe-transaction-bsc.safe.global",
        EthereumNetwork.ENERGY_WEB_CHAIN: "https://safe-transaction-ewc.safe.global",
        EthereumNetwork.GOERLI: "https://safe-transaction-goerli.safe.global",
        EthereumNetwork.POLYGON: "https://safe-transaction-polygon.safe.global",
        EthereumNetwork.OPTIMISM: "https://safe-transaction-optimism.safe.global",
        EthereumNetwork.ENERGY_WEB_VOLTA_TESTNET: "https://safe-transaction-volta.safe.global",
        EthereumNetwork.GNOSIS: "https://safe-transaction-gnosis-chain.safe.global",
    }

    @classmethod
    def create_delegate_message_hash(cls, delegate_address: str) -> bytes:
        """
        Build the message hash that must be signed to add or remove a delegate.

        The current hour (``int(time.time()) // 3600``) is concatenated with the
        delegate address before hashing, so a signature over this hash is only
        usable for a limited time window.

        :param delegate_address: Address of the delegate.
        :return: keccak hash of ``delegate_address + str(totp)``.
        """
        totp = int(time.time()) // 3600
        hash_to_sign = Web3.keccak(text=delegate_address + str(totp))
        return hash_to_sign

    def data_decoded_to_text(self, data_decoded: Dict[str, Any]) -> Optional[str]:
        """
        Decoded data decoded to text

        :param data_decoded: ``dataDecoded`` structure from the transaction service.
        :return: Human readable text, or ``None`` when there is nothing to decode.
        """
        if not data_decoded:
            return None

        method = data_decoded["method"]
        parameters = data_decoded.get("parameters", [])
        text = ""
        for (
            parameter
        ) in parameters:  # Multisend or executeTransaction from another Safe
            if "decodedValue" in parameter:
                # Nested calls: recurse into every decoded sub-transaction.
                text += (
                    method
                    + ":\n - "
                    + "\n - ".join(
                        [
                            self.data_decoded_to_text(
                                decoded_value.get("decodedData", {})
                            )
                            for decoded_value in parameter.get("decodedValue", {})
                        ]
                    )
                    + "\n"
                )
        if text:
            return text.strip()
        else:
            # Flat call: show the method name with its raw parameter values.
            return (
                method
                + ": "
                + ",".join([str(parameter["value"]) for parameter in parameters])
            )

    def get_balances(self, safe_address: str) -> List[Dict[str, Any]]:
        """
        Fetch the token balances of a Safe.

        :param safe_address: Address of the Safe.
        :return: List of balance entries as returned by the service.
        :raises BaseAPIException: If the request fails.
        """
        response = self._get_request(f"/api/v1/safes/{safe_address}/balances/")
        if not response.ok:
            raise BaseAPIException(f"Cannot get balances: {response.content}")
        else:
            return response.json()

    def get_safe_transaction(
        self, safe_tx_hash: bytes
    ) -> Tuple[SafeTx, Optional[HexBytes]]:
        """
        :param safe_tx_hash:
        :return: SafeTx and `tx-hash` if transaction was executed
        """
        safe_tx_hash = HexBytes(safe_tx_hash).hex()
        response = self._get_request(f"/api/v1/multisig-transactions/{safe_tx_hash}/")
        if not response.ok:
            raise BaseAPIException(
                f"Cannot get transaction with safe-tx-hash={safe_tx_hash}: {response.content}"
            )
        else:
            result = response.json()
            # TODO return tx-hash if executed
            signatures = self.parse_signatures(result)
            # Rebuild the SafeTx from the service payload; numeric fields come
            # back as strings, hence the int()/HexBytes() conversions.
            return (
                SafeTx(
                    self.ethereum_client,
                    result["safe"],
                    result["to"],
                    int(result["value"]),
                    HexBytes(result["data"]) if result["data"] else b"",
                    int(result["operation"]),
                    int(result["safeTxGas"]),
                    int(result["baseGas"]),
                    int(result["gasPrice"]),
                    result["gasToken"],
                    result["refundReceiver"],
                    signatures=signatures if signatures else b"",
                    safe_nonce=int(result["nonce"]),
                ),
                HexBytes(result["transactionHash"])
                if result["transactionHash"]
                else None,
            )

    def parse_signatures(self, raw_tx: Dict[str, Any]) -> Optional[HexBytes]:
        """
        Extract the signatures from a service transaction payload.

        For executed transactions, returns the service-provided ``signatures``
        field as-is; otherwise concatenates the EOA confirmation signatures
        sorted by owner address. Falls through to ``None`` when neither the
        ``signatures`` nor the ``confirmations`` field is populated.

        NOTE(review): the executed-transaction branch returns the JSON field
        unchanged (presumably a hex string), not ``HexBytes`` as annotated —
        confirm whether callers depend on the declared type.
        """
        if raw_tx["signatures"]:
            # Tx was executed and signatures field is populated
            return raw_tx["signatures"]
        elif raw_tx["confirmations"]:
            # Parse offchain transactions
            return b"".join(
                [
                    HexBytes(confirmation["signature"])
                    for confirmation in sorted(
                        raw_tx["confirmations"], key=lambda x: int(x["owner"], 16)
                    )
                    if confirmation["signatureType"] == "EOA"
                ]
            )

    def get_transactions(self, safe_address: str) -> List[Dict[str, Any]]:
        """
        Fetch the multisig transactions of a Safe.

        :param safe_address: Address of the Safe.
        :return: List of transactions (the ``results`` page of the response).
        :raises BaseAPIException: If the request fails.
        """
        response = self._get_request(
            f"/api/v1/safes/{safe_address}/multisig-transactions/"
        )
        if not response.ok:
            raise BaseAPIException(f"Cannot get transactions: {response.content}")
        else:
            return response.json().get("results", [])

    def get_delegates(self, safe_address: str) -> List[Dict[str, Any]]:
        """
        Fetch the delegates configured for a Safe.

        :param safe_address: Address of the Safe.
        :return: List of delegate entries (the ``results`` page of the response).
        :raises BaseAPIException: If the request fails.
        """
        # 200 delegates should be enough so we don't paginate
        response = self._get_request(
            f"/api/v1/delegates/?safe={safe_address}&limit=200"
        )
        if not response.ok:
            raise BaseAPIException(f"Cannot get delegates: {response.content}")
        else:
            return response.json().get("results", [])

    def post_signatures(self, safe_tx_hash: bytes, signatures: bytes) -> None:
        """
        Confirm a pending transaction by posting additional signatures.

        :param safe_tx_hash: Hash of the Safe transaction to confirm.
        :param signatures: Concatenated owner signatures to attach.
        :raises BaseAPIException: If the request fails.
        """
        safe_tx_hash = HexBytes(safe_tx_hash).hex()
        response = self._post_request(
            f"/api/v1/multisig-transactions/{safe_tx_hash}/confirmations/",
            payload={"signature": HexBytes(signatures).hex()},
        )
        if not response.ok:
            raise BaseAPIException(
                f"Cannot post signatures for tx with safe-tx-hash={safe_tx_hash}: {response.content}"
            )

    def add_delegate(
        self,
        safe_address: str,
        delegate_address: str,
        label: str,
        signer_account: LocalAccount,
    ):
        """
        Register a new delegate for a Safe.

        :param safe_address: Address of the Safe.
        :param delegate_address: Address to register as delegate.
        :param label: Human readable label for the delegate.
        :param signer_account: Account that signs the (time-limited) delegate hash.
        :raises BaseAPIException: If the request fails.
        """
        hash_to_sign = self.create_delegate_message_hash(delegate_address)
        signature = signer_account.signHash(hash_to_sign)
        add_payload = {
            "safe": safe_address,
            "delegate": delegate_address,
            "signature": signature.signature.hex(),
            "label": label,
        }
        response = self._post_request(
            f"/api/v1/safes/{safe_address}/delegates/", add_payload
        )
        if not response.ok:
            raise BaseAPIException(f"Cannot add delegate: {response.content}")

    def remove_delegate(
        self, safe_address: str, delegate_address: str, signer_account: LocalAccount
    ):
        """
        Remove a delegate from a Safe.

        :param safe_address: Address of the Safe.
        :param delegate_address: Delegate address to remove.
        :param signer_account: Account that signs the (time-limited) delegate hash.
        :raises BaseAPIException: If the request fails.
        """
        hash_to_sign = self.create_delegate_message_hash(delegate_address)
        signature = signer_account.signHash(hash_to_sign)
        remove_payload = {"signature": signature.signature.hex()}
        response = self._delete_request(
            f"/api/v1/safes/{safe_address}/delegates/{delegate_address}/",
            remove_payload,
        )
        if not response.ok:
            raise BaseAPIException(f"Cannot remove delegate: {response.content}")

    def post_transaction(self, safe_address: str, safe_tx: SafeTx):
        """
        Propose a transaction to the service so owners can confirm it.

        :param safe_address: Address of the Safe.
        :param safe_tx: Transaction to propose; existing signatures are attached.
        :raises BaseAPIException: If the request fails.
        """
        url = urljoin(
            self.base_url, f"/api/v1/safes/{safe_address}/multisig-transactions/"
        )
        # The service requires a sender; fall back to a fixed address when the
        # transaction has no signers yet.
        random_account = "0x1b95E981F808192Dc5cdCF92ef589f9CBe6891C4"
        sender = safe_tx.sorted_signers[0] if safe_tx.sorted_signers else random_account
        data = {
            "to": safe_tx.to,
            "value": safe_tx.value,
            "data": safe_tx.data.hex() if safe_tx.data else None,
            "operation": safe_tx.operation,
            "gasToken": safe_tx.gas_token,
            "safeTxGas": safe_tx.safe_tx_gas,
            "baseGas": safe_tx.base_gas,
            "gasPrice": safe_tx.gas_price,
            "refundReceiver": safe_tx.refund_receiver,
            "nonce": safe_tx.safe_nonce,
            "contractTransactionHash": safe_tx.safe_tx_hash.hex(),
            "sender": sender,
            "signature": safe_tx.signatures.hex() if safe_tx.signatures else None,
            "origin": "Safe-CLI",
        }
        response = requests.post(url, json=data)
        if not response.ok:
            raise BaseAPIException(f"Error posting transaction: {response.content}") | /safe_cli-0.7.1.tar.gz/safe_cli-0.7.1/safe_cli/api/transaction_service_api.py | 0.718693 | 0.277791 | transaction_service_api.py | pypi |
Undefined = object()
def safe_cmp(a, b, _nan_is_eq=False):
    """ Python 2 compatible ``cmp`` function returning -1, 0 or 1.

    Values that normally cannot be compared are handled as follows:

    * ``None`` sorts before everything else.
    * ``nan`` sorts before any other value it is compared against.
    * Operands of incompatible types are ordered by type name (ties broken
      by the type's ``id`` so the ordering stays total).

    Note: the ``_nan_is_eq`` argument is required to ensure that sorting is
    consistent (ie, where ``nan`` does not move)::

        sorted([1, nan]) == sorted([nan, 1])

    But ``safe_cmp`` should _not_ return ``0`` when called directly (ie,
    because ``safe_cmp(nan, 1)`` should not return ``0``).
    """
    try:
        if a < b:
            return -1
        if a == b:
            return 0
        if a > b:
            return 1
        # None of <, ==, > held, so at least one operand is a nan.
        if _nan_is_eq:
            return 0
        left_is_nan = a != a
        right_is_nan = b != b
        if left_is_nan and right_is_nan:
            return 0
        return -1 if left_is_nan else 1
    except TypeError:
        # Incompatible operand types; fall through to the fallbacks below.
        pass

    if a is None:
        return 0 if b is None else -1
    if b is None:
        return 1

    type_of_a = type(a)
    type_of_b = type(b)
    return safe_cmp(
        (type_of_a.__name__, id(type_of_a)),
        (type_of_b.__name__, id(type_of_b)),
    )
def _build_comperator(name, op, lt, eq, gt):
    """Generate a rich-comparison method (``__lt__`` etc.) for ``safe_order``.

    ``lt``, ``eq`` and ``gt`` are source snippets (here ``"True"``/``"False"``)
    interpolated as the result when ``safe_cmp`` returns -1, 0 and 1
    respectively. ``op`` names the operator symbol but is currently unused.
    ``_nan_is_eq=True`` keeps nan values in place while sorting (see
    ``safe_cmp``).
    """
    # The generated function resolves names through this namespace; safe_cmp
    # is also bound as a default argument for a fast local lookup.
    ns = {"safe_cmp": safe_cmp}
    exec(f"""
def comperator(self, b, __safe_cmp=safe_cmp):
    res = __safe_cmp(self._obj, b, _nan_is_eq=True)
    return (
        {lt} if res == -1 else
        {eq} if res == 0 else
        {gt}
    )""", ns, ns)
    res = ns["comperator"]
    res.__name__ = name  # report the real dunder name in tracebacks
    return res
class safe_order:
    """Sort-key wrapper that gives any object a total order (see ``safe_cmp``).

    Use as ``sorted(items, key=safe_order)``; see also ``safe_sorted``.
    """

    def __init__(self, obj):
        self._obj = obj

    # Rich comparisons delegate to safe_cmp(..., _nan_is_eq=True) through
    # generated methods; the three literals are the results for -1 / 0 / 1.
    __lt__ = _build_comperator("__lt__", "<", "True", "False", "False")
    __le__ = _build_comperator("__le__", "<=", "True", "True", "False")
    __gt__ = _build_comperator("__gt__", ">", "False", "False", "True")
    __ge__ = _build_comperator("__ge__", ">=", "False", "True", "True")

    def __repr__(self):
        return f"safe_order({self._obj!r})"

    def __str__(self):
        return f"safe_order({self._obj})"
def safe_max(a, *rest, default=Undefined, key=None):
    """ safe_max(iterable, *[, default=obj, key=func]) -> value
    safe_max(arg1, arg2, *args, *[, key=func]) -> value

    Like the builtin ``max`` but using ``safe_cmp``, so normally uncomparable
    values (``None``, ``nan``, mixed types) are supported.

    With a single iterable argument, return its biggest item. The
    default keyword-only argument specifies an object to return if
    the provided iterable is empty; it does *not* otherwise take part
    in the comparison.

    With two or more arguments, return the largest argument.

    Raises ``ValueError`` if the iterable is empty and no default is given.
    """
    items = a if not rest else (a, ) + rest
    # Bug fix: ``default`` used to seed the comparison, so a provided default
    # could win against actual items. Like builtin max, it is now only used
    # when the input is empty.
    res = Undefined
    res_cmp = Undefined
    for item in items:
        if res is Undefined:
            res = item
            continue
        if res_cmp is Undefined:
            # Lazily apply ``key`` to the current best only once needed.
            res_cmp = res if key is None else key(res)
        item_cmp = item if key is None else key(item)
        cmp_res = safe_cmp(res_cmp, item_cmp)
        if cmp_res < 0:
            res = item
            res_cmp = item_cmp
    if res is Undefined:
        if default is Undefined:
            raise ValueError("safe_max() arg is an empty sequence")
        return default
    return res
def safe_min(a, *rest, default=Undefined, key=None):
    """ safe_min(iterable, *[, default=obj, key=func]) -> value
    safe_min(arg1, arg2, *args, *[, key=func]) -> value

    Like the builtin ``min`` but using ``safe_cmp``, so normally uncomparable
    values (``None``, ``nan``, mixed types) are supported.

    With a single iterable argument, return its smallest item. The
    default keyword-only argument specifies an object to return if
    the provided iterable is empty; it does *not* otherwise take part
    in the comparison.

    With two or more arguments, return the smallest argument.

    Raises ``ValueError`` if the iterable is empty and no default is given.
    """
    items = a if not rest else (a, ) + rest
    # Bug fix: ``default`` used to seed the comparison, so a provided default
    # could win against actual items. Like builtin min, it is now only used
    # when the input is empty. (Docstring also fixed: it said "biggest".)
    res = Undefined
    res_cmp = Undefined
    for item in items:
        if res is Undefined:
            res = item
            continue
        if res_cmp is Undefined:
            # Lazily apply ``key`` to the current best only once needed.
            res_cmp = res if key is None else key(res)
        item_cmp = item if key is None else key(item)
        cmp_res = safe_cmp(res_cmp, item_cmp)
        if cmp_res > 0:
            res = item
            res_cmp = item_cmp
    if res is Undefined:
        if default is Undefined:
            raise ValueError("safe_min() arg is an empty sequence")
        return default
    return res
def safe_sorted(iter, *, key=None, reverse=False):
if key is None:
use_key = safe_order
else:
use_key = lambda x: safe_order(key(x))
return sorted(iter, key=use_key, reverse=reverse) | /safe_cmp-0.1.1.tar.gz/safe_cmp-0.1.1/safe_cmp/safe_cmp.py | 0.80406 | 0.423339 | safe_cmp.py | pypi |
from safeds.data.tabular.containers import Table
from safeds.exceptions import UnknownColumnNameError
class ExampleTable(Table):
"""
A `Table` with descriptions for its columns.
Parameters
----------
table : Table
The table.
column_descriptions : dict[str, str]
A dictionary mapping column names to their descriptions.
Raises
------
UnknownColumnNameError
If a column name in `descriptions` does not exist in `table`.
"""
# noinspection PyMissingConstructor
def __init__(self, table: Table, column_descriptions: dict[str, str]) -> None:
# Check that all column names in `descriptions` exist in `table`
invalid_column_names = set(column_descriptions.keys()) - set(table.column_names)
if invalid_column_names:
raise UnknownColumnNameError(list(invalid_column_names))
self._data = table._data
self._schema = table.schema
self._descriptions = column_descriptions
@property
def column_descriptions(self) -> Table:
"""
Return a `Table` contain the name of a column and its description.
The name is stored in a column called `"Name"` and the description in a column called `"Description"`.
"""
return Table(
{
"Name": self.column_names,
"Description": [self.get_column_description(column_name) for column_name in self.column_names],
},
)
def get_column_description(self, column_name: str) -> str:
"""
Get the description of a column. If no description exists, an empty string is returned.
Parameters
----------
column_name : str
The name of the column.
Returns
-------
description : str
The description of the column.
Raises
------
UnknownColumnNameError
If no column with the given name exists.
"""
if column_name not in self.column_names:
raise UnknownColumnNameError([column_name])
return self._descriptions.get(column_name, "") | /safe_ds_examples-0.15.0.tar.gz/safe_ds_examples-0.15.0/src/safeds_examples/tabular/containers/_example_table.py | 0.910404 | 0.546315 | _example_table.py | pypi |
from pathlib import Path
from safeds.data.tabular.containers import Table
from safeds_examples.tabular.containers import ExampleTable
_path = Path(__file__).parent / "data" / "house_sales.csv"
def load_house_sales() -> ExampleTable:
"""
Load the "House Sales" dataset.
Returns
-------
ExampleTable
The "House Sales" dataset.
"""
return ExampleTable(
Table.from_csv_file(str(_path)),
column_descriptions={
"id": "A unique identifier",
"year": "Year of sale",
"month": "Month of sale",
"day": "Day of sale",
"zipcode": "Zipcode",
"latitude": "Latitude",
"longitude": "Longitude",
"sqft_lot": "Lot area in square feet",
"sqft_living": "Interior living space in square feet",
"sqft_above": "Interior living space above ground in square feet",
"sqft_basement": "Interior living space below ground in square feet",
"floors": "Number of floors",
"bedrooms": "Number of bedrooms",
"bathrooms": "Number of bathrooms.\n\n"
"Fractional values indicate that components (toilet/sink/shower/bathtub) are missing.",
"waterfront": "Whether the building overlooks a waterfront (0 = no, 1 = yes)",
"view": "Rating of the view (1 to 5, higher is better)",
"condition": "Rating of the condition of the house (1 to 5, higher is better)",
"grade": "Rating of building construction and design (1 to 13, higher is better)",
"year_built": "Year the house was built",
"year_renovated": "Year the house was last renovated.\n\n"
"A value of 0 indicates that it was never renovated.",
"sqft_lot_15nn": "Lot area of the 15 nearest neighbors in square feet",
"sqft_living_15nn": "Interior living space of the 15 nearest neighbors in square feet",
"price": "Price the house sold for in USD",
},
) | /safe_ds_examples-0.15.0.tar.gz/safe_ds_examples-0.15.0/src/safeds_examples/tabular/_house_sales/_house_sales.py | 0.855097 | 0.49762 | _house_sales.py | pypi |
from __future__ import annotations
import copy
import io
import warnings
from pathlib import Path
from typing import Any, BinaryIO
import numpy as np
import PIL
from PIL import ImageEnhance, ImageFilter, ImageOps
from PIL.Image import Image as PillowImage
from PIL.Image import open as open_image
from skimage.util import random_noise
from safeds.data.image.typing import ImageFormat
from safeds.exceptions import ClosedBound, OutOfBoundsError
class Image:
"""
A container for image data.
Parameters
----------
data : BinaryIO
The image data as bytes.
"""
@staticmethod
def from_jpeg_file(path: str | Path) -> Image:
"""
Create an image from a JPEG file.
Parameters
----------
path : str | Path
The path to the JPEG file.
Returns
-------
image : Image
The image.
"""
return Image(
data=Path(path).open("rb"),
format_=ImageFormat.JPEG,
)
@staticmethod
def from_png_file(path: str | Path) -> Image:
"""
Create an image from a PNG file.
Parameters
----------
path : str | Path
The path to the PNG file.
Returns
-------
image : Image
The image.
"""
return Image(
data=Path(path).open("rb"),
format_=ImageFormat.PNG,
)
def __init__(self, data: BinaryIO, format_: ImageFormat):
data.seek(0)
self._image: PillowImage = open_image(data, formats=[format_.value])
self._format: ImageFormat = format_
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
@property
def format(self) -> ImageFormat:
"""
Get the image format.
Returns
-------
format : ImageFormat
The image format.
"""
return self._format
@property
def width(self) -> int:
"""
Get the width of the image in pixels.
Returns
-------
width : int
The width of the image.
"""
return self._image.width
@property
def height(self) -> int:
"""
Get the height of the image in pixels.
Returns
-------
height : int
The height of the image.
"""
return self._image.height
# ------------------------------------------------------------------------------------------------------------------
# Conversion
# ------------------------------------------------------------------------------------------------------------------
def to_jpeg_file(self, path: str | Path) -> None:
"""
Save the image as a JPEG file.
Parameters
----------
path : str | Path
The path to the JPEG file.
"""
Path(path).parent.mkdir(parents=True, exist_ok=True)
self._image.save(path, format="jpeg")
def to_png_file(self, path: str | Path) -> None:
"""
Save the image as a PNG file.
Parameters
----------
path : str | Path
The path to the PNG file.
"""
Path(path).parent.mkdir(parents=True, exist_ok=True)
self._image.save(path, format="png")
# ------------------------------------------------------------------------------------------------------------------
# IPython integration
# ------------------------------------------------------------------------------------------------------------------
def __eq__(self, other: Any) -> bool:
"""
Compare two images.
Parameters
----------
other: The image to compare to.
Returns
-------
equals : bool
Whether the two images contain equal pixel data.
"""
if not isinstance(other, Image):
return NotImplemented
return self._image.tobytes() == other._image.tobytes()
def _repr_jpeg_(self) -> bytes | None:
"""
Return a JPEG image as bytes.
If the image is not a JPEG, return None.
Returns
-------
jpeg : bytes | None
The image as JPEG.
"""
if self._format != ImageFormat.JPEG:
return None
buffer = io.BytesIO()
self._image.save(buffer, format="jpeg")
buffer.seek(0)
return buffer.read()
def _repr_png_(self) -> bytes | None:
"""
Return a PNG image as bytes.
If the image is not a PNG, return None.
Returns
-------
png : bytes | None
The image as PNG.
"""
if self._format != ImageFormat.PNG:
return None
buffer = io.BytesIO()
self._image.save(buffer, format="png")
buffer.seek(0)
return buffer.read()
# ------------------------------------------------------------------------------------------------------------------
# Transformations
# ------------------------------------------------------------------------------------------------------------------
def resize(self, new_width: int, new_height: int) -> Image:
"""
Return a new `Image` that has been resized to a given size.
The original image is not modified.
Returns
-------
result : Image
The image with the given width and height.
"""
image_copy = copy.deepcopy(self)
image_copy._image = image_copy._image.resize((new_width, new_height))
return image_copy
def convert_to_grayscale(self) -> Image:
"""
Return a new `Image` that is converted to grayscale.
The original image is not modified.
Returns
-------
result : Image
The grayscale image.
"""
image_copy = copy.deepcopy(self)
image_copy._image = image_copy._image.convert("L")
return image_copy
def crop(self, x: int, y: int, width: int, height: int) -> Image:
"""
Return a new `Image` that has been cropped to a given bounding rectangle.
The original image is not modified.
Parameters
----------
x: the x coordinate of the top-left corner of the bounding rectangle
y: the y coordinate of the top-left corner of the bounding rectangle
width: the width of the bounding rectangle
height: the height of the bounding rectangle
Returns
-------
result : Image
The cropped image.
"""
image_copy = copy.deepcopy(self)
image_copy._image = image_copy._image.crop((x, y, (x + width), (y + height)))
return image_copy
def flip_vertically(self) -> Image:
"""
Return a new `Image` that is flipped vertically (horizontal axis, flips up-down and vice versa).
The original image is not modified.
Returns
-------
result : Image
The flipped image.
"""
image_copy = copy.deepcopy(self)
image_copy._image = self._image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
return image_copy
def flip_horizontally(self) -> Image:
"""
Return a new `Ìmage` that is flipped horizontally (vertical axis, flips left-right and vice versa).
The original image is not modified.
Returns
-------
result : Image
The flipped image.
"""
image_copy = copy.deepcopy(self)
image_copy._image = self._image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
return image_copy
def adjust_brightness(self, factor: float) -> Image:
"""
Return a new `Image` with an adjusted brightness.
The original image is not modified.
Parameters
----------
factor: float
The brightness factor.
1.0 will not change the brightness.
Below 1.0 will result in a darker image.
Above 1.0 will resolut in a brighter image.
Has to be bigger than or equal to 0 (black).
Returns
-------
result: Image
The Image with adjusted brightness.
"""
if factor < 0:
raise OutOfBoundsError(factor, name="factor", lower_bound=ClosedBound(0))
elif factor == 1:
warnings.warn(
"Brightness adjustment factor is 1.0, this will not make changes to the image.",
UserWarning,
stacklevel=2,
)
image_copy = copy.deepcopy(self)
image_copy._image = ImageEnhance.Brightness(image_copy._image).enhance(factor)
return image_copy
def add_gaussian_noise(self, standard_deviation: float) -> Image:
"""
Return a new `Image` with Gaussian noise added to the image.
The original image is not modified.
Parameters
----------
standard_deviation : float
The standard deviation of the Gaussian distribution. Has to be bigger than or equal to 0.
Returns
-------
result : Image
The image with added Gaussian noise.
Raises
------
OutOfBoundsError
If standard_deviation is smaller than 0.
"""
if standard_deviation < 0:
raise OutOfBoundsError(standard_deviation, name="standard_deviation", lower_bound=ClosedBound(0))
# noinspection PyTypeChecker
image_as_array = np.asarray(self._image)
noisy_image_as_array = random_noise(
image_as_array,
mode="gaussian",
var=standard_deviation**2,
rng=42,
clip=True,
)
noisy_image = PIL.Image.fromarray(np.uint8(255 * noisy_image_as_array))
image_copy = copy.deepcopy(self)
image_copy._image = noisy_image
return image_copy
def adjust_contrast(self, factor: float) -> Image:
"""
Return a new `Image` with adjusted contrast.
The original image is not modified.
Parameters
----------
factor: float
If factor > 1, increase contrast of image.
If factor = 1, no changes will be made.
If factor < 1, make image greyer.
Has to be bigger than or equal to 0 (gray).
Returns
-------
image: Image
New image with adjusted contrast.
"""
if factor < 0:
raise OutOfBoundsError(factor, name="factor", lower_bound=ClosedBound(0))
elif factor == 1:
warnings.warn(
"Contrast adjustment factor is 1.0, this will not make changes to the image.",
UserWarning,
stacklevel=2,
)
image_copy = copy.deepcopy(self)
image_copy._image = ImageEnhance.Contrast(image_copy._image).enhance(factor)
return image_copy
def adjust_color_balance(self, factor: float) -> Image:
"""
Return a new `Image` with adjusted color balance.
The original image is not modified.
Parameters
----------
factor: float
If factor > 1, increase color balance of image.
If factor = 1, no changes will be made.
If factor < 1, make image greyer.
Has to be bigger than or equal to 0.
Returns
-------
image: Image
The new, adjusted image.
"""
if factor < 0:
raise OutOfBoundsError(factor, name="factor", lower_bound=ClosedBound(0))
elif factor == 1:
warnings.warn(
"Color adjustment factor is 1.0, this will not make changes to the image.",
UserWarning,
stacklevel=2,
)
image_copy = copy.deepcopy(self)
image_copy._image = ImageEnhance.Color(image_copy._image).enhance(factor)
return image_copy
def blur(self, radius: int) -> Image:
"""
Return a blurred version of the image.
The original image is not modified.
Parameters
----------
radius : int
Radius is directly proportional to the blur value. The radius is equal to the amount of pixels united in
each direction. A radius of 1 will result in a united box of 9 pixels.
Returns
-------
result : Image
The blurred image.
"""
image_copy = copy.deepcopy(self)
image_copy._image = image_copy._image.filter(ImageFilter.BoxBlur(radius))
return image_copy
def sharpen(self, factor: float) -> Image:
"""
Return a sharpened version of the image.
The original image is not modified.
Parameters
----------
factor : float
The amount of sharpness to be applied to the image. Factor 1.0 is considered to be neutral and does not make
any changes.
Returns
-------
result : Image
The image sharpened by the given factor.
"""
image_copy = copy.deepcopy(self)
image_copy._image = ImageEnhance.Sharpness(image_copy._image).enhance(factor)
return image_copy
def invert_colors(self) -> Image:
"""
Return a new image with colors inverted.
The original image is not modified.
Returns
-------
result : Image
The image with inverted colors.
"""
image_copy = copy.deepcopy(self)
image_copy._image = ImageOps.invert(image_copy._image.convert("RGB"))
return image_copy
def rotate_right(self) -> Image:
"""
Return a new `Image` that is rotated 90 degrees clockwise.
The original image is not modified.
Returns
-------
result : Image
The image rotated 90 degrees clockwise.
"""
image_copy = copy.deepcopy(self)
image_copy._image = image_copy._image.rotate(270, expand=True)
return image_copy
def rotate_left(self) -> Image:
"""
Return a new `Image` that is rotated 90 degrees counter-clockwise.
The original image is not modified.
Returns
-------
result : Image
The image rotated 90 degrees counter-clockwise.
"""
image_copy = copy.deepcopy(self)
image_copy._image = image_copy._image.rotate(90, expand=True)
return image_copy
def find_edges(self) -> Image:
"""
Return a grayscale version of the image with the edges highlighted.
The original image is not modified.
Returns
-------
result : Image
The image with edges found.
"""
image_copy = copy.deepcopy(self)
image_copy = image_copy.convert_to_grayscale()
image_copy._image = image_copy._image.filter(ImageFilter.FIND_EDGES)
return image_copy | /safe_ds-0.15.0.tar.gz/safe_ds-0.15.0/src/safeds/data/image/containers/_image.py | 0.951431 | 0.563678 | _image.py | pypi |
from __future__ import annotations
import copy
import functools
import io
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, TypeVar
import Levenshtein
import matplotlib.pyplot as plt
import numpy as np
import openpyxl
import pandas as pd
import seaborn as sns
from pandas import DataFrame
from scipy import stats
from safeds.data.image.containers import Image
from safeds.data.image.typing import ImageFormat
from safeds.data.tabular.typing import ColumnType, Schema
from safeds.exceptions import (
ColumnLengthMismatchError,
ColumnSizeError,
DuplicateColumnNameError,
IndexOutOfBoundsError,
NonNumericColumnError,
UnknownColumnNameError,
WrongFileExtensionError,
)
from ._column import Column
from ._row import Row
if TYPE_CHECKING:
from collections.abc import Callable, Mapping, Sequence
from safeds.data.tabular.transformation import InvertibleTableTransformer, TableTransformer
from ._tagged_table import TaggedTable
# noinspection PyProtectedMember
class Table:
"""
A table is a two-dimensional collection of data. It can either be seen as a list of rows or as a list of columns.
To create a `Table` call the constructor or use one of the following static methods:
| Method | Description |
| ---------------------------------------------------------------------------- | -------------------------------------- |
| [from_csv_file][safeds.data.tabular.containers._table.Table.from_csv_file] | Create a table from a CSV file. |
| [from_json_file][safeds.data.tabular.containers._table.Table.from_json_file] | Create a table from a JSON file. |
| [from_dict][safeds.data.tabular.containers._table.Table.from_dict] | Create a table from a dictionary. |
| [from_columns][safeds.data.tabular.containers._table.Table.from_columns] | Create a table from a list of columns. |
| [from_rows][safeds.data.tabular.containers._table.Table.from_rows] | Create a table from a list of rows. |
Note: When removing the last column of the table, the `number_of_columns` property will be set to 0.
Parameters
----------
data : Mapping[str, Sequence[Any]] | None
The data. If None, an empty table is created.
Raises
------
ColumnLengthMismatchError
If columns have different lengths.
Examples
--------
>>> from safeds.data.tabular.containers import Table
>>> table = Table({"a": [1, 2, 3], "b": [4, 5, 6]})
"""
# ------------------------------------------------------------------------------------------------------------------
# Creation
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def from_csv_file(path: str | Path) -> Table:
"""
Read data from a CSV file into a table.
Parameters
----------
path : str | Path
The path to the CSV file.
Returns
-------
table : Table
The table created from the CSV file.
Raises
------
FileNotFoundError
If the specified file does not exist.
WrongFileExtensionError
If the file is not a csv file.
Examples
--------
>>> from safeds.data.tabular.containers import Table
>>> Table.from_csv_file('./src/resources/from_csv_file.csv')
a b c
0 1 2 1
1 0 0 7
"""
path = Path(path)
if path.suffix != ".csv":
raise WrongFileExtensionError(path, ".csv")
if path.exists():
with path.open() as f:
if f.read().replace("\n", "") == "":
return Table()
return Table._from_pandas_dataframe(pd.read_csv(path))
else:
raise FileNotFoundError(f'File "{path}" does not exist')
    @staticmethod
    def from_excel_file(path: str | Path) -> Table:
        """
        Read data from an Excel file into a table.

        Valid file extensions are `.xls`, '.xlsx', `.xlsm`, `.xlsb`, `.odf`, `.ods` and `.odt`.

        Parameters
        ----------
        path : str | Path
            The path to the Excel file.

        Returns
        -------
        table : Table
            The table created from the Excel file.

        Raises
        ------
        FileNotFoundError
            If the specified file does not exist.
        WrongFileExtensionError
            If the file is not an Excel file.

        Examples
        --------
        >>> from safeds.data.tabular.containers import Table
        >>> Table.from_excel_file('./src/resources/from_excel_file.xlsx')
           a  b
        0  1  4
        1  2  5
        2  3  6
        """
        path = Path(path)
        excel_extensions = [".xls", ".xlsx", ".xlsm", ".xlsb", ".odf", ".ods", ".odt"]
        if path.suffix not in excel_extensions:
            raise WrongFileExtensionError(path, excel_extensions)
        # NOTE(review): `.xls` and the ODF formats are accepted above, but the
        # hard-coded openpyxl engine reads xlsx-family files — confirm those
        # legacy/ODF formats actually load, or choose the engine per extension.
        try:
            return Table._from_pandas_dataframe(
                # The usecols filter drops pandas' auto-generated "Unnamed: N"
                # columns produced by cells without a header.
                pd.read_excel(path, engine="openpyxl", usecols=lambda colname: "Unnamed" not in colname),
            )
        except FileNotFoundError as exception:
            raise FileNotFoundError(f'File "{path}" does not exist') from exception
@staticmethod
def from_json_file(path: str | Path) -> Table:
"""
Read data from a JSON file into a table.
Parameters
----------
path : str | Path
The path to the JSON file.
Returns
-------
table : Table
The table created from the JSON file.
Raises
------
FileNotFoundError
If the specified file does not exist.
WrongFileExtensionError
If the file is not a JSON file.
Examples
--------
>>> from safeds.data.tabular.containers import Table
>>> Table.from_json_file('./src/resources/from_json_file.json')
a b
0 1 4
1 2 5
2 3 6
"""
path = Path(path)
if path.suffix != ".json":
raise WrongFileExtensionError(path, ".json")
if path.exists():
with path.open() as f:
if f.read().replace("\n", "") in ("", "{}"):
return Table()
return Table._from_pandas_dataframe(pd.read_json(path))
else:
raise FileNotFoundError(f'File "{path}" does not exist')
@staticmethod
def from_dict(data: dict[str, list[Any]]) -> Table:
    """
    Build a table from a mapping of column names to their values.

    Parameters
    ----------
    data : dict[str, list[Any]]
        The data.

    Returns
    -------
    table : Table
        The generated table.

    Raises
    ------
    ColumnLengthMismatchError
        If columns have different lengths.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> d = {'a': [1, 2], 'b': [3, 4]}
    >>> Table.from_dict(d)
       a  b
    0  1  3
    1  2  4
    """
    # The constructor already validates column lengths; just delegate.
    return Table(data)
@staticmethod
def from_columns(columns: list[Column]) -> Table:
    """
    Assemble a table from a list of equally sized columns.

    Parameters
    ----------
    columns : list[Column]
        The columns to be combined. They need to have the same size.

    Returns
    -------
    table : Table
        The generated table.

    Raises
    ------
    ColumnLengthMismatchError
        If any of the column sizes does not match with the others.
    DuplicateColumnNameError
        If multiple columns have the same name.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Column, Table
    >>> col1 = Column("a", [1, 2, 3])
    >>> col2 = Column("b", [4, 5, 6])
    >>> Table.from_columns([col1, col2])
       a  b
    0  1  4
    1  2  5
    2  3  6
    """
    frame: DataFrame = pd.DataFrame()
    seen_names: list[str] = []
    for current in columns:
        # All columns must match the size of the first one.
        if current._data.size != columns[0]._data.size:
            raise ColumnLengthMismatchError(
                "\n".join(f"{column.name}: {column._data.size}" for column in columns),
            )
        if current.name in seen_names:
            raise DuplicateColumnNameError(current.name)
        seen_names.append(current.name)
        frame[current.name] = current._data
    return Table._from_pandas_dataframe(frame)
@staticmethod
def from_rows(rows: list[Row]) -> Table:
    """
    Return a table created from a list of rows.

    Parameters
    ----------
    rows : list[Row]
        The rows to be combined. They need to have a matching schema.

    Returns
    -------
    table : Table
        The generated table.

    Raises
    ------
    UnknownColumnNameError
        If any of the row column names does not match with the first row.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Row, Table
    >>> row1 = Row({"a": 1, "b": 2})
    >>> row2 = Row({"a": 3, "b": 4})
    >>> Table.from_rows([row1, row2])
       a  b
    0  1  2
    1  3  4
    """
    if len(rows) == 0:
        return Table._from_pandas_dataframe(pd.DataFrame())
    # The first row fixes the expected column set; all other rows are checked
    # against it.
    column_names_compare: list = list(rows[0].column_names)
    unknown_column_names = set()
    row_array: list[pd.DataFrame] = []
    for row in rows:
        # Collect names of the first row that this row is missing.
        # NOTE(review): extra columns present only in later rows are not
        # detected here — confirm this is intended.
        unknown_column_names.update(set(column_names_compare) - set(row.column_names))
        row_array.append(row._data)
    if len(unknown_column_names) > 0:
        raise UnknownColumnNameError(list(unknown_column_names))
    dataframe: DataFrame = pd.concat(row_array, ignore_index=True)
    # Re-apply the first row's column labels after concat.
    dataframe.columns = column_names_compare
    # Merge the per-row schemas so compatible types are widened consistently.
    schema = Schema.merge_multiple_schemas([row.schema for row in rows])
    return Table._from_pandas_dataframe(dataframe, schema)
@staticmethod
def _from_pandas_dataframe(data: pd.DataFrame, schema: Schema | None = None) -> Table:
    """
    Create a table from a `pandas.DataFrame`.

    Parameters
    ----------
    data : pd.DataFrame
        The data.
    schema : Schema | None
        The schema. If None, the schema is inferred from the data.

    Returns
    -------
    table : Table
        The created table.

    Examples
    --------
    >>> import pandas as pd
    >>> from safeds.data.tabular.containers import Table
    >>> Table._from_pandas_dataframe(pd.DataFrame({"a": [1], "b": [2]}))
       a  b
    0  1  2
    """
    data = data.reset_index(drop=True)
    # Bypass __init__: _data and _schema are assigned directly below.
    result = object.__new__(Table)
    result._data = data
    if schema is None:
        # noinspection PyProtectedMember
        result._schema = Schema._from_pandas_dataframe(data)
    else:
        result._schema = schema
    if result._data.empty:
        # Rebuild the empty frame so it still carries its column labels.
        # Must read from result._schema, not the `schema` parameter: when
        # schema is None and the frame is empty (e.g. Table.from_rows([])),
        # the parameter is still None and the old code raised AttributeError.
        result._data = pd.DataFrame(columns=result._schema.column_names)
    return result
# ------------------------------------------------------------------------------------------------------------------
# Dunder methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, data: Mapping[str, Sequence[Any]] | None = None) -> None:
    """
    Build a table from a mapping of column names to column values.

    Parameters
    ----------
    data : Mapping[str, Sequence[Any]] | None
        The data. If None, an empty table is created.

    Raises
    ------
    ColumnLengthMismatchError
        If columns have different lengths.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> Table({"a": [1, 2, 3], "b": [4, 5, 6]})
       a  b
    0  1  4
    1  2  5
    2  3  6
    """
    if data is None:
        data = {}

    # Validation: every column must have the same number of values.
    lengths = {name: len(values) for name, values in data.items()}
    if len(set(lengths.values())) > 1:
        raise ColumnLengthMismatchError(
            "\n".join(f"{name}: {length}" for name, length in lengths.items()),
        )

    # Implementation
    self._data: pd.DataFrame = pd.DataFrame(data)
    self._data = self._data.reset_index(drop=True)
    self._schema: Schema = Schema._from_pandas_dataframe(self._data)
def __eq__(self, other: Any) -> bool:
    """
    Check two tables for equality of schema and contents.

    Returns
    -------
    'True' if contents are equal, 'False' otherwise.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Row, Table
    >>> row1 = Row({"a": 1, "b": 2})
    >>> row2 = Row({"a": 3, "b": 4})
    >>> row3 = Row({"a": 5, "b": 6})
    >>> table1 = Table.from_rows([row1, row2])
    >>> table2 = Table.from_rows([row1, row2])
    >>> table3 = Table.from_rows([row1, row3])
    >>> table1 == table2
    True
    >>> table1 == table3
    False
    """
    if not isinstance(other, Table):
        return NotImplemented
    if self is other:
        return True
    # Two column-less tables are always equal.
    if self.number_of_columns == 0 and other.number_of_columns == 0:
        return True
    # Column order must not affect equality, so compare sorted views.
    left = self.sort_columns()
    right = other.sort_columns()
    if left.number_of_rows == 0 and right.number_of_rows == 0:
        return left.column_names == right.column_names
    return left._schema == right._schema and left._data.equals(right._data)
def __repr__(self) -> str:
r"""
Display the table in only one line.
Returns
-------
A string representation of the table in only one line.
Examples
--------
>>> from safeds.data.tabular.containers import Table
>>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
>>> repr(table)
' a b\n0 1 2\n1 3 4'
"""
tmp = self._data.copy(deep=True)
tmp.columns = self.column_names
return tmp.__repr__()
def __str__(self) -> str:
    # Render a copy so relabelling the columns never touches self._data.
    frame = self._data.copy(deep=True)
    frame.columns = self.column_names
    return str(frame)
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
@property
def column_names(self) -> list[str]:
    """
    The names of all columns in this table.

    Alias for self.schema.column_names -> list[str].

    Returns
    -------
    column_names : list[str]
        The list of the column names.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"col1": [1, 3], "col2": [2, 4]})
    >>> table.column_names
    ['col1', 'col2']
    """
    return self._schema.column_names
@property
def number_of_columns(self) -> int:
    """
    The number of columns in this table.

    Returns
    -------
    number_of_columns : int
        The number of columns.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> table.number_of_columns
    2
    """
    return len(self._data.columns)
@property
def number_of_rows(self) -> int:
    """
    The number of rows in this table.

    Returns
    -------
    number_of_rows : int
        The number of rows.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> table.number_of_rows
    1
    """
    return len(self._data)
@property
def schema(self) -> Schema:
    """
    The schema describing this table's column names and types.

    Returns
    -------
    schema : Schema
        The schema.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Row, Table
    >>> row = Row({"a": 1, "b": 2.5, "c": "ff"})
    >>> table = Table.from_dict({"a": [1, 8], "b": [2.5, 9], "c": ["g", "g"]})
    >>> table.schema
    Schema({
        'a': Integer,
        'b': RealNumber,
        'c': String
    })
    >>> table.schema == row.schema
    True
    """
    return self._schema
# ------------------------------------------------------------------------------------------------------------------
# Getters
# ------------------------------------------------------------------------------------------------------------------
def get_column(self, column_name: str) -> Column:
    """
    Fetch the column with the given name.

    Parameters
    ----------
    column_name : str
        The name of the column.

    Returns
    -------
    column : Column
        The column.

    Raises
    ------
    UnknownColumnNameError
        If the specified target column name does not exist.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> table.get_column("b")
    Column('b', [2])
    """
    if not self.has_column(column_name):
        # Suggest near-miss names to make typos easier to spot.
        raise UnknownColumnNameError([column_name], self._get_similar_columns(column_name))
    return Column._from_pandas_series(
        self._data[column_name],
        self.get_column_type(column_name),
    )
def has_column(self, column_name: str) -> bool:
    """
    Check whether a column with the given name exists in this table.

    Alias for self.schema.hasColumn(column_name: str) -> bool.

    Parameters
    ----------
    column_name : str
        The name of the column.

    Returns
    -------
    contains : bool
        True if the column exists.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> table.has_column("b")
    True
    >>> table.has_column("c")
    False
    """
    # Delegate the lookup to the schema, which owns the column metadata.
    return self._schema.has_column(column_name)
def get_column_type(self, column_name: str) -> ColumnType:
    """
    Look up the type of the named column.

    Alias for self.schema.get_type_of_column(column_name: str) -> ColumnType.

    Parameters
    ----------
    column_name : str
        The name of the column to be queried.

    Returns
    -------
    type : ColumnType
        The type of the column.

    Raises
    ------
    UnknownColumnNameError
        If the specified target column name does not exist.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2.5]})
    >>> table.get_column_type("b")
    RealNumber
    """
    # The schema owns column typing; delegate (it raises on unknown names).
    return self._schema.get_column_type(column_name)
def get_row(self, index: int) -> Row:
    """
    Fetch the row at the given position.

    Parameters
    ----------
    index : int
        The index.

    Returns
    -------
    row : Row
        The row of the table at the index.

    Raises
    ------
    IndexOutOfBoundsError
        If no row at the specified index exists in this table.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> table.get_row(0)
    Row({
        'a': 1,
        'b': 2
    })
    """
    # Valid indices are 0 .. number_of_rows - 1; negative indexing is rejected.
    if index < 0 or index > len(self._data.index) - 1:
        raise IndexOutOfBoundsError(index)
    return Row._from_pandas_dataframe(self._data.iloc[[index]], self._schema)
def _get_similar_columns(self, column_name: str) -> list[str]:
    """
    Get all the column names in a Table that are similar to a given name.

    Parameters
    ----------
    column_name : str
        The name to compare the Table's column names to.

    Returns
    -------
    similar_columns: list[str]
        A list of all column names in the Table that are similar or equal to the given column name.
    """
    similar_columns = []
    # Start with a lenient Jaro-Winkler threshold; it is raised below if the
    # match list grows too large.
    similarity = 0.6
    i = 0
    while i < len(self.column_names):
        if Levenshtein.jaro_winkler(self.column_names[i], column_name) >= similarity:
            similar_columns.append(self.column_names[i])
        i += 1
        # Once a 4th match is found, tighten the threshold (up to 0.9) and
        # rescan from the beginning so at most 3 suggestions survive. With
        # threshold 0.9 the 4-match cap no longer triggers a restart, so the
        # loop always terminates.
        if len(similar_columns) == 4 and similarity < 0.9:
            similarity += 0.1
            similar_columns = []
            i = 0
    return similar_columns
# ------------------------------------------------------------------------------------------------------------------
# Information
# ------------------------------------------------------------------------------------------------------------------
def summarize_statistics(self) -> Table:
    """
    Return a table with a number of statistical key values.

    The original table is not modified.

    Returns
    -------
    result : Table
        The table with statistics.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> table.summarize_statistics()
                  metrics                   a                   b
    0             maximum                   3                   4
    1             minimum                   1                   2
    2                mean                 2.0                 3.0
    3                mode              [1, 3]              [2, 4]
    4              median                 2.0                 3.0
    5                 sum                   4                   6
    6            variance                 2.0                 2.0
    7  standard deviation  1.4142135623730951  1.4142135623730951
    8              idness                 1.0                 1.0
    9           stability                 0.5                 0.5
    """
    # Degenerate case: no columns at all -> only the metric labels.
    if self.number_of_columns == 0:
        return Table(
            {
                "metrics": [
                    "maximum",
                    "minimum",
                    "mean",
                    "mode",
                    "median",
                    "sum",
                    "variance",
                    "standard deviation",
                    "idness",
                    "stability",
                ],
            },
        )
    # Columns but no rows: every statistic is undefined, shown as "-".
    elif self.number_of_rows == 0:
        table = Table(
            {
                "metrics": [
                    "maximum",
                    "minimum",
                    "mean",
                    "mode",
                    "median",
                    "sum",
                    "variance",
                    "standard deviation",
                    "idness",
                    "stability",
                ],
            },
        )
        for name in self.column_names:
            table = table.add_column(Column(name, ["-", "-", "-", "-", "-", "-", "-", "-", "-", "-"]))
        return table
    columns = self.to_columns()
    result = pd.DataFrame()
    statistics = {}
    for column in columns:
        # Map each metric label to the (unbound) statistic method of this column.
        statistics = {
            "maximum": column.maximum,
            "minimum": column.minimum,
            "mean": column.mean,
            "mode": column.mode,
            "median": column.median,
            "sum": column.sum,
            "variance": column.variance,
            "standard deviation": column.standard_deviation,
            "idness": column.idness,
            "stability": column.stability,
        }
        values = []
        for function in statistics.values():
            # Statistics that are undefined for this column (e.g. mean of a
            # string column) are rendered as "-" instead of failing.
            try:
                values.append(str(function()))
            except (NonNumericColumnError, ValueError):
                values.append("-")
        # Append this column's statistics as a new column of the result frame.
        result = pd.concat([result, pd.DataFrame(values)], axis=1)
    # Prepend the metric labels, then restore the original column names.
    result = pd.concat([pd.DataFrame(list(statistics.keys())), result], axis=1)
    result.columns = ["metrics", *self.column_names]
    return Table._from_pandas_dataframe(result)
# ------------------------------------------------------------------------------------------------------------------
# Transformations
# ------------------------------------------------------------------------------------------------------------------
# This method is meant as a way to "cast" instances of subclasses of `Table` to a proper `Table`, dropping any
# additional constraints that might have to hold in the subclass.
# Override accordingly in subclasses.
def _as_table(self: Table) -> Table:
    """
    View this object as a plain `Table`.

    Subclasses override this to drop any extra invariants they enforce; the
    base class can simply return itself.

    Returns
    -------
    table: Table
        The table, as an instance of the Table class.
    """
    return self
def add_column(self, column: Column) -> Table:
    """
    Create a new table with the given column appended as the last column.

    The original table is not modified.

    Returns
    -------
    result : Table
        The table with the column attached.

    Raises
    ------
    DuplicateColumnNameError
        If the new column already exists.
    ColumnSizeError
        If the size of the column does not match the number of rows.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> col = Column("c", ["d", "e"])
    >>> table.add_column(col)
       a  b  c
    0  1  2  d
    1  3  4  e
    """
    if self.has_column(column.name):
        raise DuplicateColumnNameError(column.name)
    # A table without columns accepts any column length.
    if self.number_of_columns != 0 and column.number_of_rows != self.number_of_rows:
        raise ColumnSizeError(str(self.number_of_rows), str(column._data.size))

    frame = self._data.copy()
    frame.columns = self._schema.column_names
    frame[column.name] = column._data
    return Table._from_pandas_dataframe(frame)
def add_columns(self, columns: list[Column] | Table) -> Table:
    """
    Create a new `Table` with several columns appended.

    The original table is not modified.

    Parameters
    ----------
    columns : list[Column] or Table
        The columns to be added.

    Returns
    -------
    result: Table
        A new table combining the original table and the given columns.

    Raises
    ------
    DuplicateColumnNameError
        If at least one column name from the provided column list already exists in the table.
    ColumnSizeError
        If at least one of the column sizes from the provided column list does not match the table.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Column, Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> col1 = Column("c", ["d", "e"])
    >>> col2 = Column("d", [3.5, 7.9])
    >>> table.add_columns([col1, col2])
       a  b  c    d
    0  1  2  d  3.5
    1  3  4  e  7.9
    """
    # A Table argument is treated as the list of its columns.
    if isinstance(columns, Table):
        columns = columns.to_columns()
    frame = self._data.copy()
    frame.columns = self._schema.column_names
    for column in columns:
        # Checking against `frame.columns` also catches duplicates among the
        # newly added columns themselves.
        if column.name in frame.columns:
            raise DuplicateColumnNameError(column.name)
        if self.number_of_columns != 0 and column.number_of_rows != self.number_of_rows:
            raise ColumnSizeError(str(self.number_of_rows), str(column._data.size))
        frame[column.name] = column._data
    return Table._from_pandas_dataframe(frame)
def add_row(self, row: Row) -> Table:
    """
    Return a new `Table` with an added Row attached.

    If the table happens to be empty beforehand, respective columns will be added automatically.
    The order of columns of the new row will be adjusted to the order of columns in the table.
    The new table will contain the merged schema.

    The original table is not modified.

    Parameters
    ----------
    row : Row
        The row to be added.

    Returns
    -------
    table : Table
        A new table with the added row at the end.

    Raises
    ------
    UnknownColumnNameError
        If the row has different column names than the table.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Row, Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> row = Row.from_dict({"a": 3, "b": 4})
    >>> table.add_row(row)
       a  b
    0  1  2
    1  3  4
    """
    int_columns = []
    result = self._copy()
    # An empty table adopts the row's columns and schema wholesale.
    if self.number_of_columns == 0:
        return Table.from_rows([row])
    if len(set(self.column_names) - set(row.column_names)) > 0:
        raise UnknownColumnNameError(
            sorted(
                set(self.column_names) - set(row.column_names),
                # Report missing names in this table's column order.
                key={val: ix for ix, val in enumerate(self.column_names)}.__getitem__,
            ),
        )
    if result.number_of_rows == 0:
        # Remember which of the row's values are ints: concatenating onto an
        # empty frame would otherwise promote them to float.
        int_columns = list(filter(lambda name: isinstance(row[name], int | np.int64 | np.int32), row.column_names))

    new_df = pd.concat([result._data, row._data]).infer_objects()
    new_df.columns = result.column_names
    # Merge the schemas so compatible but differing column types are widened.
    schema = Schema.merge_multiple_schemas([result.schema, row.schema])
    result = Table._from_pandas_dataframe(new_df, schema)

    # Restore integer dtype for the columns remembered above.
    for column in int_columns:
        result = result.replace_column(column, [result.get_column(column).transform(lambda it: int(it))])

    return result
def add_rows(self, rows: list[Row] | Table) -> Table:
    """
    Create a new `Table` with several rows appended.

    The order of columns of the new rows will be adjusted to the order of columns in the table.
    The new table will contain the merged schema.

    The original table is not modified.

    Parameters
    ----------
    rows : list[Row] or Table
        The rows to be added.

    Returns
    -------
    result : Table
        A new table which combines the original table and the given rows.

    Raises
    ------
    UnknownColumnNameError
        If at least one of the rows have different column names than the table.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Row, Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> row1 = Row.from_dict({"a": 3, "b": 4})
    >>> row2 = Row.from_dict({"a": 5, "b": 6})
    >>> table.add_rows([row1, row2])
       a  b
    0  1  2
    1  3  4
    2  5  6
    """
    # A Table argument is treated as the list of its rows.
    if isinstance(rows, Table):
        rows = rows.to_rows()
    if len(rows) == 0:
        return self._copy()

    # Validate all rows up front so the error lists every missing column.
    missing_names: set = set()
    for row in rows:
        missing_names |= set(self.column_names) - set(row.column_names)
    if len(missing_names) > 0:
        position_of = {name: index for index, name in enumerate(self.column_names)}
        raise UnknownColumnNameError(sorted(missing_names, key=position_of.__getitem__))

    result = self._copy()
    for row in rows:
        result = result.add_row(row)
    return result
def filter_rows(self, query: Callable[[Row], bool]) -> Table:
    """
    Create a new table containing only the rows for which the query holds.

    The original table is not modified.

    Parameters
    ----------
    query : lambda function
        A Callable that is applied to all rows.

    Returns
    -------
    table : Table
        A table containing only the rows filtered by the query.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> table.filter_rows(lambda x: x["a"] < 2)
       a  b
    0  1  2
    """
    kept_rows: list[Row] = [row for row in self.to_rows() if query(row)]
    if kept_rows:
        return self.from_rows(kept_rows)
    # No matches: keep the schema so column names and types survive.
    return Table._from_pandas_dataframe(pd.DataFrame(), self._schema)
_T = TypeVar("_T")
def group_rows_by(self, key_selector: Callable[[Row], _T]) -> dict[_T, Table]:
    """
    Return a dictionary with copies of the output tables as values and the keys from the key_selector.

    The original table is not modified.

    Parameters
    ----------
    key_selector : Callable[[Row], _T]
        A Callable that is applied to all rows and returns the key of the group.

    Returns
    -------
    dictionary : dict
        A dictionary containing the new tables as values and the selected keys as keys.
    """
    dictionary: dict[Table._T, Table] = {}
    for row in self.to_rows():
        # Evaluate the selector once per row; the old code called it three
        # times, which was wasteful and surprising for non-pure selectors.
        key = key_selector(row)
        if key in dictionary:
            dictionary[key] = dictionary[key].add_row(row)
        else:
            dictionary[key] = Table.from_rows([row])
    return dictionary
def keep_only_columns(self, column_names: list[str]) -> Table:
    """
    Create a new table restricted to the given column(s).

    The original table is not modified.

    Note: When removing the last column of the table, the `number_of_columns` property will be set to 0.

    Parameters
    ----------
    column_names : list[str]
        A list containing only the columns to be kept.

    Returns
    -------
    table : Table
        A table containing only the given column(s).

    Raises
    ------
    UnknownColumnNameError
        If any of the given columns does not exist.
    IllegalSchemaModificationError
        If removing the columns would violate an invariant in the subclass.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> table.keep_only_columns(["b"])
       b
    0  2
    1  4
    """
    # Collect every unknown name (plus near-miss suggestions) before raising.
    unknown_names = []
    suggestions: list[str] = []
    for name in column_names:
        if not self._schema.has_column(name):
            suggestions = suggestions + self._get_similar_columns(name)
            unknown_names.append(name)
    if unknown_names:
        raise UnknownColumnNameError(unknown_names, suggestions)
    # Keeping columns is dropping the complement.
    return self._copy().remove_columns(list(set(self.column_names) - set(column_names)))
def remove_columns(self, column_names: list[str]) -> Table:
    """
    Create a new table with the given column(s) dropped.

    The original table is not modified.

    Note: When removing the last column of the table, the `number_of_columns` property will be set to 0.

    Parameters
    ----------
    column_names : list[str]
        A list containing all columns to be dropped.

    Returns
    -------
    table : Table
        A table without the given columns.

    Raises
    ------
    UnknownColumnNameError
        If any of the given columns does not exist.
    IllegalSchemaModificationError
        If removing the columns would violate an invariant in the subclass.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3], "b": [2, 4]})
    >>> table.remove_columns(["b"])
       a
    0  1
    1  3
    """
    # Collect every unknown name (plus near-miss suggestions) before raising.
    unknown_names = []
    suggestions: list[str] = []
    for name in column_names:
        if not self._schema.has_column(name):
            suggestions = suggestions + self._get_similar_columns(name)
            unknown_names.append(name)
    if unknown_names:
        raise UnknownColumnNameError(unknown_names, suggestions)

    remaining = [name for name in self._schema.column_names if name not in column_names]
    frame = self._data.drop(labels=column_names, axis="columns")
    frame.columns = remaining
    # Dropping every column yields a fresh empty table.
    if not remaining:
        return Table()
    return Table._from_pandas_dataframe(frame)
def remove_columns_with_missing_values(self) -> Table:
    """
    Create a new table with every column containing missing values dropped.

    The original table is not modified.

    Note: When removing the last column of the table, the `number_of_columns` property will be set to 0.

    Returns
    -------
    table : Table
        A table without the columns that contain missing values.

    Raises
    ------
    IllegalSchemaModificationError
        If removing the columns would violate an invariant in the subclass.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 2], "b": [None, 2]})
    >>> table.remove_columns_with_missing_values()
       a
    0  1
    1  2
    """
    complete_columns = [column for column in self.to_columns() if not column.has_missing_values()]
    return Table.from_columns(complete_columns)
def remove_columns_with_non_numerical_values(self) -> Table:
    """
    Create a new table containing only the numerical columns.

    The original table is not modified.

    Note: When removing the last column of the table, the `number_of_columns` property will be set to 0.

    Returns
    -------
    table : Table
        A table without the columns that contain non-numerical values.

    Raises
    ------
    IllegalSchemaModificationError
        If removing the columns would violate an invariant in the subclass.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 0], "b": ["test", 2]})
    >>> table.remove_columns_with_non_numerical_values()
       a
    0  1
    1  0
    """
    numerical_columns = [column for column in self.to_columns() if column.type.is_numeric()]
    return Table.from_columns(numerical_columns)
def remove_duplicate_rows(self) -> Table:
    """
    Create a new table with duplicate rows dropped.

    The original table is not modified.

    Returns
    -------
    result : Table
        The table with the duplicate rows removed.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3, 3], "b": [2, 4, 4]})
    >>> table.remove_duplicate_rows()
       a  b
    0  1  2
    1  3  4
    """
    deduplicated = self._data.drop_duplicates(ignore_index=True)
    deduplicated.columns = self._schema.column_names
    return Table._from_pandas_dataframe(deduplicated)
def remove_rows_with_missing_values(self) -> Table:
    """
    Create a new table with every row containing missing values dropped.

    The original table is not modified.

    Returns
    -------
    table : Table
        A table without the rows that contain missing values.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1.0, None, 3], "b": [2, 4.0, None]})
    >>> table.remove_rows_with_missing_values()
         a    b
    0  1.0  2.0
    """
    complete_rows = self._data.copy(deep=True).dropna(axis="index")
    return Table._from_pandas_dataframe(complete_rows)
def remove_rows_with_outliers(self) -> Table:
    """
    Return a new table without those rows that contain at least one outlier.

    We define an outlier as a value that has a distance of more than 3 standard deviations from the column mean.
    Missing values are not considered outliers. They are also ignored during the calculation of the standard
    deviation.

    The original table is not modified.

    Returns
    -------
    new_table : Table
        A new table without rows containing outliers.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Column, Table
    >>> c1 = Column("a", [1, 3, 1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0])
    >>> c2 = Column("b", [1.5, 1, 0.5, 0.01, 0, 0, 0, 0, 0, 0, 0, 0])
    >>> c3 = Column("c", [0.1, 0.00, 0.4, 0.2, 0, 0, 0, 0, 0, 0, 0, 0])
    >>> c4 = Column("d", [-1000000, 1000000, -1000000, -1000000, -1000000, -1000000, -1000000, -1000000, -1000000, -1000000, -1000000, -1000000])
    >>> table = Table.from_columns([c1, c2, c3, c4])
    >>> table.remove_rows_with_outliers()
          a     b    c        d
    0   1.0  1.50  0.1 -1000000
    1   1.0  0.50  0.4 -1000000
    2   0.1  0.01  0.2 -1000000
    3   0.0  0.00  0.0 -1000000
    4   0.0  0.00  0.0 -1000000
    5   0.0  0.00  0.0 -1000000
    6   0.0  0.00  0.0 -1000000
    7   0.0  0.00  0.0 -1000000
    8   0.0  0.00  0.0 -1000000
    9   0.0  0.00  0.0 -1000000
    10  0.0  0.00  0.0 -1000000
    """
    copy = self._data.copy(deep=True)

    # Outliers are only meaningful for numeric columns; z-scores are computed
    # on the numeric subset, but filtering is applied to the full table.
    table_without_nonnumericals = self.remove_columns_with_non_numerical_values()
    # nan_policy="omit" excludes missing values from mean/stddev and yields
    # NaN z-scores for them.
    z_scores = np.absolute(stats.zscore(table_without_nonnumericals._data, nan_policy="omit"))
    # Keep a row only if every numeric value is within 3 standard deviations;
    # NaN z-scores (missing values) never count as outliers.
    filter_ = ((z_scores < 3) | np.isnan(z_scores)).all(axis=1)
    return Table._from_pandas_dataframe(copy[filter_], self._schema)
def rename_column(self, old_name: str, new_name: str) -> Table:
    """
    Create a new `Table` in which one column carries a new name.

    The original table is not modified.

    Parameters
    ----------
    old_name : str
        The old name of the target column.
    new_name : str
        The new name of the target column.

    Returns
    -------
    table : Table
        The Table with the renamed column.

    Raises
    ------
    UnknownColumnNameError
        If the specified old target column name does not exist.
    DuplicateColumnNameError
        If the specified new target column name already exists.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> table.rename_column("b", "c")
       a  c
    0  1  2
    """
    if old_name not in self._schema.column_names:
        raise UnknownColumnNameError([old_name], self._get_similar_columns(old_name))
    # Renaming a column to itself is a no-op.
    if old_name == new_name:
        return self
    if new_name in self._schema.column_names:
        raise DuplicateColumnNameError(new_name)

    renamed = self._data.copy()
    renamed.columns = self._schema.column_names
    return Table._from_pandas_dataframe(renamed.rename(columns={old_name: new_name}))
def replace_column(self, old_column_name: str, new_columns: list[Column]) -> Table:
    """
    Create a new table in which one column is replaced by a list of new columns.

    The order of columns is kept.

    The original table is not modified.

    Parameters
    ----------
    old_column_name : str
        The name of the column to be replaced.
    new_columns : list[Column]
        The list of new columns replacing the old column.

    Returns
    -------
    result : Table
        A table with the old column replaced by the new columns.

    Raises
    ------
    UnknownColumnNameError
        If the old column does not exist.
    DuplicateColumnNameError
        If at least one of the new columns already exists and the existing column is not affected by the replacement.
    ColumnSizeError
        If the size of at least one of the new columns does not match the amount of rows.
    IllegalSchemaModificationError
        If replacing the column would violate an invariant in the subclass.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Column, Table
    >>> table = Table.from_dict({"a": [1], "b": [2]})
    >>> new_col = Column("new", [3])
    >>> table.replace_column("b", [new_col])
       a  new
    0  1    3
    """
    if old_column_name not in self._schema.column_names:
        raise UnknownColumnNameError([old_column_name], self._get_similar_columns(old_column_name))

    # Rebuild the column list in order, splicing the replacements in at the
    # position of the old column.
    rebuilt = list[Column]()
    for name in self.column_names:
        if name != old_column_name:
            rebuilt.append(self.get_column(name))
            continue
        for new_column in new_columns:
            # A new column may reuse the replaced name, but no other existing one.
            if new_column.name in self.column_names and new_column.name != old_column_name:
                raise DuplicateColumnNameError(new_column.name)
            if self.number_of_rows != new_column.number_of_rows:
                raise ColumnSizeError(str(self.number_of_rows), str(new_column.number_of_rows))
            rebuilt.append(new_column)
    return Table.from_columns(rebuilt)
def shuffle_rows(self) -> Table:
    """
    Create a new `Table` whose rows are a random permutation of this one's.

    The original table is not modified.

    Returns
    -------
    result : Table
        The shuffled Table.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> import numpy as np
    >>> np.random.seed(123456)
    >>> table = Table.from_dict({"a": [1, 3, 5], "b": [2, 4, 6]})
    >>> table.shuffle_rows()
       a  b
    0  5  6
    1  1  2
    2  3  4
    """
    # sample(frac=1.0) draws every row exactly once, in random order.
    shuffled = self._data.sample(frac=1.0)
    shuffled.columns = self._schema.column_names
    return Table._from_pandas_dataframe(shuffled)
def slice_rows(
    self,
    start: int | None = None,
    end: int | None = None,
    step: int = 1,
) -> Table:
    """
    Copy a slice of this table's rows into a new table.

    The original table is not modified.

    Parameters
    ----------
    start : int | None
        The first index of the range to be copied into a new table, None by default.
    end : int | None
        The last index of the range to be copied into a new table, None by default.
    step : int
        The step size used to iterate through the table, 1 by default.

    Returns
    -------
    result : Table
        The resulting table.

    Raises
    ------
    IndexOutOfBoundsError
        If the index is out of bounds.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3, 5], "b": [2, 4, 6]})
    >>> table.slice_rows(0, 2)
       a  b
    0  1  2
    1  3  4
    """
    # Defaults: the full row range.
    start = 0 if start is None else start
    end = self.number_of_rows if end is None else end

    if end < start:
        raise IndexOutOfBoundsError(slice(start, end))
    # Report the offending bound; start takes precedence when both are bad.
    if start < 0 or start > self.number_of_rows:
        raise IndexOutOfBoundsError(start)
    if end < 0 or end > self.number_of_rows:
        raise IndexOutOfBoundsError(end)

    sliced = self._data.iloc[start:end:step]
    sliced.columns = self._schema.column_names
    return Table._from_pandas_dataframe(sliced)
def sort_columns(
    self,
    comparator: Callable[[Column, Column], int] = lambda col1, col2: (col1.name > col2.name)
    - (col1.name < col2.name),
) -> Table:
    """
    Sort the columns of a `Table` with the given comparator and return a new `Table`.

    The comparator is a function that takes two columns `col1` and `col2` and
    returns an integer:

    * If `col1` should be ordered before `col2`, the function should return a negative number.
    * If `col1` should be ordered after `col2`, the function should return a positive number.
    * If the original order of `col1` and `col2` should be kept, the function should return 0.

    If no comparator is given, the columns will be sorted alphabetically by their name.

    The original table is not modified.

    Parameters
    ----------
    comparator : Callable[[Column, Column], int]
        The function used to compare two columns.

    Returns
    -------
    new_table : Table
        A new table with sorted columns.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1], "b": [2] })
    >>> table.sort_columns(lambda col1, col2: 1)
       a  b
    0  1  2
    >>> table.sort_columns(lambda col1, col2: -1)
       b  a
    0  2  1
    >>> table2 = Table.from_dict({"b": [2], "a": [1]})
    >>> table2.sort_columns()
       a  b
    0  1  2
    """
    # sorted() is stable, so columns comparing equal keep their original order.
    ordered = sorted(self.to_columns(), key=functools.cmp_to_key(comparator))
    return Table.from_columns(ordered)
def sort_rows(self, comparator: Callable[[Row, Row], int]) -> Table:
    """
    Sort the rows of a `Table` with the given comparator and return a new `Table`.

    The comparator is a function that takes two rows `row1` and `row2` and
    returns an integer:

    * If `row1` should be ordered before `row2`, the function should return a negative number.
    * If `row1` should be ordered after `row2`, the function should return a positive number.
    * If the original order of `row1` and `row2` should be kept, the function should return 0.

    The original table is not modified.

    Parameters
    ----------
    comparator : Callable[[Row, Row], int]
        The function used to compare two rows.

    Returns
    -------
    new_table : Table
        A new table with sorted rows.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 3, 5], "b": [2, 4, 6] })
    >>> table.sort_rows(lambda row1, row2: 1)
       a  b
    0  1  2
    1  3  4
    2  5  6
    >>> table.sort_rows(lambda row1, row2: -1)
       a  b
    0  5  6
    1  3  4
    2  1  2
    >>> table.sort_rows(lambda row1, row2: 0)
       a  b
    0  1  2
    1  3  4
    2  5  6
    """
    # sorted() is stable, so rows comparing equal keep their original order.
    ordered = sorted(self.to_rows(), key=functools.cmp_to_key(comparator))
    return Table.from_rows(ordered)
def split_rows(self, percentage_in_first: float) -> tuple[Table, Table]:
    """
    Split the table into two new tables.

    The original table is not modified.

    Parameters
    ----------
    percentage_in_first : float
        The desired size of the first table in percentage to the given table; must be between 0 and 1.

    Returns
    -------
    result : (Table, Table)
        A tuple containing the two resulting tables. The first table has the specified size, the second table
        contains the rest of the data.

    Raises
    ------
    ValueError:
        if the 'percentage_in_first' is not between 0 and 1.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"temperature": [10, 15, 20, 25, 30], "sales": [54, 74, 90, 206, 210]})
    >>> slices = table.split_rows(0.4)
    >>> slices[0]
       temperature  sales
    0           10     54
    1           15     74
    >>> slices[1]
       temperature  sales
    0           20     90
    1           25    206
    2           30    210
    """
    if not 0 <= percentage_in_first <= 1:
        raise ValueError("The given percentage is not between 0 and 1")
    if self.number_of_rows == 0:
        return Table(), Table()
    # Compute the split point once and slice on both sides of it.
    pivot = round(percentage_in_first * self.number_of_rows)
    return self.slice_rows(0, pivot), self.slice_rows(pivot)
def tag_columns(self, target_name: str, feature_names: list[str] | None = None) -> TaggedTable:
    """
    Return a new `TaggedTable` with columns marked as a target column or feature columns.

    The original table is not modified.

    Parameters
    ----------
    target_name : str
        Name of the target column.
    feature_names : list[str] | None
        Names of the feature columns. If None, all columns except the target column are used.

    Returns
    -------
    tagged_table : TaggedTable
        A new tagged table with the given target and feature names.

    Raises
    ------
    ValueError
        If the target column is also a feature column.
    ValueError
        If no feature columns are specified.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table, TaggedTable
    >>> table = Table.from_dict({"item": ["apple", "milk", "beer"], "price": [1.10, 1.19, 1.79], "amount_bought": [74, 72, 51]})
    >>> tagged_table = table.tag_columns(target_name="amount_bought", feature_names=["item", "price"])
    """
    # Imported locally to avoid a circular import between Table and TaggedTable.
    from ._tagged_table import TaggedTable

    return TaggedTable._from_table(self, target_name=target_name, feature_names=feature_names)
def transform_column(self, name: str, transformer: Callable[[Row], Any]) -> Table:
    """
    Return a new `Table` with the provided column transformed by calling the provided transformer.

    The original table is not modified.

    Parameters
    ----------
    name : str
        The name of the column to be transformed.
    transformer : Callable[[Row], Any]
        A function that maps each row to the new value of the cell in the transformed column.

    Returns
    -------
    result : Table
        The table with the transformed column.

    Raises
    ------
    UnknownColumnNameError
        If the column does not exist.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"item": ["apple", "milk", "beer"], "price": [1.00, 1.19, 1.79]})
    >>> table.transform_column("price", lambda row: row.get_value("price") * 100)
        item  price
    0  apple  100.0
    1   milk  119.0
    2   beer  179.0
    """
    # Guard clause: fail fast with name suggestions if the column is unknown.
    if not self.has_column(name):
        raise UnknownColumnNameError([name], self._get_similar_columns(name))
    transformed_values = [transformer(row) for row in self.to_rows()]
    return self.replace_column(name, [Column(name, transformed_values)])
def transform_table(self, transformer: TableTransformer) -> Table:
    """
    Return a new `Table` with a learned transformation applied to this table.

    The original table is not modified.

    Parameters
    ----------
    transformer : TableTransformer
        The transformer which transforms the given table.

    Returns
    -------
    transformed_table : Table
        The transformed table.

    Raises
    ------
    TransformerNotFittedError
        If the transformer has not been fitted yet.
    IllegalSchemaModificationError
        If replacing the column would violate an invariant in the subclass.

    Examples
    --------
    >>> from safeds.data.tabular.transformation import OneHotEncoder
    >>> from safeds.data.tabular.containers import Table
    >>> transformer = OneHotEncoder()
    >>> table = Table.from_dict({"fruit": ["apple", "pear", "apple"], "pet": ["dog", "duck", "duck"]})
    >>> transformer = transformer.fit(table, None)
    >>> table.transform_table(transformer)
       fruit__apple  fruit__pear  pet__dog  pet__duck
    0           1.0          0.0       1.0        0.0
    1           0.0          1.0       0.0        1.0
    2           1.0          0.0       0.0        1.0
    """
    # The transformer owns the logic; this method only delegates.
    return transformer.transform(self)
def inverse_transform_table(self, transformer: InvertibleTableTransformer) -> Table:
    """
    Return a new `Table` with the inverted transformation applied by the given transformer.

    The original table is not modified.

    Parameters
    ----------
    transformer : InvertibleTableTransformer
        A transformer that was fitted with columns, which are all present in the table.

    Returns
    -------
    table : Table
        The original table.

    Raises
    ------
    TransformerNotFittedError
        If the transformer has not been fitted yet.

    Examples
    --------
    >>> from safeds.data.tabular.transformation import OneHotEncoder
    >>> from safeds.data.tabular.containers import Table
    >>> transformer = OneHotEncoder()
    >>> table = Table.from_dict({"a": ["j", "k", "k"], "b": ["x", "y", "x"]})
    >>> transformer = transformer.fit(table, None)
    >>> transformed_table = transformer.transform(table)
    >>> transformed_table.inverse_transform_table(transformer)
       a  b
    0  j  x
    1  k  y
    2  k  x
    >>> transformer.inverse_transform(transformed_table)
       a  b
    0  j  x
    1  k  y
    2  k  x
    """
    # The transformer owns the inversion logic; this method only delegates.
    return transformer.inverse_transform(self)
# ------------------------------------------------------------------------------------------------------------------
# Plotting
# ------------------------------------------------------------------------------------------------------------------
def plot_correlation_heatmap(self) -> Image:
    """
    Plot a correlation heatmap for all numerical columns of this `Table`.

    Returns
    -------
    plot: Image
        The plot as an image.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"temperature": [10, 15, 20, 25, 30], "sales": [54, 74, 90, 206, 210]})
    >>> image = table.plot_correlation_heatmap()
    """
    only_numerical = self.remove_columns_with_non_numerical_values()

    if self.number_of_rows == 0:
        warnings.warn(
            "An empty table has been used. A correlation heatmap on an empty table will show nothing.",
            stacklevel=2,
        )

    # The two branches of the original code rendered the exact same heatmap;
    # only the empty-table case additionally suppressed matplotlib's
    # "identical low and high limits" warning. The filter matches a warning
    # that only fires for empty axes, so it is safe to apply it uniformly
    # and render the heatmap once.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message=(
                "Attempting to set identical low and high (xlims|ylims) makes transformation singular;"
                " automatically expanding."
            ),
        )
        fig = plt.figure()
        sns.heatmap(
            data=only_numerical._data.corr(),
            vmin=-1,
            vmax=1,
            xticklabels=only_numerical.column_names,
            yticklabels=only_numerical.column_names,
            cmap="vlag",
        )
        plt.tight_layout()

    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    return Image(buffer, format_=ImageFormat.PNG)
def plot_lineplot(self, x_column_name: str, y_column_name: str) -> Image:
    """
    Plot two columns against each other in a lineplot.

    If there are multiple x-values for a y-value, the resulting plot will consist of a line representing the mean
    and the lower-transparency area around the line representing the 95% confidence interval.

    Parameters
    ----------
    x_column_name : str
        The column name of the column to be plotted on the x-Axis.
    y_column_name : str
        The column name of the column to be plotted on the y-Axis.

    Returns
    -------
    plot: Image
        The plot as an image.

    Raises
    ------
    UnknownColumnNameError
        If either of the columns do not exist.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"temperature": [10, 15, 20, 25, 30], "sales": [54, 74, 90, 206, 210]})
    >>> image = table.plot_lineplot("temperature", "sales")
    """
    # Collect every missing column (and suggestions for it) before raising,
    # so the error reports both axes at once when both are unknown.
    missing_names = [name for name in (x_column_name, y_column_name) if not self.has_column(name)]
    if missing_names:
        suggestions: list[str] = []
        for missing in missing_names:
            suggestions += self._get_similar_columns(missing)
        raise UnknownColumnNameError(missing_names, suggestions)

    fig = plt.figure()
    ax = sns.lineplot(
        data=self._data,
        x=x_column_name,
        y=y_column_name,
    )
    ax.set(xlabel=x_column_name, ylabel=y_column_name)
    ax.set_xticks(ax.get_xticks())
    # Rotate the x-axis labels to reduce the chance of them overlapping.
    ax.set_xticklabels(
        ax.get_xticklabels(),
        rotation=45,
        horizontalalignment="right",
    )
    plt.tight_layout()

    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    return Image(buffer, format_=ImageFormat.PNG)
def plot_scatterplot(self, x_column_name: str, y_column_name: str) -> Image:
    """
    Plot two columns against each other in a scatterplot.

    Parameters
    ----------
    x_column_name : str
        The column name of the column to be plotted on the x-Axis.
    y_column_name : str
        The column name of the column to be plotted on the y-Axis.

    Returns
    -------
    plot: Image
        The plot as an image.

    Raises
    ------
    UnknownColumnNameError
        If either of the columns do not exist.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"temperature": [10, 15, 20, 25, 30], "sales": [54, 74, 90, 206, 210]})
    >>> image = table.plot_scatterplot("temperature", "sales")
    """
    # Collect every missing column (and suggestions for it) before raising,
    # so the error reports both axes at once when both are unknown.
    missing_names = [name for name in (x_column_name, y_column_name) if not self.has_column(name)]
    if missing_names:
        suggestions: list[str] = []
        for missing in missing_names:
            suggestions += self._get_similar_columns(missing)
        raise UnknownColumnNameError(missing_names, suggestions)

    fig = plt.figure()
    ax = sns.scatterplot(
        data=self._data,
        x=x_column_name,
        y=y_column_name,
    )
    ax.set(xlabel=x_column_name, ylabel=y_column_name)
    ax.set_xticks(ax.get_xticks())
    # Rotate the x-axis labels to reduce the chance of them overlapping.
    ax.set_xticklabels(
        ax.get_xticklabels(),
        rotation=45,
        horizontalalignment="right",
    )
    plt.tight_layout()

    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    return Image(buffer, format_=ImageFormat.PNG)
def plot_boxplots(self) -> Image:
    """
    Plot a boxplot for every numerical column.

    Returns
    -------
    plot: Image
        The plot as an image.

    Raises
    ------
    NonNumericColumnError
        If the table contains only non-numerical columns.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table({"a":[1, 2], "b": [3, 42]})
    >>> image = table.plot_boxplots()
    """
    numerical_table = self.remove_columns_with_non_numerical_values()
    if numerical_table.number_of_columns == 0:
        raise NonNumericColumnError("This table contains only non-numerical columns.")
    # Lay out at most 3 facets per row.
    col_wrap = min(numerical_table.number_of_columns, 3)
    # Reshape to long format (one (variable, value) pair per cell), which is
    # what FacetGrid expects for one facet per column.
    data = pd.melt(numerical_table._data, value_vars=numerical_table.column_names)
    grid = sns.FacetGrid(data, col="variable", col_wrap=col_wrap, sharex=False, sharey=False)
    with warnings.catch_warnings():
        # seaborn warns when `order` is omitted; with a single box per facet
        # the order is irrelevant, so the warning is suppressed.
        warnings.filterwarnings(
            "ignore",
            message="Using the boxplot function without specifying `order` is likely to produce an incorrect plot.",
        )
        grid.map(sns.boxplot, "variable", "value")
    grid.set_xlabels("")
    grid.set_ylabels("")
    grid.set_titles("{col_name}")
    # Each facet shows exactly one box; x ticks carry no information here.
    for axes in grid.axes.flat:
        axes.set_xticks([])
    plt.tight_layout()
    fig = grid.fig
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    return Image(buffer, format_=ImageFormat.PNG)
def plot_histograms(self) -> Image:
    """
    Plot a histogram for every column.

    Returns
    -------
    plot: Image
        The plot as an image.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table({"a": [2, 3, 5, 1], "b": [54, 74, 90, 2014]})
    >>> image = table.plot_histograms()
    """
    # Lay out at most 3 facets per row.
    col_wrap = min(self.number_of_columns, 3)
    # Stringify every value so non-numerical columns can be histogrammed too,
    # then reshape to long format for one facet per column.
    data = pd.melt(self._data.applymap(str), value_vars=self.column_names)
    grid = sns.FacetGrid(data=data, col="variable", col_wrap=col_wrap, sharex=False, sharey=False)
    grid.map(sns.histplot, "value")
    grid.set_xlabels("")
    grid.set_ylabels("")
    grid.set_titles("{col_name}")
    for axes in grid.axes.flat:
        axes.set_xticks(axes.get_xticks())
        # Rotate the x-axis labels to reduce the chance of them overlapping.
        axes.set_xticklabels(axes.get_xticklabels(), rotation=45, horizontalalignment="right")
    grid.tight_layout()
    fig = grid.fig
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    # Pass the format as a keyword for consistency with the other plot methods.
    return Image(buffer, format_=ImageFormat.PNG)
# ------------------------------------------------------------------------------------------------------------------
# Conversion
# ------------------------------------------------------------------------------------------------------------------
def to_csv_file(self, path: str | Path) -> None:
    """
    Write the data from the table into a CSV file.

    If the file and/or the directories do not exist they will be created. If the file already exists it will be
    overwritten.

    Parameters
    ----------
    path : str | Path
        The path to the output file.

    Raises
    ------
    WrongFileExtensionError
        If the file is not a csv file.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 2, 3], "b": [4, 5, 6]})
    >>> table.to_csv_file("./src/resources/to_csv_file.csv")
    """
    output_path = Path(path)
    if output_path.suffix != ".csv":
        raise WrongFileExtensionError(output_path, ".csv")
    # Create missing parent directories before writing.
    output_path.parent.mkdir(parents=True, exist_ok=True)
    frame = self._data.copy()
    frame.columns = self._schema.column_names
    frame.to_csv(output_path, index=False)
def to_excel_file(self, path: str | Path) -> None:
    """
    Write the data from the table into an Excel file.

    Valid file extensions are `.xls`, '.xlsx', `.xlsm`, `.xlsb`, `.odf`, `.ods` and `.odt`.
    If the file and/or the directories do not exist, they will be created. If the file already exists, it will be
    overwritten.

    Parameters
    ----------
    path : str | Path
        The path to the output file.

    Raises
    ------
    WrongFileExtensionError
        If the file is not an Excel file.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 2, 3], "b": [4, 5, 6]})
    >>> table.to_excel_file("./src/resources/to_excel_file.xlsx")
    """
    path = Path(path)
    excel_extensions = [".xls", ".xlsx", ".xlsm", ".xlsb", ".odf", ".ods", ".odt"]
    if path.suffix not in excel_extensions:
        raise WrongFileExtensionError(path, excel_extensions)
    # Create missing parent directories BEFORE saving the workbook; the
    # original code saved first, which failed for paths in directories that
    # do not exist yet, contradicting the documented behavior.
    path.parent.mkdir(parents=True, exist_ok=True)
    # Create Excel metadata in the file
    tmp_table_file = openpyxl.Workbook()
    tmp_table_file.save(path)
    data_to_excel = self._data.copy()
    data_to_excel.columns = self._schema.column_names
    data_to_excel.to_excel(path)
def to_json_file(self, path: str | Path) -> None:
    """
    Write the data from the table into a JSON file.

    If the file and/or the directories do not exist, they will be created. If the file already exists it will be
    overwritten.

    Parameters
    ----------
    path : str | Path
        The path to the output file.

    Raises
    ------
    WrongFileExtensionError
        If the file is not a JSON file.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a": [1, 2, 3], "b": [4, 5, 6]})
    >>> table.to_json_file("./src/resources/to_json_file.json")
    """
    output_path = Path(path)
    if output_path.suffix != ".json":
        raise WrongFileExtensionError(output_path, ".json")
    # Create missing parent directories before writing.
    output_path.parent.mkdir(parents=True, exist_ok=True)
    frame = self._data.copy()
    frame.columns = self._schema.column_names
    frame.to_json(output_path)
def to_dict(self) -> dict[str, list[Any]]:
    """
    Return a dictionary that maps column names to column values.

    Returns
    -------
    data : dict[str, list[Any]]
        Dictionary representation of the table.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> row1 = Row({"a": 1, "b": 5})
    >>> row2 = Row({"a": 2, "b": 6})
    >>> table1 = Table.from_rows([row1, row2])
    >>> table2 = Table.from_dict({"a": [1, 2], "b": [5, 6]})
    >>> table1 == table2
    True
    """
    result: dict[str, list[Any]] = {}
    for column_name in self.column_names:
        result[column_name] = list(self.get_column(column_name))
    return result
def to_html(self) -> str:
    """
    Return an HTML representation of the table.

    Returns
    -------
    output : str
        The generated HTML.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table({"a": [1, 2, 3], "b": [4, 5, 6]})
    >>> html = table.to_html()
    """
    # Render every row and column; pandas would otherwise truncate the output.
    row_count, column_count = self._data.shape
    return self._data.to_html(max_rows=row_count, max_cols=column_count)
def to_columns(self) -> list[Column]:
    """
    Return a list of the columns.

    Returns
    -------
    columns : list[Columns]
        List of columns.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a":[1, 2],"b":[20, 30]})
    >>> table.to_columns()
    [Column('a', [1, 2]), Column('b', [20, 30])]
    """
    # Materialize each column in schema order.
    return [self.get_column(column_name) for column_name in self._schema.column_names]
def to_rows(self) -> list[Row]:
    """
    Return a list of the rows.

    Returns
    -------
    rows : list[Row]
        List of rows.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table
    >>> table = Table.from_dict({"a":[1, 2],"b":[20, 30]})
    >>> table.to_rows()
    [Row({
        'a': 1,
        'b': 20
    }), Row({
        'a': 2,
        'b': 30
    })]
    """
    rows: list[Row] = []
    for _, series_row in self._data.iterrows():
        # Each row becomes a one-row DataFrame carrying the table's schema.
        row_frame = pd.DataFrame([list(series_row)], columns=self._schema.column_names)
        rows.append(Row._from_pandas_dataframe(row_frame, self._schema))
    return rows
# ------------------------------------------------------------------------------------------------------------------
# IPython integration
# ------------------------------------------------------------------------------------------------------------------
def _repr_html_(self) -> str:
    """
    Return an HTML representation of the table.

    Returns
    -------
    output : str
        The generated HTML.
    """
    # Render every row and column in notebook style; pandas would otherwise
    # truncate the output.
    row_count, column_count = self._data.shape
    return self._data.to_html(max_rows=row_count, max_cols=column_count, notebook=True)
# ------------------------------------------------------------------------------------------------------------------
# Dataframe interchange protocol
# ------------------------------------------------------------------------------------------------------------------
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):  # type: ignore[no-untyped-def]
    """
    Return a DataFrame exchange object that conforms to the dataframe interchange protocol.

    Generally, there is no reason to call this method directly. The dataframe interchange protocol is designed to
    allow libraries to consume tabular data from different sources, such as `pandas` or `polars`. If you still
    decide to call this method, you should not rely on any capabilities of the returned object beyond the dataframe
    interchange protocol.

    The specification of the dataframe interchange protocol can be found on
    [GitHub](https://github.com/data-apis/dataframe-api).

    Parameters
    ----------
    nan_as_null : bool
        Whether to replace missing values in the data with `NaN`.
    allow_copy : bool
        Whether memory may be copied to create the DataFrame exchange object.

    Returns
    -------
    dataframe
        A DataFrame object that conforms to the dataframe interchange protocol.
    """
    if not allow_copy:
        raise NotImplementedError("For the moment we need to copy the data, so `allow_copy` must be True.")
    # Work on a copy so renaming columns does not touch this table's data.
    frame = self._data.copy()
    frame.columns = self.column_names
    return frame.__dataframe__(nan_as_null, allow_copy)
# ------------------------------------------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------------------------------------------
def _copy(self) -> Table:
    """
    Return a copy of this table.

    Returns
    -------
    table : Table
        The copy of this table.
    """
    # A deep copy also duplicates the underlying pandas DataFrame.
    return copy.deepcopy(self)
from __future__ import annotations
import copy
from typing import TYPE_CHECKING
from safeds.data.tabular.containers import Column, Row, Table
from safeds.exceptions import (
ColumnIsTargetError,
IllegalSchemaModificationError,
UnknownColumnNameError,
)
if TYPE_CHECKING:
from collections.abc import Callable, Mapping, Sequence
from typing import Any
class TaggedTable(Table):
"""
A tagged table is a table that additionally knows which columns are features and which are the target to predict.
Parameters
----------
data : Mapping[str, Sequence[Any]]
The data.
target_name : str
Name of the target column.
feature_names : list[str] | None
Names of the feature columns. If None, all columns except the target column are used.
Raises
------
ColumnLengthMismatchError
If columns have different lengths.
ValueError
If the target column is also a feature column.
ValueError
If no feature columns are specified.
Examples
--------
>>> from safeds.data.tabular.containers import Table, TaggedTable
>>> table = Table({"col1": ["a", "b"], "col2": [1, 2]})
>>> tagged_table = table.tag_columns("col2", ["col1"])
"""
# ------------------------------------------------------------------------------------------------------------------
# Creation
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def _from_table(
    table: Table,
    target_name: str,
    feature_names: list[str] | None = None,
) -> TaggedTable:
    """
    Create a tagged table from a table.

    Parameters
    ----------
    table : Table
        The table.
    target_name : str
        Name of the target column.
    feature_names : list[str] | None
        Names of the feature columns. If None, all columns except the target column are used.

    Returns
    -------
    tagged_table : TaggedTable
        The created table.

    Raises
    ------
    UnknownColumnNameError
        If target_name matches none of the column names.
    ValueError
        If the target column is also a feature column.
    ValueError
        If no feature columns are specified.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Table, TaggedTable
    >>> table = Table({"col1": ["a", "b", "c", "a"], "col2": [1, 2, 3, 4]})
    >>> tagged_table = TaggedTable._from_table(table, "col2", ["col1"])
    """
    table = table._as_table()
    if target_name not in table.column_names:
        raise UnknownColumnNameError([target_name])

    # Default: every column except the target is a feature.
    if feature_names is None:
        feature_names = [name for name in table.column_names if name != target_name]

    # Validate inputs
    if target_name in feature_names:
        raise ValueError(f"Column '{target_name}' cannot be both feature and target.")
    if len(feature_names) == 0:
        raise ValueError("At least one feature column must be specified.")

    # Bypass __init__ (which would re-copy the data) and wire up the fields
    # directly from the already-validated table.
    result = object.__new__(TaggedTable)
    result._data = table._data
    result._schema = table.schema
    result._features = table.keep_only_columns(feature_names)
    result._target = table.get_column(target_name)
    return result
# ------------------------------------------------------------------------------------------------------------------
# Dunder methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(
    self,
    data: Mapping[str, Sequence[Any]],
    target_name: str,
    feature_names: list[str] | None = None,
):
    """
    Create a tagged table from a mapping of column names to their values.

    Parameters
    ----------
    data : Mapping[str, Sequence[Any]]
        The data.
    target_name : str
        Name of the target column.
    feature_names : list[str] | None
        Names of the feature columns. If None, all columns except the target column are used.

    Raises
    ------
    ColumnLengthMismatchError
        If columns have different lengths.
    ValueError
        If the target column is also a feature column.
    ValueError
        If no feature columns are specified.

    Examples
    --------
    >>> from safeds.data.tabular.containers import TaggedTable
    >>> table = TaggedTable({"a": [1, 2, 3], "b": [4, 5, 6]}, "b", ["a"])
    """
    super().__init__(data)
    _table = Table(data)

    # Default: every column except the target is a feature.
    if feature_names is None:
        feature_names = [name for name in _table.column_names if name != target_name]

    # Validate inputs
    if target_name in feature_names:
        raise ValueError(f"Column '{target_name}' cannot be both feature and target.")
    if not feature_names:
        raise ValueError("At least one feature column must be specified.")

    self._features: Table = _table.keep_only_columns(feature_names)
    self._target: Column = _table.get_column(target_name)
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
@property
def features(self) -> Table:
    """
    Get the feature columns of the tagged table.

    Returns
    -------
    Table
        The table containing the feature columns.
    """
    return self._features
@property
def target(self) -> Column:
    """
    Get the target column of the tagged table.

    Returns
    -------
    Column
        The target column.
    """
    return self._target
# ------------------------------------------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------------------------------------------
def _copy(self) -> TaggedTable:
    """
    Return a copy of this tagged table.

    Returns
    -------
    table : TaggedTable
        The copy of this tagged table.
    """
    # A deep copy also duplicates the features table and the target column.
    return copy.deepcopy(self)
# ------------------------------------------------------------------------------------------------------------------
# Specific methods from TaggedTable class:
# ------------------------------------------------------------------------------------------------------------------
def add_column_as_feature(self, column: Column) -> TaggedTable:
    """
    Return a new table with the provided column attached at the end, as a feature column.

    The original table is not modified.

    Parameters
    ----------
    column : Column
        The column to be added.

    Returns
    -------
    result : TaggedTable
        The table with the attached feature column.

    Raises
    ------
    DuplicateColumnNameError
        If the new column already exists.
    ColumnSizeError
        If the size of the column does not match the number of rows.
    """
    # The new column is appended to the existing features.
    extended = super().add_column(column)
    new_features = [*self.features.column_names, column.name]
    return TaggedTable._from_table(extended, target_name=self.target.name, feature_names=new_features)
def add_columns_as_features(self, columns: list[Column] | Table) -> TaggedTable:
    """
    Return a new `TaggedTable` with the provided columns attached at the end, as feature columns.

    The original table is not modified.

    Parameters
    ----------
    columns : list[Column] | Table
        The columns to be added as features.

    Returns
    -------
    result : TaggedTable
        The table with the attached feature columns.

    Raises
    ------
    DuplicateColumnNameError
        If any of the new feature columns already exist.
    ColumnSizeError
        If the size of any feature column does not match the number of rows.
    """
    # Accept either a Table or a plain list of columns.
    if isinstance(columns, Table):
        added_names = [col.name for col in columns.to_columns()]
    else:
        added_names = [col.name for col in columns]
    return TaggedTable._from_table(
        super().add_columns(columns),
        target_name=self.target.name,
        feature_names=self.features.column_names + added_names,
    )
# ------------------------------------------------------------------------------------------------------------------
# Overriden methods from Table class:
# ------------------------------------------------------------------------------------------------------------------
def _as_table(self: TaggedTable) -> Table:
    """
    Return a new `Table` with the tagging removed.

    The original TaggedTable is not modified.

    Parameters
    ----------
    self: TaggedTable
        The TaggedTable.

    Returns
    -------
    table: Table
        The table as an untagged Table, i.e. without the information about which columns are features or target.
    """
    # Rebuilding from the columns yields a plain Table instance.
    return Table.from_columns(super().to_columns())
def add_column(self, column: Column) -> TaggedTable:
    """
    Return a new `TaggedTable` with the provided column attached at the end, as neither target nor feature column.

    The original table is not modified.

    Parameters
    ----------
    column : Column
        The column to be added.

    Returns
    -------
    result : TaggedTable
        The table with the column attached as neither target nor feature column.

    Raises
    ------
    DuplicateColumnNameError
        If the new column already exists.
    ColumnSizeError
        If the size of the column does not match the number of rows.
    """
    # The feature set is passed through unchanged: the new column is untagged.
    extended = super().add_column(column)
    return TaggedTable._from_table(
        extended,
        target_name=self.target.name,
        feature_names=self.features.column_names,
    )
def add_columns(self, columns: list[Column] | Table) -> TaggedTable:
    """
    Return a new `TaggedTable` with multiple added columns, as neither target nor feature columns.

    The original table is not modified.

    Parameters
    ----------
    columns : list[Column] or Table
        The columns to be added.

    Returns
    -------
    result: TaggedTable
        A new table combining the original table and the given columns as neither target nor feature columns.

    Raises
    ------
    DuplicateColumnNameError
        If at least one column name from the provided column list already exists in the table.
    ColumnSizeError
        If at least one of the column sizes from the provided column list does not match the table.
    """
    # The feature set is passed through unchanged: new columns are untagged.
    extended = super().add_columns(columns)
    return TaggedTable._from_table(
        extended,
        target_name=self.target.name,
        feature_names=self.features.column_names,
    )
def add_row(self, row: Row) -> TaggedTable:
"""
Return a new `TaggedTable` with an added Row attached.
The original table is not modified.
Parameters
----------
row : Row
The row to be added.
Returns
-------
table : TaggedTable
A new table with the added row at the end.
Raises
------
UnknownColumnNameError
If the row has different column names than the table.
"""
return TaggedTable._from_table(super().add_row(row), target_name=self.target.name)
def add_rows(self, rows: list[Row] | Table) -> TaggedTable:
"""
Return a new `TaggedTable` with multiple added Rows attached.
The original table is not modified.
Parameters
----------
rows : list[Row] or Table
The rows to be added.
Returns
-------
result : TaggedTable
A new table which combines the original table and the given rows.
Raises
------
UnknownColumnNameError
If at least one of the rows have different column names than the table.
"""
return TaggedTable._from_table(super().add_rows(rows), target_name=self.target.name)
def filter_rows(self, query: Callable[[Row], bool]) -> TaggedTable:
"""
Return a new `TaggedTable` containing only rows that match the given Callable (e.g. lambda function).
The original table is not modified.
Parameters
----------
query : lambda function
A Callable that is applied to all rows.
Returns
-------
table : TaggedTable
A table containing only the rows to match the query.
"""
return TaggedTable._from_table(
super().filter_rows(query),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def keep_only_columns(self, column_names: list[str]) -> TaggedTable:
"""
Return a new `TaggedTable` with only the given column(s).
The original table is not modified.
Parameters
----------
column_names : list[str]
A list containing only the columns to be kept.
Returns
-------
table : TaggedTable
A table containing only the given column(s).
Raises
------
UnknownColumnNameError
If any of the given columns does not exist.
IllegalSchemaModificationError
If none of the given columns is the target column or any of the feature columns.
"""
if self.target.name not in column_names:
raise IllegalSchemaModificationError("Must keep the target column.")
if len(set(self.features.column_names).intersection(set(column_names))) == 0:
raise IllegalSchemaModificationError("Must keep at least one feature column.")
return TaggedTable._from_table(
super().keep_only_columns(column_names),
target_name=self.target.name,
feature_names=sorted(
set(self.features.column_names).intersection(set(column_names)),
key={val: ix for ix, val in enumerate(self.features.column_names)}.__getitem__,
),
)
def remove_columns(self, column_names: list[str]) -> TaggedTable:
"""
Return a new `TaggedTable` with the given column(s) removed from the table.
The original table is not modified.
Parameters
----------
column_names : list[str]
The names of all columns to be dropped.
Returns
-------
table : TaggedTable
A table without the given columns.
Raises
------
UnknownColumnNameError
If any of the given columns does not exist.
ColumnIsTargetError
If any of the given columns is the target column.
IllegalSchemaModificationError
If the given columns contain all the feature columns.
"""
if self.target.name in column_names:
raise ColumnIsTargetError(self.target.name)
if len(set(self.features.column_names) - set(column_names)) == 0:
raise IllegalSchemaModificationError("You cannot remove every feature column.")
return TaggedTable._from_table(
super().remove_columns(column_names),
target_name=self.target.name,
feature_names=sorted(
set(self.features.column_names) - set(column_names),
key={val: ix for ix, val in enumerate(self.features.column_names)}.__getitem__,
),
)
def remove_columns_with_missing_values(self) -> TaggedTable:
"""
Return a new `TaggedTable` with every column that misses values removed.
The original table is not modified.
Returns
-------
table : TaggedTable
A table without the columns that contain missing values.
Raises
------
ColumnIsTargetError
If any of the columns to be removed is the target column.
IllegalSchemaModificationError
If the columns to remove contain all the feature columns.
"""
table = super().remove_columns_with_missing_values()
if self.target.name not in table.column_names:
raise ColumnIsTargetError(self.target.name)
if len(set(self.features.column_names).intersection(set(table.column_names))) == 0:
raise IllegalSchemaModificationError("You cannot remove every feature column.")
return TaggedTable._from_table(
table,
self.target.name,
feature_names=sorted(
set(self.features.column_names).intersection(set(table.column_names)),
key={val: ix for ix, val in enumerate(self.features.column_names)}.__getitem__,
),
)
def remove_columns_with_non_numerical_values(self) -> TaggedTable:
"""
Return a new `TaggedTable` with every column that contains non-numerical values removed.
The original table is not modified.
Returns
-------
table : TaggedTable
A table without the columns that contain non-numerical values.
Raises
------
ColumnIsTargetError
If any of the columns to be removed is the target column.
IllegalSchemaModificationError
If the columns to remove contain all the feature columns.
"""
table = super().remove_columns_with_non_numerical_values()
if self.target.name not in table.column_names:
raise ColumnIsTargetError(self.target.name)
if len(set(self.features.column_names).intersection(set(table.column_names))) == 0:
raise IllegalSchemaModificationError("You cannot remove every feature column.")
return TaggedTable._from_table(
table,
self.target.name,
feature_names=sorted(
set(self.features.column_names).intersection(set(table.column_names)),
key={val: ix for ix, val in enumerate(self.features.column_names)}.__getitem__,
),
)
def remove_duplicate_rows(self) -> TaggedTable:
"""
Return a new `TaggedTable` with all row duplicates removed.
The original table is not modified.
Returns
-------
result : TaggedTable
The table with the duplicate rows removed.
"""
return TaggedTable._from_table(
super().remove_duplicate_rows(),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def remove_rows_with_missing_values(self) -> TaggedTable:
"""
Return a new `TaggedTable` without the rows that contain missing values.
The original table is not modified.
Returns
-------
table : TaggedTable
A table without the rows that contain missing values.
"""
return TaggedTable._from_table(
super().remove_rows_with_missing_values(),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def remove_rows_with_outliers(self) -> TaggedTable:
"""
Return a new `TaggedTable` with all rows that contain at least one outlier removed.
We define an outlier as a value that has a distance of more than 3 standard deviations from the column mean.
Missing values are not considered outliers. They are also ignored during the calculation of the standard
deviation.
The original table is not modified.
Returns
-------
new_table : TaggedTable
A new table without rows containing outliers.
"""
return TaggedTable._from_table(
super().remove_rows_with_outliers(),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def rename_column(self, old_name: str, new_name: str) -> TaggedTable:
"""
Return a new `TaggedTable` with a single column renamed.
The original table is not modified.
Parameters
----------
old_name : str
The old name of the target column.
new_name : str
The new name of the target column.
Returns
-------
table : TaggedTable
The Table with the renamed column.
Raises
------
UnknownColumnNameError
If the specified old target column name does not exist.
DuplicateColumnNameError
If the specified new target column name already exists.
"""
return TaggedTable._from_table(
super().rename_column(old_name, new_name),
target_name=new_name if self.target.name == old_name else self.target.name,
feature_names=(
self.features.column_names
if old_name not in self.features.column_names
else [
column_name if column_name != old_name else new_name for column_name in self.features.column_names
]
),
)
def replace_column(self, old_column_name: str, new_columns: list[Column]) -> TaggedTable:
"""
Return a new `TaggedTable` with the specified old column replaced by a list of new columns.
If the column to be replaced is the target column, it must be replaced by exactly one column. That column
becomes the new target column. If the column to be replaced is a feature column, the new columns that replace it
all become feature columns.
The order of columns is kept. The original table is not modified.
Parameters
----------
old_column_name : str
The name of the column to be replaced.
new_columns : list[Column]
The new columns replacing the old column.
Returns
-------
result : TaggedTable
A table with the old column replaced by the new column.
Raises
------
UnknownColumnNameError
If the old column does not exist.
DuplicateColumnNameError
If the new column already exists and the existing column is not affected by the replacement.
ColumnSizeError
If the size of the column does not match the amount of rows.
IllegalSchemaModificationError
If the target column would be removed or replaced by more than one column.
"""
if old_column_name == self.target.name:
if len(new_columns) != 1:
raise IllegalSchemaModificationError(
f'Target column "{self.target.name}" can only be replaced by exactly one new column.',
)
else:
return TaggedTable._from_table(
super().replace_column(old_column_name, new_columns),
target_name=new_columns[0].name,
feature_names=self.features.column_names,
)
else:
return TaggedTable._from_table(
super().replace_column(old_column_name, new_columns),
target_name=self.target.name,
feature_names=(
self.features.column_names
if old_column_name not in self.features.column_names
else self.features.column_names[: self.features.column_names.index(old_column_name)]
+ [col.name for col in new_columns]
+ self.features.column_names[self.features.column_names.index(old_column_name) + 1 :]
),
)
def shuffle_rows(self) -> TaggedTable:
"""
Return a new `TaggedTable` with randomly shuffled rows of this table.
The original table is not modified.
Returns
-------
result : TaggedTable
The shuffled Table.
"""
return TaggedTable._from_table(
super().shuffle_rows(),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def slice_rows(
self,
start: int | None = None,
end: int | None = None,
step: int = 1,
) -> TaggedTable:
"""
Slice a part of the table into a new `TaggedTable`.
The original table is not modified.
Parameters
----------
start : int | None
The first index of the range to be copied into a new table, None by default.
end : int | None
The last index of the range to be copied into a new table, None by default.
step : int
The step size used to iterate through the table, 1 by default.
Returns
-------
result : TaggedTable
The resulting table.
Raises
------
IndexOutOfBoundsError
If the index is out of bounds.
"""
return TaggedTable._from_table(
super().slice_rows(start, end, step),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def sort_columns(
self,
comparator: Callable[[Column, Column], int] = lambda col1, col2: (col1.name > col2.name)
- (col1.name < col2.name),
) -> TaggedTable:
"""
Sort the columns of a `TaggedTable` with the given comparator and return a new `TaggedTable`.
The comparator is a function that takes two columns `col1` and `col2` and
returns an integer:
* If the function returns a negative number, `col1` will be ordered before `col2`.
* If the function returns a positive number, `col1` will be ordered after `col2`.
* If the function returns 0, the original order of `col1` and `col2` will be kept.
If no comparator is given, the columns will be sorted alphabetically by their name.
The original table is not modified.
Parameters
----------
comparator : Callable[[Column, Column], int]
The function used to compare two columns.
Returns
-------
new_table : TaggedTable
A new table with sorted columns.
"""
sorted_table = super().sort_columns(comparator)
return TaggedTable._from_table(
sorted_table,
target_name=self.target.name,
feature_names=sorted(
set(sorted_table.column_names).intersection(self.features.column_names),
key={val: ix for ix, val in enumerate(sorted_table.column_names)}.__getitem__,
),
)
def sort_rows(self, comparator: Callable[[Row, Row], int]) -> TaggedTable:
"""
Sort the rows of a `TaggedTable` with the given comparator and return a new `TaggedTable`.
The comparator is a function that takes two rows `row1` and `row2` and
returns an integer:
* If the function returns a negative number, `row1` will be ordered before `row2`.
* If the function returns a positive number, `row1` will be ordered after `row2`.
* If the function returns 0, the original order of `row1` and `row2` will be kept.
The original table is not modified.
Parameters
----------
comparator : Callable[[Row, Row], int]
The function used to compare two rows.
Returns
-------
new_table : TaggedTable
A new table with sorted rows.
"""
return TaggedTable._from_table(
super().sort_rows(comparator),
target_name=self.target.name,
feature_names=self.features.column_names,
)
def transform_column(self, name: str, transformer: Callable[[Row], Any]) -> TaggedTable:
"""
Return a new `TaggedTable` with the provided column transformed by calling the provided transformer.
The original table is not modified.
Returns
-------
result : TaggedTable
The table with the transformed column.
Raises
------
UnknownColumnNameError
If the column does not exist.
"""
return TaggedTable._from_table(
super().transform_column(name, transformer),
target_name=self.target.name,
feature_names=self.features.column_names,
) | /safe_ds-0.15.0.tar.gz/safe_ds-0.15.0/src/safeds/data/tabular/containers/_tagged_table.py | 0.932974 | 0.430088 | _tagged_table.py | pypi |
from __future__ import annotations
import copy
import functools
from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING, Any
import pandas as pd
from safeds.data.tabular.typing import ColumnType, Schema
from safeds.exceptions import UnknownColumnNameError
if TYPE_CHECKING:
from collections.abc import Iterator
class Row(Mapping[str, Any]):
"""
A row is a collection of named values.
Parameters
----------
data : Mapping[str, Any] | None
The data. If None, an empty row is created.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
"""
# ------------------------------------------------------------------------------------------------------------------
# Creation
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def from_dict(data: dict[str, Any]) -> Row:
"""
Create a row from a dictionary that maps column names to column values.
Parameters
----------
data : dict[str, Any]
The data.
Returns
-------
row : Row
The created row.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row.from_dict({"a": 1, "b": 2})
"""
return Row(data)
@staticmethod
def _from_pandas_dataframe(data: pd.DataFrame, schema: Schema | None = None) -> Row:
"""
Create a row from a `pandas.DataFrame`.
Parameters
----------
data : pd.DataFrame
The data.
schema : Schema | None
The schema. If None, the schema is inferred from the data.
Returns
-------
row : Row
The created row.
Raises
------
ValueError
If the dataframe does not contain exactly one row.
Examples
--------
>>> import pandas as pd
>>> from safeds.data.tabular.containers import Row
>>> row = Row._from_pandas_dataframe(pd.DataFrame({"a": [1], "b": [2]}))
"""
if data.shape[0] != 1:
raise ValueError("The dataframe has to contain exactly one row.")
data = data.reset_index(drop=True)
result = object.__new__(Row)
result._data = data
if schema is None:
# noinspection PyProtectedMember
result._schema = Schema._from_pandas_dataframe(data)
else:
result._schema = schema
return result
# ------------------------------------------------------------------------------------------------------------------
# Dunder methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, data: Mapping[str, Any] | None = None) -> None:
"""
Create a row from a mapping of column names to column values.
Parameters
----------
data : Mapping[str, Any] | None
The data. If None, an empty row is created.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
"""
if data is None:
data = {}
data = {key: [value] for key, value in data.items()}
self._data: pd.DataFrame = pd.DataFrame(data)
# noinspection PyProtectedMember
self._schema: Schema = Schema._from_pandas_dataframe(self._data)
def __contains__(self, obj: Any) -> bool:
"""
Check whether the row contains an object as key.
Parameters
----------
obj : Any
The object.
Returns
-------
has_column : bool
True, if the row contains the object as key, False otherwise.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> "a" in row
True
>>> "c" in row
False
"""
return isinstance(obj, str) and self.has_column(obj)
def __eq__(self, other: Any) -> bool:
"""
Check whether this row is equal to another object.
Parameters
----------
other : Any
The other object.
Returns
-------
equal : bool
True if the other object is an identical row. False if the other object is a different row. NotImplemented
if the other object is not a row.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row1 = Row({"a": 1, "b": 2})
>>> row2 = Row({"a": 1, "b": 2})
>>> row1 == row2
True
>>> row3 = Row({"a": 1, "b": 3})
>>> row1 == row3
False
"""
if not isinstance(other, Row):
return NotImplemented
if self is other:
return True
return self._schema == other._schema and self._data.equals(other._data)
def __getitem__(self, column_name: str) -> Any:
"""
Return the value of a specified column.
Parameters
----------
column_name : str
The column name.
Returns
-------
value : Any
The column value.
Raises
------
UnknownColumnNameError
If the row does not contain the specified column.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row["a"]
1
"""
return self.get_value(column_name)
def __iter__(self) -> Iterator[Any]:
"""
Create an iterator for the column names of this row.
Returns
-------
iterator : Iterator[Any]
The iterator.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> list(row)
['a', 'b']
"""
return iter(self.column_names)
def __len__(self) -> int:
"""
Return the number of columns in this row.
Returns
-------
number_of_columns : int
The number of columns.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> len(row)
2
"""
return self._data.shape[1]
def __repr__(self) -> str:
"""
Return an unambiguous string representation of this row.
Returns
-------
representation : str
The string representation.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1})
>>> repr(row)
"Row({'a': 1})"
"""
return f"Row({self!s})"
def __str__(self) -> str:
"""
Return a user-friendly string representation of this row.
Returns
-------
representation : str
The string representation.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1})
>>> str(row)
"{'a': 1}"
"""
match len(self):
case 0:
return "{}"
case 1:
return str(self.to_dict())
case _:
lines = (f" {name!r}: {value!r}" for name, value in self.to_dict().items())
joined = ",\n".join(lines)
return f"{{\n{joined}\n}}"
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
@property
def column_names(self) -> list[str]:
"""
Return a list of all column names in the row.
Returns
-------
column_names : list[str]
The column names.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row.column_names
['a', 'b']
"""
return self._schema.column_names
@property
def number_of_column(self) -> int:
"""
Return the number of columns in this row.
Returns
-------
number_of_column : int
The number of columns.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row.number_of_column
2
"""
return self._data.shape[1]
@property
def schema(self) -> Schema:
"""
Return the schema of the row.
Returns
-------
schema : Schema
The schema.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> schema = row.schema
"""
return self._schema
# ------------------------------------------------------------------------------------------------------------------
# Getters
# ------------------------------------------------------------------------------------------------------------------
def get_value(self, column_name: str) -> Any:
"""
Return the value of a specified column.
Parameters
----------
column_name : str
The column name.
Returns
-------
value : Any
The column value.
Raises
------
UnknownColumnNameError
If the row does not contain the specified column.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row.get_value("a")
1
"""
if not self.has_column(column_name):
raise UnknownColumnNameError([column_name])
return self._data.loc[0, column_name]
def has_column(self, column_name: str) -> bool:
"""
Check whether the row contains a given column.
Parameters
----------
column_name : str
The column name.
Returns
-------
has_column : bool
True, if the row contains the column, False otherwise.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row.has_column("a")
True
>>> row.has_column("c")
False
"""
return self._schema.has_column(column_name)
def get_column_type(self, column_name: str) -> ColumnType:
"""
Return the type of the specified column.
Parameters
----------
column_name : str
The column name.
Returns
-------
type : ColumnType
The type of the column.
Raises
------
UnknownColumnNameError
If the row does not contain the specified column.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row.get_column_type("a")
Integer
"""
return self._schema.get_column_type(column_name)
# ------------------------------------------------------------------------------------------------------------------
# Transformations
# ------------------------------------------------------------------------------------------------------------------
def sort_columns(
self,
comparator: Callable[[tuple, tuple], int] = lambda col1, col2: (col1[0] > col2[0]) - (col1[0] < col2[0]),
) -> Row:
"""
Sort the columns of a `Row` with the given comparator and return a new `Row`.
The original row is not modified. The comparator is a function that takes two tuples of (ColumnName,
Value) `col1` and `col2` and returns an integer:
* If `col1` should be ordered before `col2`, the function should return a negative number.
* If `col1` should be ordered after `col2`, the function should return a positive number.
* If the original order of `col1` and `col2` should be kept, the function should return 0.
If no comparator is given, the columns will be sorted alphabetically by their name.
Parameters
----------
comparator : Callable[[tuple, tuple], int]
The function used to compare two tuples of (ColumnName, Value).
Returns
-------
new_row : Row
A new row with sorted columns.
"""
sorted_row_dict = dict(sorted(self.to_dict().items(), key=functools.cmp_to_key(comparator)))
return Row.from_dict(sorted_row_dict)
# ------------------------------------------------------------------------------------------------------------------
# Conversion
# ------------------------------------------------------------------------------------------------------------------
def to_dict(self) -> dict[str, Any]:
"""
Return a dictionary that maps column names to column values.
Returns
-------
data : dict[str, Any]
Dictionary representation of the row.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> row.to_dict()
{'a': 1, 'b': 2}
"""
return {column_name: self.get_value(column_name) for column_name in self.column_names}
def to_html(self) -> str:
"""
Return an HTML representation of the row.
Returns
-------
output : str
The generated HTML.
Examples
--------
>>> from safeds.data.tabular.containers import Row
>>> row = Row({"a": 1, "b": 2})
>>> html = row.to_html()
"""
return self._data.to_html(max_rows=1, max_cols=self._data.shape[1])
# ------------------------------------------------------------------------------------------------------------------
# IPython integration
# ------------------------------------------------------------------------------------------------------------------
def _repr_html_(self) -> str:
"""
Return an HTML representation of the row.
Returns
-------
output : str
The generated HTML.
"""
return self._data.to_html(max_rows=1, max_cols=self._data.shape[1], notebook=True)
# ------------------------------------------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------------------------------------------
def _copy(self) -> Row:
"""
Return a copy of this row.
Returns
-------
copy : Row
The copy of this row.
"""
return copy.deepcopy(self) | /safe_ds-0.15.0.tar.gz/safe_ds-0.15.0/src/safeds/data/tabular/containers/_row.py | 0.94353 | 0.529993 | _row.py | pypi |
from __future__ import annotations
import copy
import io
from collections.abc import Sequence
from numbers import Number
from typing import TYPE_CHECKING, Any, TypeVar, overload
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from safeds.data.image.containers import Image
from safeds.data.image.typing import ImageFormat
from safeds.data.tabular.typing import ColumnType
from safeds.exceptions import (
ColumnLengthMismatchError,
ColumnSizeError,
IndexOutOfBoundsError,
NonNumericColumnError,
)
if TYPE_CHECKING:
from collections.abc import Callable, Iterator
T = TypeVar("T")
R = TypeVar("R")
class Column(Sequence[T]):
"""
A column is a named collection of values.
Parameters
----------
name : str
The name of the column.
data : Sequence[T]
The data.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column = Column("test", [1, 2, 3])
"""
# ------------------------------------------------------------------------------------------------------------------
# Creation
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def _from_pandas_series(data: pd.Series, type_: ColumnType | None = None) -> Column:
"""
Create a column from a `pandas.Series`.
Parameters
----------
data : pd.Series
The data.
type_ : ColumnType | None
The type. If None, the type is inferred from the data.
Returns
-------
column : Column
The created column.
Examples
--------
>>> import pandas as pd
>>> from safeds.data.tabular.containers import Column
>>> column = Column._from_pandas_series(pd.Series([1, 2, 3], name="test"))
"""
result = object.__new__(Column)
result._name = data.name
result._data = data
# noinspection PyProtectedMember
result._type = type_ if type_ is not None else ColumnType._data_type(data)
return result
# ------------------------------------------------------------------------------------------------------------------
# Dunder methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, name: str, data: Sequence[T] | None = None) -> None:
    """
    Create a column from a name and an optional sequence of values.

    Parameters
    ----------
    name : str
        The name of the column.
    data : Sequence[T] | None
        The data. If None, an empty column is created.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Column
    >>> column = Column("test", [1, 2, 3])
    """
    if data is None:
        data = []
    self._name: str = name
    # An existing series is adopted (renamed to the column name); any other
    # sequence is converted into a fresh series.
    if isinstance(data, pd.Series):
        self._data: pd.Series = data.rename(name)
    else:
        self._data = pd.Series(data, name=name)
    # noinspection PyProtectedMember
    self._type: ColumnType = ColumnType._data_type(self._data)
def __contains__(self, item: Any) -> bool:
return item in self._data
def __eq__(self, other: object) -> bool:
"""
Check whether this column is equal to another object.
Parameters
----------
other : object
The other object.
Returns
-------
equal : bool
True if the other object is an identical column. False if the other object is a different column.
NotImplemented if the other object is not a column.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column1 = Column("test", [1, 2, 3])
>>> column2 = Column("test", [1, 2, 3])
>>> column1 == column2
True
>>> column3 = Column("test", [3, 4, 5])
>>> column1 == column3
False
"""
if not isinstance(other, Column):
return NotImplemented
if self is other:
return True
return self.name == other.name and self._data.equals(other._data)
@overload
def __getitem__(self, index: int) -> T:
...
@overload
def __getitem__(self, index: slice) -> Column[T]:
...
def __getitem__(self, index: int | slice) -> T | Column[T]:
"""
Return the value of the specified row or rows.
Parameters
----------
index : int | slice
The index of the row, or a slice specifying the start and end index.
Returns
-------
value : Any
The single row's value, or rows' values.
Raises
------
IndexOutOfBoundsError
If the given index or indices do not exist in the column.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column = Column("test", [1, 2, 3])
>>> column[0]
1
"""
if isinstance(index, int):
if index < 0 or index >= self._data.size:
raise IndexOutOfBoundsError(index)
return self._data[index]
if isinstance(index, slice):
if index.start < 0 or index.start > self._data.size:
raise IndexOutOfBoundsError(index)
if index.stop < 0 or index.stop > self._data.size:
raise IndexOutOfBoundsError(index)
data = self._data[index].reset_index(drop=True).rename(self.name)
return Column._from_pandas_series(data, self._type)
def __iter__(self) -> Iterator[T]:
r"""
Create an iterator for the data of this column. This way e.g. for-each loops can be used on it.
Returns
-------
iterator : Iterator[Any]
The iterator.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column = Column("test", ["A", "B", "C"])
>>> string = ""
>>> for val in column:
... string += val + ", "
>>> string
'A, B, C, '
"""
return iter(self._data)
def __len__(self) -> int:
"""
Return the size of the column.
Returns
-------
n_rows : int
The size of the column.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column = Column("test", [1, 2, 3])
>>> len(column)
3
"""
return len(self._data)
def __repr__(self) -> str:
"""
Return an unambiguous string representation of this column.
Returns
-------
representation : str
The string representation.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column = Column("test", [1, 2, 3])
>>> repr(column)
"Column('test', [1, 2, 3])"
"""
return f"Column({self._name!r}, {list(self._data)!r})"
def __str__(self) -> str:
"""
Return a user-friendly string representation of this column.
Returns
-------
representation : str
The string representation.
Examples
--------
>>> from safeds.data.tabular.containers import Column
>>> column = Column("test", [1, 2, 3])
>>> str(column)
"'test': [1, 2, 3]"
"""
return f"{self._name!r}: {list(self._data)!r}"
# ------------------------------------------------------------------------------------------------------------------
# Properties
# ------------------------------------------------------------------------------------------------------------------
@property
def name(self) -> str:
    """
    The name of this column (read-only).

    Returns
    -------
    name : str
        The name of the column.
    """
    return self._name
@property
def number_of_rows(self) -> int:
    """
    The number of elements in this column (read-only).

    Returns
    -------
    number_of_rows : int
        The number of elements.
    """
    return len(self._data)
@property
def type(self) -> ColumnType:
    """
    The type of this column (read-only).

    Returns
    -------
    type : ColumnType
        The type of the column, e.g. ``Integer`` or ``String``.
    """
    return self._type
# ------------------------------------------------------------------------------------------------------------------
# Getters
# ------------------------------------------------------------------------------------------------------------------
def get_unique_values(self) -> list[T]:
    """
    Return all distinct values of this column, in order of first appearance.

    Returns
    -------
    unique_values : list[T]
        List of unique values in the column.
    """
    # Series.unique preserves first-seen order; wrap in a plain list for the caller.
    unique_values = self._data.unique()
    return list(unique_values)
def get_value(self, index: int) -> T:
    """
    Return the column value at the given position, starting at 0.

    Parameters
    ----------
    index : int
        Index of the requested element.

    Returns
    -------
    value
        Value at that index.

    Raises
    ------
    IndexOutOfBoundsError
        If the given index does not exist in the column.
    """
    # Reject anything outside [0, size).
    if not 0 <= index < self._data.size:
        raise IndexOutOfBoundsError(index)
    return self._data[index]
# ------------------------------------------------------------------------------------------------------------------
# Information
# ------------------------------------------------------------------------------------------------------------------
def all(self, predicate: Callable[[T], bool]) -> bool:
    """
    Check whether every value in the column satisfies the given predicate.

    Parameters
    ----------
    predicate : Callable[[T], bool]
        Callable that is tested against each value.

    Returns
    -------
    result : bool
        True if all values match, False as soon as one does not.
    """
    # Explicit loop with early exit; short-circuits exactly like the builtin.
    for value in self._data:
        if not predicate(value):
            return False
    return True
def any(self, predicate: Callable[[T], bool]) -> bool:
    """
    Check whether at least one value in the column satisfies the given predicate.

    Parameters
    ----------
    predicate : Callable[[T], bool]
        Callable that is tested against each value.

    Returns
    -------
    result : bool
        True as soon as one value matches, otherwise False.
    """
    # Explicit loop with early exit; short-circuits exactly like the builtin.
    for value in self._data:
        if predicate(value):
            return True
    return False
def none(self, predicate: Callable[[T], bool]) -> bool:
    """
    Check that no value in the column satisfies the given predicate.

    Parameters
    ----------
    predicate : Callable[[T], bool]
        Callable that is tested against each value.

    Returns
    -------
    result : bool
        True if no value matches, False as soon as one does.
    """
    for value in self._data:
        if predicate(value):
            return False
    return True
def has_missing_values(self) -> bool:
    """
    Return whether the column contains missing values (None or NaN).

    Returns
    -------
    missing_values_exist : bool
        True if at least one missing value exists.
    """

    def _is_missing(value: object) -> bool:
        # None is always missing; for numbers, NaN also counts as missing.
        if value is None:
            return True
        return isinstance(value, Number) and np.isnan(value)

    return self.any(_is_missing)
# ------------------------------------------------------------------------------------------------------------------
# Transformations
# ------------------------------------------------------------------------------------------------------------------
def rename(self, new_name: str) -> Column:
    """
    Return a copy of this column with a different name.

    The original column is not modified.

    Parameters
    ----------
    new_name : str
        The new name of the column.

    Returns
    -------
    column : Column
        A new column with the new name and the same data and type.
    """
    renamed_data = self._data.rename(new_name)
    return Column._from_pandas_series(renamed_data, self._type)
def transform(self, transformer: Callable[[T], R]) -> Column[R]:
    """
    Apply a transformation to every value of this column.

    The original column is not modified.

    Parameters
    ----------
    transformer : Callable[[T], R]
        Function applied to each data point.

    Returns
    -------
    transformed_column : Column
        A new column holding the transformed values.
    """
    transformed_data = self._data.apply(transformer, convert_dtype=True)
    return Column(self.name, transformed_data)
# ------------------------------------------------------------------------------------------------------------------
# Statistics
# ------------------------------------------------------------------------------------------------------------------
def correlation_with(self, other_column: Column) -> float:
    """
    Calculate the Pearson correlation between this and another column.

    Both columns have to be numerical and of the same length.

    Parameters
    ----------
    other_column : Column
        The column to correlate with.

    Returns
    -------
    correlation : float
        Correlation between the two columns.

    Raises
    ------
    NonNumericColumnError
        If one of the columns is not numerical.
    ColumnLengthMismatchError
        If the columns have different lengths.
    """
    # Both columns must be numeric before a correlation is meaningful.
    if not (self._type.is_numeric() and other_column._type.is_numeric()):
        raise NonNumericColumnError(
            f"Columns must be numerical. {self.name} is {self._type}, {other_column.name} is {other_column._type}.",
        )
    # Pearson correlation is only defined for equally long columns.
    if self._data.size != other_column._data.size:
        raise ColumnLengthMismatchError(
            f"{self.name} is of size {self._data.size}, {other_column.name} is of size {other_column._data.size}.",
        )
    return self._data.corr(other_column._data)
def idness(self) -> float:
    r"""
    Calculate the idness of this column.

    We define the idness as follows:

    $$
    \frac{\text{number of different values}}{\text{number of rows}}
    $$

    Returns
    -------
    idness : float
        The idness of the column; 1.0 means every value is unique.

    Raises
    ------
    ColumnSizeError
        If this column is empty.
    """
    size = self._data.size
    # The ratio is undefined for an empty column.
    if size == 0:
        raise ColumnSizeError("> 0", "0")
    return self._data.nunique() / size
def maximum(self) -> float:
    """
    Return the maximum value of the column. The column has to be numerical.

    Returns
    -------
    max : float
        The maximum value.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self._type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    largest = self._data.max()
    return largest
def mean(self) -> float:
    """
    Return the mean value of the column. The column has to be numerical.

    Returns
    -------
    mean : float
        The arithmetic mean of all values.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self._type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    average = self._data.mean()
    return average
def median(self) -> float:
    """
    Return the median value of the column. The column has to be numerical.

    For an even number of elements this is the mean of the two middle values.

    Returns
    -------
    median : float
        The median value.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self._type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    middle = self._data.median()
    return middle
def minimum(self) -> float:
    """
    Return the minimum value of the column. The column has to be numerical.

    Returns
    -------
    min : float
        The minimum value.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self._type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    smallest = self._data.min()
    return smallest
def missing_value_ratio(self) -> float:
    """
    Return the ratio of missing values to the total number of elements.

    Returns
    -------
    ratio : float
        Missing values divided by the total number of elements.

    Raises
    ------
    ColumnSizeError
        If the column is empty.
    """
    size = self._data.size
    # The ratio is undefined for an empty column.
    if size == 0:
        raise ColumnSizeError("> 0", "0")
    return self._count_missing_values() / size
def mode(self) -> list[T]:
    """
    Return the most common value(s) of the column.

    Returns
    -------
    mode : list[T]
        A list with the most common values; more than one entry on ties.
    """
    most_common = self._data.mode()
    return most_common.tolist()
def stability(self) -> float:
    r"""
    Calculate the stability of this column.

    We define the stability as follows:

    $$
    \frac{\text{number of occurrences of most common non-null value}}{\text{number of non-null values}}
    $$

    The stability is not defined for a column with only null values.

    Returns
    -------
    stability : float
        The stability of the column.

    Raises
    ------
    ColumnSizeError
        If the column is empty.
    ValueError
        If the column contains only null values.
    """
    if self._data.size == 0:
        raise ColumnSizeError("> 0", "0")
    if self.all(lambda x: x is None):
        raise ValueError("Stability is not definded for a column with only null values.")
    # Occurrences of the most common value over the count of non-null values.
    most_common = self.mode()[0]
    return self._data.value_counts()[most_common] / self._data.count()
def standard_deviation(self) -> float:
    """
    Return the standard deviation of the column. The column has to be numerical.

    Returns
    -------
    sum : float
        The (sample) standard deviation of all values.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self.type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    deviation = self._data.std()
    return deviation
def sum(self) -> float:
    """
    Return the sum of the column. The column has to be numerical.

    Returns
    -------
    sum : float
        The sum of all values.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self.type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    total = self._data.sum()
    return total
def variance(self) -> float:
    """
    Return the variance of the column. The column has to be numerical.

    Returns
    -------
    sum : float
        The (sample) variance of all values.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self.type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    spread = self._data.var()
    return spread
# ------------------------------------------------------------------------------------------------------------------
# Plotting
# ------------------------------------------------------------------------------------------------------------------
def plot_boxplot(self) -> Image:
    """
    Plot this column in a boxplot. This function can only plot real numerical data.

    Returns
    -------
    plot : Image
        The plot as a PNG image.

    Raises
    ------
    NonNumericColumnError
        If the data contains non-numerical data.
    """
    if not self.type.is_numeric():
        raise NonNumericColumnError(f"{self.name} is of type {self._type}.")
    fig = plt.figure()
    ax = sns.boxplot(data=self._data)
    ax.set(title=self.name)
    # A single-column boxplot needs no x-axis tick marks.
    ax.set_xticks([])
    plt.tight_layout()
    # Render to an in-memory PNG buffer instead of the screen.
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    return Image(buffer, ImageFormat.PNG)
def plot_histogram(self) -> Image:
    """
    Plot a column in a histogram.

    Returns
    -------
    plot : Image
        The plot as a PNG image.
    """
    fig = plt.figure()
    ax = sns.histplot(data=self._data)
    # Fix the tick positions before relabeling, so labels and ticks stay aligned.
    ax.set_xticks(ax.get_xticks())
    ax.set(xlabel=self.name)
    ax.set_xticklabels(
        ax.get_xticklabels(),
        rotation=45,
        horizontalalignment="right",
    )  # rotate the labels of the x Axis to prevent the chance of overlapping of the labels
    plt.tight_layout()
    # Render to an in-memory PNG buffer instead of the screen.
    buffer = io.BytesIO()
    fig.savefig(buffer, format="png")
    plt.close()  # Prevents the figure from being displayed directly
    buffer.seek(0)
    return Image(buffer, ImageFormat.PNG)
# ------------------------------------------------------------------------------------------------------------------
# Conversion
# ------------------------------------------------------------------------------------------------------------------
def to_html(self) -> str:
    """
    Return an HTML table representation of the column.

    All rows are rendered (no truncation).

    Returns
    -------
    output : str
        The generated HTML.
    """
    # Convert to a one-column DataFrame so pandas' HTML renderer can be reused.
    frame = self._data.to_frame()
    frame.columns = [self.name]
    html = frame.to_html(max_rows=self._data.size, max_cols=1)
    return html
# ------------------------------------------------------------------------------------------------------------------
# IPython integration
# ------------------------------------------------------------------------------------------------------------------
def _repr_html_(self) -> str:
    """
    Return an HTML representation of the column for Jupyter notebooks.

    Returns
    -------
    output : str
        The generated HTML, styled for notebook display.
    """
    # Convert to a one-column DataFrame so pandas' HTML renderer can be reused.
    frame = self._data.to_frame()
    frame.columns = [self.name]
    html = frame.to_html(max_rows=self._data.size, max_cols=1, notebook=True)
    return html
# ------------------------------------------------------------------------------------------------------------------
# Other
# ------------------------------------------------------------------------------------------------------------------
def _count_missing_values(self) -> int:
    """
    Return the number of null values in the column.

    Returns
    -------
    count : int
        The number of null values (None/NaN).
    """
    missing_mask = self._data.isna()
    return missing_mask.sum()
# ------------------------------------------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------------------------------------------
def _copy(self) -> Column:
    """
    Return a deep copy of this column.

    Returns
    -------
    column : Column
        An independent copy of this column.
    """
    return copy.deepcopy(self)
from __future__ import annotations
from sklearn.preprocessing import KBinsDiscretizer as sk_KBinsDiscretizer
from safeds.data.tabular.containers import Table
from safeds.data.tabular.transformation._table_transformer import TableTransformer
from safeds.exceptions import (
ClosedBound,
NonNumericColumnError,
OutOfBoundsError,
TransformerNotFittedError,
UnknownColumnNameError,
)
class Discretizer(TableTransformer):
    """
    The Discretizer bins continuous data into intervals.

    Parameters
    ----------
    number_of_bins : int
        The number of bins to be created.

    Raises
    ------
    OutOfBoundsError
        If the given number_of_bins is less than 2.
    """

    def __init__(self, number_of_bins: int = 5):
        # Both attributes stay None until fit() succeeds; is_fitted() keys off this.
        self._column_names: list[str] | None = None
        self._wrapped_transformer: sk_KBinsDiscretizer | None = None

        # Fewer than 2 bins would not partition the data at all.
        if number_of_bins < 2:
            raise OutOfBoundsError(number_of_bins, name="number_of_bins", lower_bound=ClosedBound(2))
        self._number_of_bins = number_of_bins

    def fit(self, table: Table, column_names: list[str] | None) -> Discretizer:
        """
        Learn a transformation for a set of columns in a table.

        This transformer is not modified.

        Parameters
        ----------
        table : Table
            The table used to fit the transformer.
        column_names : list[str] | None
            The list of columns from the table used to fit the transformer. If `None`, all columns are used.

        Returns
        -------
        fitted_transformer : TableTransformer
            The fitted transformer.

        Raises
        ------
        ValueError
            If the table is empty.
        NonNumericColumnError
            If one of the columns, that should be fitted is non-numeric.
        UnknownColumnNameError
            If one of the columns, that should be fitted is not in the table.
        """
        if table.number_of_rows == 0:
            raise ValueError("The Discretizer cannot be fitted because the table contains 0 rows")

        if column_names is None:
            column_names = table.column_names
        else:
            missing_columns = set(column_names) - set(table.column_names)
            if len(missing_columns) > 0:
                # Report missing columns in the order the caller listed them.
                raise UnknownColumnNameError(
                    sorted(
                        missing_columns,
                        key={val: ix for ix, val in enumerate(column_names)}.__getitem__,
                    ),
                )

        # Binning is only defined for numeric columns.
        for column in column_names:
            if not table.get_column(column).type.is_numeric():
                raise NonNumericColumnError(f"{column} is of type {table.get_column(column).type}.")

        # "ordinal" encoding keeps one numeric output column per input column.
        wrapped_transformer = sk_KBinsDiscretizer(n_bins=self._number_of_bins, encode="ordinal")
        wrapped_transformer.fit(table._data[column_names])

        # Return a new fitted instance instead of mutating self.
        result = Discretizer(self._number_of_bins)
        result._wrapped_transformer = wrapped_transformer
        result._column_names = column_names

        return result

    def transform(self, table: Table) -> Table:
        """
        Apply the learned transformation to a table.

        The table is not modified.

        Parameters
        ----------
        table : Table
            The table to which the learned transformation is applied.

        Returns
        -------
        transformed_table : Table
            The transformed table.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        ValueError
            If the table is empty.
        UnknownColumnNameError
            If one of the columns, that should be transformed is not in the table.
        NonNumericColumnError
            If one of the columns, that should be fitted is non-numeric.
        """
        # Transformer has not been fitted yet
        if self._wrapped_transformer is None or self._column_names is None:
            raise TransformerNotFittedError

        if table.number_of_rows == 0:
            raise ValueError("The table cannot be transformed because it contains 0 rows")

        # Input table does not contain all columns used to fit the transformer
        missing_columns = set(self._column_names) - set(table.column_names)
        if len(missing_columns) > 0:
            # Report missing columns in the order they were passed to fit().
            raise UnknownColumnNameError(
                sorted(
                    missing_columns,
                    key={val: ix for ix, val in enumerate(self._column_names)}.__getitem__,
                ),
            )

        for column in self._column_names:
            if not table.get_column(column).type.is_numeric():
                raise NonNumericColumnError(f"{column} is of type {table.get_column(column).type}.")

        # Work on a copy so the input table stays untouched.
        data = table._data.copy()
        data.columns = table.column_names
        data[self._column_names] = self._wrapped_transformer.transform(data[self._column_names])
        return Table._from_pandas_dataframe(data)

    def is_fitted(self) -> bool:
        """
        Check if the transformer is fitted.

        Returns
        -------
        is_fitted : bool
            Whether the transformer is fitted.
        """
        return self._wrapped_transformer is not None

    def get_names_of_added_columns(self) -> list[str]:
        """
        Get the names of all new columns that have been added by the Discretizer.

        The Discretizer transforms columns in place and never adds any, so this
        is always empty once fitted.

        Returns
        -------
        added_columns : list[str]
            A list of names of the added columns, ordered as they will appear in the table.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        """
        if not self.is_fitted():
            raise TransformerNotFittedError
        return []

    # (Must implement abstract method, cannot instantiate class otherwise.)
    def get_names_of_changed_columns(self) -> list[str]:
        """
        Get the names of all columns that may have been changed by the Discretizer.

        Returns
        -------
        changed_columns : list[str]
            The list of (potentially) changed column names, as passed to fit.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        """
        if self._column_names is None:
            raise TransformerNotFittedError
        return self._column_names

    def get_names_of_removed_columns(self) -> list[str]:
        """
        Get the names of all columns that have been removed by the Discretizer.

        The Discretizer transforms columns in place and never removes any, so
        this is always empty once fitted.

        Returns
        -------
        removed_columns : list[str]
            A list of names of the removed columns, ordered as they appear in the table the Discretizer was fitted on.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        """
        if not self.is_fitted():
            raise TransformerNotFittedError
        return []
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.