index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
31,446
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_watcher.py
|
from __future__ import annotations
import time
from typing import TYPE_CHECKING
from fava.core.watcher import Watcher
if TYPE_CHECKING: # pragma: no cover
from pathlib import Path
def test_watcher_file(tmp_path: Path) -> None:
    """Watcher should pick up a modification to a watched file."""
    paths = [tmp_path / name for name in ("file1", "file2")]
    for path in paths:
        path.write_text("test")
    watcher = Watcher()
    watcher.update(paths, [])
    assert not watcher.check()
    # time.time is too precise
    time.sleep(0.05)
    paths[0].write_text("test2")
    assert watcher.check()
def test_watcher_deleted_file(tmp_path: Path) -> None:
    """Deleting a watched file should register as a change."""
    target = tmp_path / "file1"
    target.write_text("test")
    watcher = Watcher()
    watcher.update([target], [])
    assert not watcher.check()
    target.unlink()
    assert watcher.check()
def test_watcher_folder(tmp_path: Path) -> None:
    """Creating a directory inside a watched folder should register."""
    base = tmp_path / "folder"
    base.mkdir()
    (base / "bar").mkdir()
    watcher = Watcher()
    watcher.update([], [base])
    assert not watcher.check()
    # time.time is too precise
    time.sleep(0.05)
    (base / "bar2").mkdir()
    assert watcher.check()
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,447
|
beancount/fava
|
refs/heads/main
|
/tests/test_core.py
|
from __future__ import annotations
import datetime
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from fava.helpers import FavaAPIError
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.core import FavaLedger
def test_apiexception() -> None:
    """str() of a FavaAPIError yields its message."""
    with pytest.raises(FavaAPIError) as exception:
        raise FavaAPIError("error")
    err = exception.value
    assert str(err) == "error"
def test_attributes(example_ledger: FavaLedger) -> None:
    """The attributes module lists accounts; root accounts are excluded."""
    accounts = example_ledger.attributes.accounts
    assert len(accounts) == 61
    assert "Assets" not in accounts
def test_paths_to_watch(
    example_ledger: FavaLedger,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Only the main file is watched; with a documents option set,
    one folder per root account is watched in addition."""
    main_file = Path(example_ledger.beancount_file_path)
    assert example_ledger.paths_to_watch() == ([main_file], [])
    monkeypatch.setitem(example_ledger.options, "documents", ["folder"])
    base = main_file.parent / "folder"
    roots = ["Assets", "Liabilities", "Equity", "Income", "Expenses"]
    assert example_ledger.paths_to_watch() == (
        [main_file],
        [base / account for account in roots],
    )
def test_account_metadata(example_ledger: FavaLedger) -> None:
    """Account metadata comes from the account's directives."""
    meta = example_ledger.accounts["Assets:US:BofA"].meta
    assert meta["address"] == "123 America Street, LargeTown, USA"
    assert meta["institution"] == "Bank of America"
    # Root accounts and unknown accounts carry no metadata.
    assert not example_ledger.accounts["Assets"].meta
    assert not example_ledger.accounts["NOACCOUNT"].meta
def test_group_entries(
    example_ledger: FavaLedger,
    load_doc_entries: list[Directive],
) -> None:
    # NOTE: the docstring below is not documentation - the load_doc_entries
    # fixture parses it as a Beancount document, so its content is test input.
    """
    2010-11-12 * "test"
        Assets:T   4.00 USD
        Expenses:T
    2010-11-12 * "test"
        Assets:T   4.00 USD
        Expenses:T
    2012-12-12 note Expenses:T "test"
    """
    assert len(load_doc_entries) == 3
    data = example_ledger.group_entries_by_type(load_doc_entries)
    # The note is grouped separately from the two transactions.
    assert data.Note == [load_doc_entries[2]]
    assert data.Transaction == load_doc_entries[0:2]
def test_account_uptodate_status(example_ledger: FavaLedger) -> None:
    """Uptodate status per account (None / yellow / green)."""
    accounts = example_ledger.accounts
    expected = {
        "Assets:US:BofA": None,
        "Assets:US:BofA:Checking": "yellow",
        "Liabilities:US:Chase:Slate": "green",
    }
    for name, status in expected.items():
        assert accounts[name].uptodate_status == status
def test_account_balance_directive(example_ledger: FavaLedger) -> None:
    """The generated balance directive string uses today's date."""
    today = datetime.date.today()
    expected = f"{today} balance Assets:US:BofA:Checking 1632.79 USD\n"
    checking = example_ledger.accounts["Assets:US:BofA:Checking"]
    assert checking.balance_string == expected
    assert example_ledger.accounts.all_balance_directives() == expected
def test_commodity_names(example_ledger: FavaLedger) -> None:
    """Commodity names fall back to the currency code itself."""
    name = example_ledger.commodities.name
    assert name("USD") == "US Dollar"
    assert name("NOCOMMODITY") == "NOCOMMODITY"
    assert name("VMMXX") == "VMMXX"
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,448
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_commodities.py
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
def test_commodity_names(example_ledger: FavaLedger) -> None:
    """Named commodities are mapped; unknown ones are returned unchanged."""
    commodities = example_ledger.commodities
    assert commodities.name("USD") == "US Dollar"
    assert commodities.name("NOCOMMODITY") == "NOCOMMODITY"
    assert commodities.name("VMMXX") == "VMMXX"
def test_commodity_precision(example_ledger: FavaLedger) -> None:
    """Display precisions are inferred per currency."""
    expected = {"USD": 2, "VMMXX": 4, "VACHR": 0}
    assert example_ledger.commodities.precisions == expected
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,449
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_documents.py
|
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from fava.core.documents import filepath_in_document_folder
from fava.core.documents import is_document_or_import_file
from fava.helpers import FavaAPIError
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
def test_is_document_or_import_file(
    example_ledger: FavaLedger,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Only paths that normalise into the import dirs are accepted."""
    monkeypatch.setattr(example_ledger.fava_options, "import_dirs", ["/test/"])
    # Unrelated path - rejected.
    assert not is_document_or_import_file("/asdfasdf", example_ledger)
    # Traversal escaping the import dir - rejected.
    assert not is_document_or_import_file("/test/../../err", example_ledger)
    # Normalises to a path inside the import dir - accepted.
    # (The original repeated this assertion twice verbatim; the exact
    # duplicate added no coverage and was removed.)
    assert is_document_or_import_file("/test/err/../err", example_ledger)
def test_filepath_in_documents_folder(
    example_ledger: FavaLedger,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Paths are built below <folder>/<account parts>/; separators in the
    file name are replaced; bad accounts or folders raise FavaAPIError."""
    monkeypatch.setitem(example_ledger.options, "documents", ["/test"])

    def _join(start: str, *args: str) -> Path:
        return Path(start).joinpath(*args).resolve()

    account = "Assets:US:BofA:Checking"
    cases = [
        ("filename", "filename"),
        ("file/name", "file name"),
        ("/../file/name", " .. file name"),
    ]
    for given, cleaned in cases:
        assert filepath_in_document_folder(
            "/test",
            account,
            given,
            example_ledger,
        ) == _join("/test", "Assets", "US", "BofA", "Checking", cleaned)
    with pytest.raises(FavaAPIError):
        filepath_in_document_folder(
            "/test",
            "notanaccount",
            "filename",
            example_ledger,
        )
    with pytest.raises(FavaAPIError):
        filepath_in_document_folder(
            "/notadocumentsfolder",
            account,
            "filename",
            example_ledger,
        )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,450
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/charts.py
|
"""Provide data suitable for Fava's charts."""
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass
from dataclasses import fields
from dataclasses import is_dataclass
from datetime import date
from datetime import timedelta
from decimal import Decimal
from typing import Any
from typing import Iterable
from typing import Pattern
from typing import TYPE_CHECKING
from beancount.core import realization
from beancount.core.data import Booking
from beancount.core.data import iter_entry_dates
from beancount.core.inventory import Inventory
from beancount.core.number import MISSING
from flask.json.provider import JSONProvider
from simplejson import dumps as simplejson_dumps
from simplejson import loads as simplejson_loads
from fava.beans.abc import Amount
from fava.beans.abc import Position
from fava.beans.abc import Transaction
from fava.beans.flags import FLAG_UNREALIZED
from fava.core.conversion import cost_or_value
from fava.core.conversion import units
from fava.core.inventory import CounterInventory
from fava.core.module_base import FavaModule
from fava.core.tree import Tree
from fava.helpers import FavaAPIError
from fava.util import listify
if TYPE_CHECKING: # pragma: no cover
from fava.beans.funcs import ResultRow
from fava.beans.funcs import ResultType
from fava.core import FilteredLedger
from fava.core.inventory import SimpleCounterInventory
from fava.core.tree import SerialisedTreeNode
from fava.util.date import Interval
# Offset used to turn an exclusive end date into the last included day.
ONE_DAY = timedelta(days=1)
# Decimal zero, used when a currency's balance drops out of an inventory.
ZERO = Decimal("0")
def inv_to_dict(inventory: Inventory) -> dict[str, Decimal]:
    """Convert an inventory to a simple cost->number dict."""
    result: dict[str, Decimal] = {}
    for position in inventory:
        number = position.units.number
        # Positions without a number are skipped.
        if number is not None:
            result[position.units.currency] = number
    return result
# Monkeypatch: give beancount's Inventory a `for_json` hook so that
# simplejson (called with for_json=True in `dumps`) can serialise it.
Inventory.for_json = inv_to_dict  # type: ignore[attr-defined]
def _json_default(o: Any) -> Any:
    """Specific serialisation for some data types."""
    # Dates, amounts, booking methods and positions serialise as strings.
    if isinstance(o, (date, Amount, Booking, Position)):
        return str(o)
    # Sets have no JSON equivalent; emit them as lists.
    if isinstance(o, (set, frozenset)):
        return list(o)
    # Compiled regexes serialise as their pattern string.
    if isinstance(o, Pattern):
        return o.pattern
    # Dataclasses become plain dicts of their fields.
    if is_dataclass(o):
        return {field_.name: getattr(o, field_.name) for field_ in fields(o)}
    # Beancount's MISSING sentinel maps to null.
    if o is MISSING:
        return None
    raise TypeError
def dumps(obj: Any, **_kwargs: Any) -> str:
    """Dump as a JSON string."""
    # Deterministic output: fixed indent and sorted keys. for_json=True
    # enables simplejson's per-object `for_json` serialisation hook.
    return simplejson_dumps(
        obj,
        default=_json_default,
        for_json=True,
        indent=" ",
        sort_keys=True,
    )
def loads(s: str | bytes) -> Any:
    """Parse a JSON document (accepts ``str`` or ``bytes``)."""
    return simplejson_loads(s)
class FavaJSONProvider(JSONProvider):
    """Flask JSON provider backed by Fava's custom encoder and decoder."""

    def dumps(self, obj: Any, **_kwargs: Any) -> str:
        # Flask's formatting kwargs are ignored; `dumps` fixes the format.
        return dumps(obj)

    def loads(self, s: str | bytes, **_kwargs: Any) -> Any:
        return loads(s)
@dataclass(frozen=True)
class DateAndBalance:
    """Balance at a date."""

    # The date this balance belongs to.
    date: date
    # Mapping of currency to balance number in that currency.
    balance: dict[str, Decimal]
@dataclass(frozen=True)
class DateAndBalanceWithBudget:
    """Balance at a date with a budget."""

    # The date this data point belongs to.
    date: date
    # Total balance over all matched accounts.
    balance: SimpleCounterInventory
    # Per-account balances, keyed by account name.
    account_balances: dict[str, SimpleCounterInventory]
    # Budget values per currency.
    budgets: dict[str, Decimal]
class ChartModule(FavaModule):
    """Return data for the various charts in Fava."""

    def hierarchy(
        self,
        filtered: FilteredLedger,
        account_name: str,
        conversion: str,
        begin: date | None = None,
        end: date | None = None,
    ) -> SerialisedTreeNode:
        """Render an account tree."""
        # With an explicit window, build a fresh tree from the entries in
        # [begin, end); otherwise reuse the filtered ledger's root tree.
        if begin is not None and end is not None:
            tree = Tree(iter_entry_dates(filtered.entries, begin, end))
        else:
            tree = filtered.root_tree
        return tree.get(account_name).serialise(
            conversion,
            self.ledger.prices,
            # `end` is exclusive, so use the day before it for conversion.
            end - ONE_DAY if end is not None else None,
        )

    @listify
    def interval_totals(
        self,
        filtered: FilteredLedger,
        interval: Interval,
        accounts: str | tuple[str, ...],
        conversion: str,
        invert: bool = False,
    ) -> Iterable[DateAndBalanceWithBudget]:
        """Render totals for account (or accounts) in the intervals.

        Args:
            filtered: The filtered ledger.
            interval: An interval.
            accounts: A single account (str) or a tuple of accounts.
            conversion: The conversion to use.
            invert: invert all numbers.
        """
        # pylint: disable=too-many-locals
        prices = self.ledger.prices
        # limit the bar charts to 100 intervals
        intervals = filtered.interval_ranges(interval)[-100:]
        for date_range in intervals:
            inventory = CounterInventory()
            entries = iter_entry_dates(
                filtered.entries,
                date_range.begin,
                date_range.end,
            )
            # Per-account subtotals in addition to the overall total.
            account_inventories: dict[str, CounterInventory] = defaultdict(
                CounterInventory,
            )
            for entry in entries:
                # Entries without postings (non-transactions) contribute
                # nothing here.
                for posting in getattr(entry, "postings", []):
                    # str.startswith accepts a tuple, matching any prefix.
                    if posting.account.startswith(accounts):
                        account_inventories[posting.account].add_position(
                            posting,
                        )
                        inventory.add_position(posting)
            balance = cost_or_value(
                inventory,
                conversion,
                prices,
                date_range.end_inclusive,
            )
            account_balances = {
                account: cost_or_value(
                    acct_value,
                    conversion,
                    prices,
                    date_range.end_inclusive,
                )
                for account, acct_value in account_inventories.items()
            }
            # Budgets are only computed for a single account string.
            budgets = (
                self.ledger.budgets.calculate_children(
                    accounts,
                    date_range.begin,
                    date_range.end,
                )
                if isinstance(accounts, str)
                else {}
            )
            if invert:
                # Flip the sign of all balances and budgets.
                balance = -balance
                budgets = {k: -v for k, v in budgets.items()}
                account_balances = {
                    k: -v for k, v in account_balances.items()
                }
            yield DateAndBalanceWithBudget(
                date_range.end_inclusive,
                balance,
                account_balances,
                budgets,
            )

    @listify
    def linechart(
        self,
        filtered: FilteredLedger,
        account_name: str,
        conversion: str,
    ) -> Iterable[DateAndBalance]:
        """Get the balance of an account as a line chart.

        Args:
            filtered: The filtered ledger.
            account_name: A string.
            conversion: The conversion to use.

        Returns:
            A list of dicts for all dates on which the balance of the given
            account has changed containing the balance (in units) of the
            account at that date.
        """
        real_account = realization.get_or_create(
            filtered.root_account,
            account_name,
        )
        postings = realization.get_postings(real_account)
        journal = realization.iterate_with_balance(postings)  # type: ignore[arg-type]
        # When the balance for a commodity just went to zero, it will be
        # missing from the 'balance' so keep track of currencies that last had
        # a balance.
        last_currencies = None
        prices = self.ledger.prices
        for entry, _, change, balance_inventory in journal:
            # Skip journal rows that do not change the balance.
            if change.is_empty():
                continue
            balance = inv_to_dict(
                cost_or_value(
                    balance_inventory,
                    conversion,
                    prices,
                    entry.date,
                ),
            )
            currencies = set(balance.keys())
            if last_currencies:
                # Emit an explicit zero for currencies that dropped out.
                for currency in last_currencies - currencies:
                    balance[currency] = ZERO
            last_currencies = currencies
            yield DateAndBalance(entry.date, balance)

    @listify
    def net_worth(
        self,
        filtered: FilteredLedger,
        interval: Interval,
        conversion: str,
    ) -> Iterable[DateAndBalance]:
        """Compute net worth.

        Args:
            filtered: The filtered ledger.
            interval: A string for the interval.
            conversion: The conversion to use.

        Returns:
            A list of dicts for all ends of the given interval containing the
            net worth (Assets + Liabilities) separately converted to all
            operating currencies.
        """
        # Ignore transactions flagged as unrealised gains.
        transactions = (
            entry
            for entry in filtered.entries
            if (
                isinstance(entry, Transaction)
                and entry.flag != FLAG_UNREALIZED
            )
        )
        types = (
            self.ledger.options["name_assets"],
            self.ledger.options["name_liabilities"],
        )
        txn = next(transactions, None)
        inventory = CounterInventory()
        prices = self.ledger.prices
        for date_range in filtered.interval_ranges(interval):
            # Consume the transaction generator once across all intervals,
            # accumulating Assets/Liabilities postings up to each range end.
            while txn and txn.date < date_range.end:
                for posting in txn.postings:
                    if posting.account.startswith(types):
                        inventory.add_position(posting)
                txn = next(transactions, None)
            yield DateAndBalance(
                date_range.end_inclusive,
                cost_or_value(
                    inventory,
                    conversion,
                    prices,
                    date_range.end_inclusive,
                ),
            )

    @staticmethod
    def can_plot_query(types: list[ResultType]) -> bool:
        """Whether we can plot the given query.

        Args:
            types: The list of types returned by the BQL query.
        """
        # Plottable queries have exactly two columns: a str or date key
        # and an Inventory value.
        return (
            len(types) == 2
            and types[0][1] in {str, date}
            and types[1][1] is Inventory
        )

    def query(
        self,
        types: list[ResultType],
        rows: list[ResultRow],
    ) -> list[dict[str, date | str | Inventory]]:
        """Chart for a query.

        Args:
            types: The list of result row types.
            rows: The result rows.
        """
        if not self.can_plot_query(types):
            raise FavaAPIError("Can not plot the given chart.")
        # NOTE: the comprehension variable deliberately shadows `date` here.
        if types[0][1] is date:
            return [
                {"date": date, "balance": units(inv)} for date, inv in rows
            ]
        return [{"group": group, "balance": units(inv)} for group, inv in rows]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,451
|
beancount/fava
|
refs/heads/main
|
/src/fava/__init__.py
|
"""Fava - A web interface for Beancount."""
from __future__ import annotations
from contextlib import suppress
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version
# Read the installed version; when the package metadata is unavailable
# (e.g. running from a plain source checkout), leave __version__ unset.
with suppress(PackageNotFoundError):
    __version__ = version(__name__)

# Locales for which translations are available.
LOCALES = [
    "bg",
    "ca",
    "de",
    "es",
    "fa",
    "fr",
    "nl",
    "pt",
    "ru",
    "sk",
    "sv",
    "uk",
    "zh",
    "zh_Hant_TW",
]
# Two-letter language codes derived from the locales.
# NOTE(review): duplicates are kept (e.g. "zh" appears twice via
# "zh_Hant_TW") - confirm consumers tolerate that.
LANGUAGES = [locale[:2] for locale in LOCALES]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,452
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/module_base.py
|
"""Base class for the "modules" of FavaLedger."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
class FavaModule:
    """Base class for the "modules" of FavaLedger."""

    def __init__(self, ledger: FavaLedger) -> None:
        # The ledger this module belongs to.
        self.ledger = ledger

    def load_file(self) -> None:
        """Run when the file has been (re)loaded."""
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,453
|
beancount/fava
|
refs/heads/main
|
/tests/test_util.py
|
from __future__ import annotations
from typing import Iterable
from typing import TYPE_CHECKING
from werkzeug.test import Client
from werkzeug.wrappers import Response
from fava.util import listify
from fava.util import next_key
from fava.util import send_file_inline
from fava.util import simple_wsgi
from fava.util import slugify
if TYPE_CHECKING: # pragma: no cover
from pathlib import Path
from flask import Flask
def test_listify() -> None:
    """listify turns a generator function into one returning a list."""

    @listify
    def produce() -> Iterable[int]:
        yield from (1, 2, 3)

    assert produce() == [1, 2, 3]
def test_simple_wsgi() -> None:
    """The trivial WSGI app answers any path with an empty 200."""
    client = Client(simple_wsgi, Response)
    response = client.get("/any_path")
    assert (response.status_code, response.data) == (200, b"")
def test_next_key() -> None:
    """next_key appends a numeric suffix until the key is unused."""
    assert next_key("statement", {}) == "statement"
    assert next_key("statement", {"foo": 1}) == "statement"
    existing = {"foo": 1, "statement": 1}
    assert next_key("statement", existing) == "statement-2"
    existing = {"statement": 1, "statement-2": 1}
    assert next_key("statement", existing) == "statement-3"
def test_slugify() -> None:
    """Slugs are lowercased, trimmed, and space-joined with dashes."""
    cases = {
        "Example Beancount File": "example-beancount-file",
        " Example Beancount File ": "example-beancount-file",
        "test": "test",
        "烫烫烫": "烫烫烫",
        "nonun烫icode 烫烫": "nonun烫icode-烫烫",
        "söße": "söße",
        "ASDF": "asdf",
        "ASDF test test": "asdf-test-test",
    }
    for raw, slug in cases.items():
        assert slugify(raw) == slug
    # A string with no slug-safe characters yields an empty slug.
    assert not slugify("%✓")
def test_send_file_inline(app: Flask, test_data_dir: Path) -> None:
    """Content-Disposition is inline with a UTF-8-encoded file name."""
    with app.test_request_context():
        cases = [
            ("example-balances.csv", "example-balances.csv"),
            ("example-utf8-🦁.txt", "example-utf8-%F0%9F%A6%81.txt"),
        ]
        for name, encoded in cases:
            response = send_file_inline(str(test_data_dir / name))
            header = response.headers["Content-Disposition"]
            assert header == f"inline; filename*=UTF-8''{encoded}"
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,454
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/filters.py
|
"""Entry filters."""
from __future__ import annotations
import re
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import Callable
from typing import Iterable
from typing import TYPE_CHECKING
import ply.yacc # type: ignore[import]
from beancount.core import account
from beancount.ops.summarize import clamp_opt # type: ignore[import]
from fava.beans.account import get_entry_accounts
from fava.helpers import FavaAPIError
from fava.util.date import DateRange
from fava.util.date import parse_date
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.beans.types import BeancountOptions
from fava.core.fava_options import FavaOptions
class FilterError(FavaAPIError):
    """Raised when one of the entry filters fails."""

    def __init__(self, filter_type: str, message: str) -> None:
        super().__init__(message)
        # Which filter raised the error (e.g. "time" or "filter").
        self.filter_type = filter_type

    def __str__(self) -> str:
        return self.message
class Token:
    """A single token of the filter syntax with a type and a value.

    The ``lexer`` slot only exists because PLY writes to it when
    reporting a parser error.
    """

    __slots__ = ("type", "value", "lexer")

    def __init__(self, type_: str, value: str) -> None:
        self.type = type_
        self.value = value

    def __repr__(self) -> str:
        return f"Token({self.type}, {self.value})"
class FilterSyntaxLexer:
    """Lexer for Fava's filter syntax."""

    tokens = ("ANY", "ALL", "KEY", "LINK", "STRING", "TAG")

    # Token name -> regex. Order matters: earlier alternatives win when
    # combined into the single regex below.
    RULES = (
        ("LINK", r"\^[A-Za-z0-9\-_/.]+"),
        ("TAG", r"\#[A-Za-z0-9\-_/.]+"),
        ("KEY", r"[a-z][a-zA-Z0-9\-_]+:"),
        ("ALL", r"all\("),
        ("ANY", r"any\("),
        ("STRING", r'\w[-\w]*|"[^"]*"|\'[^\']*\''),
    )

    # One combined regex with a named group per token type.
    regex = re.compile(
        "|".join((f"(?P<{name}>{rule})" for name, rule in RULES)),
    )

    # The methods below (named after the token types, hence the N802
    # suppressions) turn a raw match into a (type, value) pair.
    def LINK(self, token: str, value: str) -> tuple[str, str]:  # noqa: N802
        # Strip the leading "^".
        return token, value[1:]

    def TAG(self, token: str, value: str) -> tuple[str, str]:  # noqa: N802
        # Strip the leading "#".
        return token, value[1:]

    def KEY(self, token: str, value: str) -> tuple[str, str]:  # noqa: N802
        # Strip the trailing ":".
        return token, value[:-1]

    def ALL(self, token: str, _: str) -> tuple[str, str]:  # noqa: N802
        return token, token

    def ANY(self, token: str, _: str) -> tuple[str, str]:  # noqa: N802
        return token, token

    def STRING(self, token: str, value: str) -> tuple[str, str]:  # noqa: N802
        # Strip surrounding quotes when present.
        if value[0] in ['"', "'"]:
            return token, value[1:-1]
        return token, value

    def lex(self, data: str) -> Iterable[Token]:
        """A generator yielding all tokens in a given line.

        Arguments:
            data: A string, the line to lex.

        Yields:
            All Tokens in the line.
        """
        ignore = " \t"
        literals = "-,()"
        regex = self.regex.match
        pos = 0
        length = len(data)
        while pos < length:
            char = data[pos]
            if char in ignore:
                # Skip whitespace between tokens.
                pos += 1
                continue
            match = regex(data, pos)
            if match:
                value = match.group()
                pos += len(value)
                token = match.lastgroup
                if token is None:
                    raise ValueError("Internal Error")
                # Dispatch to the token post-processing method above.
                func: Callable[[str, str], tuple[str, str]] = getattr(
                    self,
                    token,
                )
                ret = func(token, value)
                yield Token(*ret)
            elif char in literals:
                # Single-character literals pass through as their own type.
                yield Token(char, char)
                pos += 1
            else:
                raise FilterError(
                    "filter",
                    f'Illegal character "{char}" in filter.',
                )
class Match:
    """Match a string, by regular expression or (on bad regex) equality."""

    __slots__ = ("match",)

    def __init__(self, search: str) -> None:
        try:
            pattern = re.compile(search, re.IGNORECASE)
            self.match: Callable[[str], bool] = lambda s: bool(
                pattern.search(s),
            )
        except re.error:
            # Not a valid regex - fall back to exact comparison.
            self.match = lambda s: s == search

    def __call__(self, string: str) -> bool:
        return self.match(string)
class FilterSyntaxParser:
    # PLY configuration: juxtaposition ("expr expr", implicit AND) is
    # left-associative; negation binds tightest.
    precedence = (("left", "AND"), ("right", "UMINUS"))
    tokens = FilterSyntaxLexer.tokens

    # NOTE: the docstrings of the p_* methods below are PLY grammar rules,
    # not documentation - do not edit them without changing the grammar.

    def p_error(self, _: Any) -> None:
        raise FilterError("filter", "Failed to parse filter: ")

    def p_filter(self, p: list[Any]) -> None:
        """
        filter : expr
        """
        p[0] = p[1]

    def p_expr(self, p: list[Any]) -> None:
        """
        expr : simple_expr
        """
        p[0] = p[1]

    def p_expr_all(self, p: list[Any]) -> None:
        """
        expr : ALL expr ')'
        """
        expr = p[2]

        # Match when the inner expression holds for every posting.
        def _match_postings(entry: Directive) -> bool:
            return all(
                expr(posting) for posting in getattr(entry, "postings", [])
            )

        p[0] = _match_postings

    def p_expr_any(self, p: list[Any]) -> None:
        """
        expr : ANY expr ')'
        """
        expr = p[2]

        # Match when the inner expression holds for at least one posting.
        def _match_postings(entry: Directive) -> bool:
            return any(
                expr(posting) for posting in getattr(entry, "postings", [])
            )

        p[0] = _match_postings

    def p_expr_parentheses(self, p: list[Any]) -> None:
        """
        expr : '(' expr ')'
        """
        p[0] = p[2]

    def p_expr_and(self, p: list[Any]) -> None:
        """
        expr : expr expr %prec AND
        """
        left, right = p[1], p[2]

        def _and(entry: Directive) -> bool:
            return left(entry) and right(entry)  # type: ignore[no-any-return]

        p[0] = _and

    def p_expr_or(self, p: list[Any]) -> None:
        """
        expr : expr ',' expr
        """
        left, right = p[1], p[3]

        def _or(entry: Directive) -> bool:
            return left(entry) or right(entry)  # type: ignore[no-any-return]

        p[0] = _or

    def p_expr_negated(self, p: list[Any]) -> None:
        """
        expr : '-' expr %prec UMINUS
        """
        func = p[2]

        def _neg(entry: Directive) -> bool:
            return not func(entry)

        p[0] = _neg

    def p_simple_expr_TAG(self, p: list[Any]) -> None:  # noqa: N802
        """
        simple_expr : TAG
        """
        tag = p[1]

        # Entries without a tags attribute never match.
        def _tag(entry: Directive) -> bool:
            tags = getattr(entry, "tags", None)
            return (tag in tags) if tags is not None else False

        p[0] = _tag

    def p_simple_expr_LINK(self, p: list[Any]) -> None:  # noqa: N802
        """
        simple_expr : LINK
        """
        link = p[1]

        # Entries without a links attribute never match.
        def _link(entry: Directive) -> bool:
            links = getattr(entry, "links", None)
            return (link in links) if links is not None else False

        p[0] = _link

    def p_simple_expr_STRING(self, p: list[Any]) -> None:  # noqa: N802
        """
        simple_expr : STRING
        """
        string = p[1]
        match = Match(string)

        # A bare string matches against narration, payee or comment.
        def _string(entry: Directive) -> bool:
            for name in ("narration", "payee", "comment"):
                value = getattr(entry, name, "")
                if value and match(value):
                    return True
            return False

        p[0] = _string

    def p_simple_expr_key(self, p: list[Any]) -> None:
        """
        simple_expr : KEY STRING
        """
        key, value = p[1], p[2]
        match = Match(value)

        # Match against the entry attribute first, then its metadata.
        def _key(entry: Directive) -> bool:
            if hasattr(entry, key):
                return match(str(getattr(entry, key) or ""))
            if entry.meta is not None and key in entry.meta:
                return match(str(entry.meta.get(key)))
            return False

        p[0] = _key
class EntryFilter(ABC):
    """Interface for filters over lists of entries."""

    @abstractmethod
    def apply(self, entries: list[Directive]) -> list[Directive]:
        """Return the directives that pass the filter."""
class TimeFilter(EntryFilter):
    """Filter entries by a date range parsed from a time string."""

    __slots__ = ("date_range", "_options")

    def __init__(
        self,
        options: BeancountOptions,
        fava_options: FavaOptions,
        value: str,
    ) -> None:
        self._options = options
        begin, end = parse_date(value, fava_options.fiscal_year_end)
        if begin is None or end is None:
            raise FilterError("time", f"Failed to parse date: {value}")
        self.date_range = DateRange(begin, end)

    def apply(self, entries: list[Directive]) -> list[Directive]:
        # clamp_opt returns a pair; the second element is ignored here.
        clamped, _ = clamp_opt(
            entries,
            self.date_range.begin,
            self.date_range.end,
            self._options,
        )
        return clamped
# Single lexer instance shared by all AdvancedFilter instances.
LEXER = FilterSyntaxLexer()
# Build the PLY parser once at import time; parser-table generation and
# debug output are disabled since the grammar is small.
PARSE = ply.yacc.yacc(
    errorlog=ply.yacc.NullLogger(),
    write_tables=False,
    debug=False,
    module=FilterSyntaxParser(),
).parse
class AdvancedFilter(EntryFilter):
    """Filter by tags and links and keys."""

    __slots__ = ("_include",)

    def __init__(self, value: str) -> None:
        try:
            tokens = LEXER.lex(value)
            # PLY expects a lexer object; pass a dummy string and feed
            # tokens from our own generator via `tokenfunc` instead.
            self._include = PARSE(
                lexer="NONE",
                tokenfunc=lambda toks=tokens: next(toks, None),
            )
        except FilterError as exception:
            # Append the offending filter string for context.
            exception.message = exception.message + value
            raise

    def apply(self, entries: list[Directive]) -> list[Directive]:
        # Bind the predicate locally before the comprehension loop.
        _include = self._include
        return [entry for entry in entries if _include(entry)]
class AccountFilter(EntryFilter):
    """Filter by account.

    The filter string can either be a regular expression or a parent
    account.
    """

    __slots__ = ("_value", "_match")

    def __init__(self, value: str) -> None:
        self._value = value
        self._match = Match(value)

    def apply(self, entries: list[Directive]) -> list[Directive]:
        value = self._value
        # An empty filter string keeps everything.
        if not value:
            return entries
        match = self._match

        def _matches(entry: Directive) -> bool:
            # Keep the entry if any of its accounts has `value` as a
            # component or matches the pattern.
            return any(
                account.has_component(name, value) or match(name)
                for name in get_entry_accounts(entry)
            )

        return [entry for entry in entries if _matches(entry)]
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,455
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/attributes.py
|
"""Attributes for auto-completion."""
from __future__ import annotations
from typing import TYPE_CHECKING
from fava.core.module_base import FavaModule
from fava.util.date import END_OF_YEAR
from fava.util.ranking import ExponentialDecayRanker
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.beans.abc import Transaction
from fava.core import FavaLedger
from fava.util.date import FiscalYearEnd
def get_active_years(
    entries: list[Directive],
    fye: FiscalYearEnd,
) -> list[str]:
    """Return active years, with support for fiscal years.

    Args:
        entries: Beancount entries
        fye: fiscal year end

    Returns:
        A reverse sorted list of years or fiscal years that occur in the
        entries.
    """
    seen: list[int] = []
    previous = None
    if fye == END_OF_YEAR:
        # Calendar years: record each year when it first differs from the
        # previous entry's year (assumes entries are date-ordered).
        for entry in entries:
            year = entry.date.year
            if year != previous:
                previous = year
                seen.append(year)
        return [f"{year}" for year in reversed(seen)]
    month = fye.month
    day = fye.day
    # Fiscal years: an entry dated after the fiscal year end belongs to
    # the following fiscal year.
    for entry in entries:
        date = entry.date
        year = date.year
        if date.month > month or (date.month == month and date.day > day):
            year += 1
        if year != previous:
            previous = year
            seen.append(year)
    return [f"FY{year}" for year in reversed(seen)]
class AttributesModule(FavaModule):
    """Some attributes of the ledger (mostly for auto-completion)."""

    def __init__(self, ledger: FavaLedger) -> None:
        super().__init__(ledger)
        # All of these are (re)computed by load_file().
        self.accounts: list[str] = []
        self.currencies: list[str] = []
        self.payees: list[str] = []
        self.links: list[str] = []
        self.tags: list[str] = []
        self.years: list[str] = []

    def load_file(self) -> None:
        """Recompute the completion data from the loaded entries."""
        all_entries = self.ledger.all_entries
        all_links = set()
        all_tags = set()
        for entry in all_entries:
            # Only some entry types carry links/tags.
            links = getattr(entry, "links", None)
            if links is not None:
                all_links.update(links)
            tags = getattr(entry, "tags", None)
            if tags is not None:
                all_tags.update(tags)
        self.links = sorted(all_links)
        self.tags = sorted(all_tags)
        self.years = get_active_years(
            all_entries,
            self.ledger.fava_options.fiscal_year_end,
        )
        # Rank accounts, currencies and payees by recency-weighted usage.
        account_ranker = ExponentialDecayRanker(
            sorted(self.ledger.accounts.keys()),
        )
        currency_ranker = ExponentialDecayRanker()
        payee_ranker = ExponentialDecayRanker()
        for txn in self.ledger.all_entries_by_type.Transaction:
            if txn.payee:
                payee_ranker.update(txn.payee, txn.date)
            for posting in txn.postings:
                account_ranker.update(posting.account, txn.date)
                currency_ranker.update(posting.units.currency, txn.date)
                # Cost currencies count as used currencies too.
                if posting.cost and posting.cost.currency is not None:
                    currency_ranker.update(posting.cost.currency, txn.date)
        self.accounts = account_ranker.sort()
        self.currencies = currency_ranker.sort()
        self.payees = payee_ranker.sort()

    def payee_accounts(self, payee: str) -> list[str]:
        """Rank accounts for the given payee."""
        account_ranker = ExponentialDecayRanker(self.accounts)
        transactions = self.ledger.all_entries_by_type.Transaction
        for txn in transactions:
            if txn.payee == payee:
                for posting in txn.postings:
                    account_ranker.update(posting.account, txn.date)
        return account_ranker.sort()

    def payee_transaction(self, payee: str) -> Transaction | None:
        """Get the last transaction for a payee."""
        transactions = self.ledger.all_entries_by_type.Transaction
        # Iterate newest-first; the first hit is the most recent one.
        for txn in reversed(transactions):
            if txn.payee == payee:
                return txn
        return None
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,456
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_tree.py
|
from __future__ import annotations
from typing import TYPE_CHECKING
from fava.core.tree import Tree
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
from .conftest import SnapshotFunc
def test_tree() -> None:
    """Tree insertion and ancestor lookup behave as expected."""
    tree = Tree()
    # A fresh tree only contains the root node.
    assert len(tree) == 1
    # get() without insert does not create nodes.
    tree.get("account:name:a:b:c")
    assert len(tree) == 1
    # Inserting creates the node and all of its ancestors.
    node = tree.get("account:name:a:b:c", insert=True)
    assert tree.accounts == [
        "",
        "account",
        "account:name",
        "account:name:a",
        "account:name:a:b",
        "account:name:a:b:c",
    ]
    assert len(tree) == 6
    # Inserting an already existing account is a no-op ...
    tree.get("account:name", insert=True)
    assert len(tree) == 6
    # ... and yields the identical node object.
    assert node is tree.get("account:name:a:b:c", insert=True)
    assert list(tree.ancestors("account:name:a:b:c")) == [
        tree.get("account:name:a:b"),
        tree.get("account:name:a"),
        tree.get("account:name"),
        tree.get("account"),
        tree.get(""),
    ]
    # For an unknown account, ancestors are derived purely from the name.
    assert len(list(tree.ancestors("not:account:name:a:b:c"))) == 6
def test_tree_from_entries(
    example_ledger: FavaLedger,
    snapshot: SnapshotFunc,
) -> None:
    """Building a Tree from all ledger entries matches the snapshots."""
    tree = Tree(example_ledger.all_entries)
    # Snapshot the per-node balances and the aggregated children balance
    # of the Assets root.
    snapshot({n.name: n.balance for n in tree.values()})
    snapshot(tree["Assets"].balance_children)
def test_tree_cap(example_ledger: FavaLedger, snapshot: SnapshotFunc) -> None:
    """Capping the tree (transfer to equity) matches the snapshot."""
    tree = Tree(example_ledger.all_entries)
    tree.cap(example_ledger.options, "Unrealized")
    snapshot({n.name: n.balance for n in tree.values()})
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,457
|
beancount/fava
|
refs/heads/main
|
/tests/test_plugins_tag_discovered_documents.py
|
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
from beancount.loader import load_file
from fava.beans.abc import Document
def test_tag_discovered_plugin(tmp_path: Path) -> None:
    """Documents discovered on disk get tagged; declared ones do not."""
    # Create sample files
    assets_cash = tmp_path / "documents" / "Assets" / "Cash"
    assets_cash.mkdir(parents=True)
    # Picked up automatically since its name starts with a date.
    discovered = "2016-11-05 Test 4 discovered.pdf"
    (assets_cash / discovered).touch()
    # Only referenced via an explicit document directive below.
    non_discovered = "Test 5.pdf"
    (assets_cash / non_discovered).touch()
    assets_cash_rel = Path("documents") / "Assets" / "Cash"
    beancount_file = tmp_path / "example-tag-discovered.beancount"
    # Backslashes are escaped so Windows paths survive in the file.
    beancount_file.write_text(dedent(f"""
        option "title" "Test tag discovered documents"
        option "operating_currency" "EUR"
        option "documents" "{tmp_path / "documents"}"
        plugin "fava.plugins.tag_discovered_documents"
        2016-10-31 open Assets:Cash
        2016-11-06 document Assets:Cash "{assets_cash_rel / non_discovered}"
        """.replace("\\", "\\\\")))
    entries, errors, _ = load_file(str(beancount_file))
    assert not errors
    # The open directive plus two document entries (in date order).
    assert len(entries) == 3
    assert isinstance(entries[1], Document)
    assert discovered in entries[1].filename
    assert entries[1].tags
    assert "discovered" in entries[1].tags
    assert isinstance(entries[2], Document)
    assert non_discovered in entries[2].filename
    assert not entries[2].tags
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,458
|
beancount/fava
|
refs/heads/main
|
/src/fava/util/date.py
|
"""Date-related functionality.
Note:
Date ranges are always tuples (start, end) from the (inclusive) start date
to the (exclusive) end date.
"""
from __future__ import annotations
import datetime
import re
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from itertools import tee
from typing import Iterable
from typing import Iterator
from flask_babel import gettext # type: ignore[import]
IS_RANGE_RE = re.compile(r"(.*?)(?:-|to)(?=\s*(?:fy)*\d{4})(.*)")
# these match dates of the form 'year-month-day'
# day or month and day may be omitted
YEAR_RE = re.compile(r"^\d{4}$")
MONTH_RE = re.compile(r"^(\d{4})-(\d{2})$")
DAY_RE = re.compile(r"^(\d{4})-(\d{2})-(\d{2})$")
# this matches a week like 2016-W02 for the second week of 2016
WEEK_RE = re.compile(r"^(\d{4})-w(\d{2})$")
# this matches a quarter like 2016-Q1 for the first quarter of 2016
QUARTER_RE = re.compile(r"^(\d{4})-q(\d)$")
# this matches a financial year like FY2018 for the financial year ending 2018
FY_RE = re.compile(r"^fy(\d{4})$")
# this matches a quarter in a financial year like FY2018-Q2
FY_QUARTER_RE = re.compile(r"^fy(\d{4})-q(\d)$")
VARIABLE_RE = re.compile(
r"\(?(fiscal_year|year|fiscal_quarter|quarter"
r"|month|week|day)(?:([-+])(\d+))?\)?",
)
@dataclass(frozen=True)
class FiscalYearEnd:
    """Month and day that specify the end of the fiscal year."""

    # Month (1-12) of the last day of the fiscal year.
    month: int
    # Day of that month of the last day of the fiscal year.
    day: int


# The default: the fiscal year coincides with the calendar year.
END_OF_YEAR = FiscalYearEnd(12, 31)
class Interval(Enum):
    """The possible intervals."""

    YEAR = "year"
    QUARTER = "quarter"
    MONTH = "month"
    WEEK = "week"
    DAY = "day"

    @property
    def label(self) -> str:
        """The translated label for the interval."""
        # Translated lazily on access, as gettext needs an app context.
        labels: dict[Interval, str] = {
            Interval.YEAR: gettext("Yearly"),
            Interval.QUARTER: gettext("Quarterly"),
            Interval.MONTH: gettext("Monthly"),
            Interval.WEEK: gettext("Weekly"),
            Interval.DAY: gettext("Daily"),
        }
        return labels[self]

    @staticmethod
    def get(string: str) -> Interval:
        """Return the enum member for a string, defaulting to MONTH."""
        try:
            return Interval[string.upper()]
        except KeyError:
            return Interval.MONTH

    def format_date(self, date: datetime.date) -> str:
        """Format a date for this interval for human consumption."""
        if self is Interval.YEAR:
            return date.strftime("%Y")
        if self is Interval.QUARTER:
            return f"{date.year}Q{(date.month - 1) // 3 + 1}"
        if self is Interval.MONTH:
            return date.strftime("%b %Y")
        if self is Interval.WEEK:
            return date.strftime("%YW%W")
        return date.strftime("%Y-%m-%d")

    def format_date_filter(self, date: datetime.date) -> str:
        """Format a date for this interval for the Fava time filter."""
        if self is Interval.YEAR:
            return date.strftime("%Y")
        if self is Interval.QUARTER:
            return f"{date.year}-Q{(date.month - 1) // 3 + 1}"
        if self is Interval.MONTH:
            return date.strftime("%Y-%m")
        if self is Interval.WEEK:
            return date.strftime("%Y-W%W")
        return date.strftime("%Y-%m-%d")


def get_prev_interval(
    date: datetime.date,
    interval: Interval,
) -> datetime.date:
    """Get the start date of the interval in which the date falls.

    Args:
        date: A date.
        interval: An interval.

    Returns:
        The start date of the `interval` that contains `date`.
    """
    if interval is Interval.YEAR:
        return datetime.date(date.year, 1, 1)
    if interval is Interval.QUARTER:
        # Fix: use >= so that the first month of a quarter (4, 7, 10)
        # maps to its own quarter's start instead of the previous
        # quarter's, consistent with the YEAR/MONTH/WEEK branches.
        for i in [10, 7, 4]:
            if date.month >= i:
                return datetime.date(date.year, i, 1)
        return datetime.date(date.year, 1, 1)
    if interval is Interval.MONTH:
        return datetime.date(date.year, date.month, 1)
    if interval is Interval.WEEK:
        # Monday of the week containing `date`.
        return date - timedelta(date.weekday())
    return date
def get_next_interval(  # noqa: PLR0911
    date: datetime.date,
    interval: Interval,
) -> datetime.date:
    """Get the start date of the next interval.

    Args:
        date: A date.
        interval: An interval.

    Returns:
        The start date of the next `interval` after `date`, or
        ``datetime.date.max`` if that would be out of range.
    """
    try:
        if interval is Interval.YEAR:
            return datetime.date(date.year + 1, 1, 1)
        if interval is Interval.QUARTER:
            # First month of the quarter after the one containing `date`.
            next_quarter_month = 3 * ((date.month - 1) // 3 + 1) + 1
            if next_quarter_month > 12:
                return datetime.date(date.year + 1, 1, 1)
            return datetime.date(date.year, next_quarter_month, 1)
        if interval is Interval.MONTH:
            year_carry, month_index = divmod(date.month, 12)
            return datetime.date(date.year + year_carry, month_index + 1, 1)
        if interval is Interval.WEEK:
            # The next Monday.
            return date + timedelta(days=7 - date.weekday())
        if interval is Interval.DAY:
            return date + timedelta(days=1)
    except (ValueError, OverflowError):
        # Past the maximum representable date.
        return datetime.date.max
    raise NotImplementedError
def interval_ends(
    first: datetime.date,
    last: datetime.date,
    interval: Interval,
) -> Iterator[datetime.date]:
    """Yield the boundaries of the intervals spanning ``first``..``last``."""
    # Start with the beginning of the interval that contains `first`.
    yield get_prev_interval(first, interval)
    current = first
    while current < last:
        current = get_next_interval(current, interval)
        yield current
# Reused for end_inclusive below.
ONE_DAY = timedelta(days=1)


@dataclass
class DateRange:
    """A range of dates, usually matching an interval."""

    #: The inclusive start date of this range of dates.
    begin: datetime.date

    #: The exclusive end date of this range of dates.
    end: datetime.date

    @property
    def end_inclusive(self) -> datetime.date:
        """The last day of this interval."""
        return self.end - ONE_DAY
def dateranges(
    begin: datetime.date,
    end: datetime.date,
    interval: Interval,
) -> Iterable[DateRange]:
    """Get date ranges for the given begin and end date.

    Args:
        begin: The begin date - the first interval date range will
               include this date
        end: The end date - the last interval will end on or after this
             date
        interval: The type of interval to generate ranges for.

    Yields:
        A ``DateRange`` for each interval between ``begin`` and ``end``.
    """
    # Consecutive boundaries form the half-open ranges.
    boundaries = list(interval_ends(begin, end, interval))
    for range_begin, range_end in zip(boundaries, boundaries[1:]):
        yield DateRange(range_begin, range_end)
def substitute(string: str, fye: FiscalYearEnd | None = None) -> str:
    """Replace variables referring to the current day.

    Args:
        string: A string, possibly containing variables for today.
        fye: Use a specific fiscal-year-end

    Returns:
        A string, where variables referring to the current day, like 'year' or
        'week' have been replaced by the corresponding string understood by
        :func:`parse_date`. Can compute addition and subtraction.
    """
    # pylint: disable=too-many-locals
    today = datetime.date.today()

    for match in VARIABLE_RE.finditer(string):
        complete_match, interval, plusminus_, mod_ = match.group(0, 1, 2, 3)
        # Optional offset like "+3" / "-2"; defaults to 0 when absent.
        mod = int(mod_) if mod_ else 0
        plusminus = 1 if plusminus_ == "+" else -1
        if interval == "fiscal_year":
            year = today.year
            start, end = get_fiscal_period(year, fye)
            # If today is on or past this fiscal year's (exclusive) end,
            # the current fiscal year is the next one.
            if end and today >= end:
                year += 1
            year += plusminus * mod
            string = string.replace(complete_match, f"FY{year}")
        if interval == "year":
            year = today.year + plusminus * mod
            string = string.replace(complete_match, str(year))
        if interval == "fiscal_quarter":
            # Work from the first of the month, offset in whole quarters.
            target = month_offset(today.replace(day=1), plusminus * mod * 3)
            start, end = get_fiscal_period(target.year, fye)
            if start and start.day != 1:
                raise ValueError(
                    "Cannot use fiscal_quarter if fiscal year "
                    "does not start on first of the month",
                )
            if end and target >= end:
                start = end
            if start:
                quarter = int(((target.month - start.month) % 12) / 3)
                string = string.replace(
                    complete_match,
                    f"FY{start.year + 1}-Q{(quarter % 4) + 1}",
                )
        if interval == "quarter":
            quarter_today = (today.month - 1) // 3 + 1
            # Offsets may cross year boundaries in either direction.
            year = today.year + (quarter_today + plusminus * mod - 1) // 4
            quarter = (quarter_today + plusminus * mod - 1) % 4 + 1
            string = string.replace(complete_match, f"{year}-Q{quarter}")
        if interval == "month":
            year = today.year + (today.month + plusminus * mod - 1) // 12
            month = (today.month + plusminus * mod - 1) % 12 + 1
            string = string.replace(complete_match, f"{year}-{month:02}")
        if interval == "week":
            delta = timedelta(plusminus * mod * 7)
            string = string.replace(
                complete_match,
                (today + delta).strftime("%Y-W%W"),
            )
        if interval == "day":
            delta = timedelta(plusminus * mod)
            string = string.replace(
                complete_match,
                (today + delta).isoformat(),
            )
    return string
def parse_date(  # noqa: PLR0911
    string: str,
    fye: FiscalYearEnd | None = None,
) -> tuple[datetime.date | None, datetime.date | None]:
    """Parse a date.

    Example of supported formats:

    - 2010-03-15, 2010-03, 2010
    - 2010-W01, 2010-Q3
    - FY2012, FY2012-Q2

    Ranges of dates can be expressed in the following forms:

    - start - end
    - start to end

    where start and end look like one of the above examples

    Args:
        string: A date(range) in our custom format.
        fye: The fiscal year end to consider.

    Returns:
        A tuple (start, end) of dates.
    """
    string = string.strip().lower()
    if not string:
        return None, None
    # Replace relative variables like "year" or "month-1" first.
    string = substitute(string, fye).lower()

    match = IS_RANGE_RE.match(string)
    if match:
        # A range: start of the first part to end of the second part.
        return (
            parse_date(match.group(1), fye)[0],
            parse_date(match.group(2), fye)[1],
        )

    match = YEAR_RE.match(string)
    if match:
        year = int(match.group(0))
        start = datetime.date(year, 1, 1)
        return start, get_next_interval(start, Interval.YEAR)

    match = MONTH_RE.match(string)
    if match:
        year, month = map(int, match.group(1, 2))
        start = datetime.date(year, month, 1)
        return start, get_next_interval(start, Interval.MONTH)

    match = DAY_RE.match(string)
    if match:
        year, month, day = map(int, match.group(1, 2, 3))
        start = datetime.date(year, month, day)
        return start, get_next_interval(start, Interval.DAY)

    match = WEEK_RE.match(string)
    if match:
        year, week = map(int, match.group(1, 2))
        # "%Y%W%w" with weekday "1": the Monday of the given week.
        date_str = f"{year}{week}1"
        first_week_day = datetime.datetime.strptime(date_str, "%Y%W%w").date()
        return first_week_day, get_next_interval(first_week_day, Interval.WEEK)

    match = QUARTER_RE.match(string)
    if match:
        year, quarter = map(int, match.group(1, 2))
        quarter_first_day = datetime.date(year, (quarter - 1) * 3 + 1, 1)
        return (
            quarter_first_day,
            get_next_interval(quarter_first_day, Interval.QUARTER),
        )

    match = FY_RE.match(string)
    if match:
        year = int(match.group(1))
        return get_fiscal_period(year, fye)

    match = FY_QUARTER_RE.match(string)
    if match:
        year, quarter = map(int, match.group(1, 2))
        return get_fiscal_period(year, fye, quarter)

    return None, None
def month_offset(date: datetime.date, months: int) -> datetime.date:
    """Offset a date by a given number of months.

    Maintains the day, unless that day is invalid in the target month,
    in which case a ValueError is raised.
    """
    total = date.month - 1 + months
    # // and % implement the same carry logic as divmod.
    return date.replace(year=date.year + total // 12, month=total % 12 + 1)
def parse_fye_string(fye: str) -> FiscalYearEnd | None:
    """Parse a string option for the fiscal year end.

    Args:
        fye: The end of the fiscal year, as a "%m-%d" string.

    Returns:
        The parsed ``FiscalYearEnd``, or None if parsing failed.
    """
    # Parse against a fixed dummy year just to validate month and day.
    try:
        parsed = datetime.datetime.strptime(f"2001-{fye}", "%Y-%m-%d")
    except ValueError:
        return None
    return FiscalYearEnd(parsed.month, parsed.day)
def get_fiscal_period(
    year: int,
    fye: FiscalYearEnd | None,
    quarter: int | None = None,
) -> tuple[datetime.date | None, datetime.date | None]:
    """Calculate fiscal periods.

    Uses the fava option "fiscal-year-end" which should be in "%m-%d" format.
    Defaults to calendar year [12-31]

    Args:
        year: An integer year
        fye: End date for period in "%m-%d" format
        quarter: one of [None, 1, 2, 3 or 4]

    Returns:
        A tuple (start, end) of dates; (None, None) when the combination
        of fye and quarter is not meaningful.
    """
    if fye is None:
        start_date = datetime.date(year, 1, 1)
    else:
        # The fiscal year starts the day after the previous year's end.
        start_date = datetime.date(year - 1, fye.month, fye.day) + timedelta(
            days=1,
        )
        # Special case 02-28 because of leap years
        if (fye.month, fye.day) == (2, 28):
            start_date = start_date.replace(month=3, day=1)
    if quarter is None:
        return start_date, start_date.replace(year=start_date.year + 1)
    if start_date.day != 1:
        # quarters make no sense in jurisdictions where period starts
        # on a date (UK etc)
        return None, None
    if not 1 <= quarter <= 4:
        return None, None
    if quarter > 1:
        start_date = month_offset(start_date, (quarter - 1) * 3)
    return start_date, month_offset(start_date, 3)
def days_in_daterange(
    start_date: datetime.date,
    end_date: datetime.date,
) -> Iterator[datetime.date]:
    """Yield every date in the half-open interval [start_date, end_date).

    Args:
        start_date: A start date.
        end_date: An end date (exclusive).

    Yields:
        All days from `start_date` up to but not including `end_date`.
    """
    current = start_date
    while current < end_date:
        yield current
        current += timedelta(days=1)
def number_of_days_in_period(interval: Interval, date: datetime.date) -> int:
    """Get number of days in the surrounding interval.

    Args:
        interval: An interval.
        date: A date.

    Returns:
        The number of days in the `interval` containing the given date.
    """
    if interval is Interval.DAY:
        return 1
    if interval is Interval.WEEK:
        return 7
    if interval is Interval.MONTH:
        month_start = datetime.date(date.year, date.month, 1)
        month_end = get_next_interval(month_start, Interval.MONTH)
        return (month_end - month_start).days
    if interval is Interval.QUARTER:
        # First month of the containing quarter (1, 4, 7 or 10).
        quarter_start = datetime.date(date.year, 3 * ((date.month - 1) // 3) + 1, 1)
        quarter_end = get_next_interval(quarter_start, Interval.QUARTER)
        return (quarter_end - quarter_start).days
    if interval is Interval.YEAR:
        year_start = datetime.date(date.year, 1, 1)
        year_end = get_next_interval(year_start, Interval.YEAR)
        return (year_end - year_start).days
    raise NotImplementedError
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,459
|
beancount/fava
|
refs/heads/main
|
/tests/test_internal_api.py
|
"""Tests for Fava's main Flask app."""
from __future__ import annotations
from typing import TYPE_CHECKING
from fava.core.charts import dumps
from fava.internal_api import ChartApi
from fava.internal_api import get_ledger_data
if TYPE_CHECKING: # pragma: no cover
from flask import Flask
from .conftest import SnapshotFunc
def test_get_ledger_data(app: Flask, snapshot: SnapshotFunc) -> None:
    """The serialised ledger data matches the snapshot."""
    with app.test_request_context("/long-example/"):
        # Run the before_request handlers to set up the ledger globals.
        app.preprocess_request()
        snapshot(dumps(get_ledger_data()))
def test_chart_api(app: Flask, snapshot: SnapshotFunc) -> None:
    """The serialisation and generation of charts works."""
    with app.test_request_context("/long-example/"):
        # Run the before_request handlers to set up the ledger globals.
        app.preprocess_request()
        snapshot(dumps([ChartApi.hierarchy("Assets")]))
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,460
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_charts.py
|
from __future__ import annotations
from decimal import Decimal
from typing import TYPE_CHECKING
import pytest
from fava.core.charts import dumps
from fava.util.date import Interval
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
from .conftest import GetFavaLedger
from .conftest import SnapshotFunc
def test_interval_totals(
    small_example_ledger: FavaLedger,
    snapshot: SnapshotFunc,
) -> None:
    """Monthly expense totals match the snapshots for both conversions."""
    filtered = small_example_ledger.get_filtered()
    for conversion in ["at_cost", "USD"]:
        data = small_example_ledger.charts.interval_totals(
            filtered,
            Interval.MONTH,
            "Expenses",
            conversion,
        )
        snapshot(dumps(data))
def test_interval_totals_inverted(
    small_example_ledger: FavaLedger,
    snapshot: SnapshotFunc,
) -> None:
    """Inverted monthly expense totals match the snapshots."""
    filtered = small_example_ledger.get_filtered()
    for conversion in ["at_cost", "USD"]:
        # Same as test_interval_totals, but with inverted signs.
        data = small_example_ledger.charts.interval_totals(
            filtered,
            Interval.MONTH,
            "Expenses",
            conversion,
            invert=True,
        )
        snapshot(dumps(data))
def test_linechart_data(
    example_ledger: FavaLedger,
    snapshot: SnapshotFunc,
) -> None:
    """Linechart data matches the snapshots for all conversions."""
    filtered = example_ledger.get_filtered()
    for conversion in ["at_cost", "units", "at_value", "USD"]:
        data = example_ledger.charts.linechart(
            filtered,
            "Assets:Testing:MultipleCommodities",
            conversion,
        )
        snapshot(dumps(data))
def test_net_worth(example_ledger: FavaLedger, snapshot: SnapshotFunc) -> None:
    """Monthly net worth in USD matches the snapshot."""
    filtered = example_ledger.get_filtered()
    data = example_ledger.charts.net_worth(filtered, Interval.MONTH, "USD")
    snapshot(dumps(data))
def test_net_worth_off_by_one(
    snapshot: SnapshotFunc,
    get_ledger: GetFavaLedger,
) -> None:
    """Net worth for the off-by-one regression ledger matches snapshots."""
    off_by_one = get_ledger("off-by-one")
    off_by_one_filtered = off_by_one.get_filtered()
    # Check both a daily and a monthly interval.
    for interval in [Interval.DAY, Interval.MONTH]:
        data = off_by_one.charts.net_worth(
            off_by_one_filtered,
            interval,
            "at_value",
        )
        snapshot(dumps(data))
def test_hierarchy(example_ledger: FavaLedger) -> None:
    """The account hierarchy chart aggregates children balances."""
    filtered = example_ledger.get_filtered()
    data = example_ledger.charts.hierarchy(filtered, "Assets", "at_cost")
    assert data.balance_children == {
        "IRAUSD": Decimal("7200.00"),
        "USD": Decimal("94320.27840"),
        "VACHR": Decimal("-82"),
    }
    # The root node itself carries no balance, only its children do.
    assert data.balance == {}
    etrade = data.children[1].children[2]
    assert etrade.account == "Assets:US:ETrade"
    assert etrade.balance_children == {"USD": Decimal("23137.54")}
@pytest.mark.parametrize(
    "query",
    [
        "select account, sum(position) group by account",
        "select joinstr(tags), sum(position) group by joinstr(tags)",
        "select date, sum(position) group by date",
    ],
)
def test_query(
    example_ledger: FavaLedger,
    snapshot: SnapshotFunc,
    query: str,
) -> None:
    """Chart data for BQL query results matches the snapshots."""
    # Run the query over all entries; only the result types/rows matter.
    _, types, rows = example_ledger.query_shell.execute_query(
        example_ledger.all_entries,
        query,
    )
    data = example_ledger.charts.query(types, rows)
    snapshot(dumps(data))
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,461
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/group_entries.py
|
"""Entries grouped by type."""
from __future__ import annotations
from collections import defaultdict
from typing import NamedTuple
from fava.beans import abc
from fava.beans.account import get_entry_accounts
class EntriesByType(NamedTuple):
    """Entries grouped by type.

    The field names match the beancount directive class names exactly.
    """

    Balance: list[abc.Balance]
    Close: list[abc.Close]
    Commodity: list[abc.Commodity]
    Custom: list[abc.Custom]
    Document: list[abc.Document]
    Event: list[abc.Event]
    Note: list[abc.Note]
    Open: list[abc.Open]
    Pad: list[abc.Pad]
    Price: list[abc.Price]
    Query: list[abc.Query]
    Transaction: list[abc.Transaction]


def group_entries_by_type(entries: list[abc.Directive]) -> EntriesByType:
    """Group entries by type.

    Arguments:
        entries: A list of entries to group.

    Returns:
        A namedtuple containing the grouped lists of entries.
    """
    # One fresh list per field instead of twelve hard-coded literals, so
    # this stays correct if a directive type is ever added.  (A generator
    # is used so each list is a distinct object.)
    entries_by_type = EntriesByType(*([] for _ in EntriesByType._fields))
    for entry in entries:
        # Dispatch on the directive's class name, which matches a field.
        getattr(entries_by_type, entry.__class__.__name__).append(entry)
    return entries_by_type
class TransactionPosting(NamedTuple):
    """Pair of a transaction and one of its postings."""

    # The transaction that the posting belongs to.
    transaction: abc.Transaction
    # The posting itself.
    posting: abc.Posting
def group_entries_by_account(
    entries: list[abc.Directive],
) -> dict[str, list[abc.Directive | TransactionPosting]]:
    """Group entries by account.

    Arguments:
        entries: A list of entries.

    Returns:
        A dict (sorted by account name) mapping account names to their
        entries.
    """
    grouped: dict[str, list[abc.Directive | TransactionPosting]] = defaultdict(
        list,
    )
    for entry in entries:
        if isinstance(entry, abc.Transaction):
            # A transaction shows up under each of its postings' accounts.
            for posting in entry.postings:
                grouped[posting.account].append(
                    TransactionPosting(entry, posting),
                )
        else:
            for account in get_entry_accounts(entry):
                grouped[account].append(entry)
    return dict(sorted(grouped.items()))
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,462
|
beancount/fava
|
refs/heads/main
|
/contrib/pythonanywhere/fava/fava_pythonanywhere_com_wsgi.py
|
"""fava wsgi application"""
from __future__ import annotations

from fava.application import create_app

# WSGI servers (e.g. on PythonAnywhere) look for this module-level
# ``application`` callable; it serves the listed beancount files.
application = create_app(
    [
        "/home/fava/example.beancount",
        "/home/fava/budgets-example.beancount",
        "/home/fava/huge-example.beancount",
    ],
)
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,463
|
beancount/fava
|
refs/heads/main
|
/contrib/scripts.py
|
#!/usr/bin/env python3
"""Various utilities."""
from __future__ import annotations
import json
from os import environ
from pathlib import Path
from typing import Iterable
import requests
from beancount.query import query_env
from beancount.query import query_parser
from click import echo
from click import group
from click import UsageError
from fava import LOCALES
BASE_PATH = Path(__file__).parent.parent
FAVA_PATH = BASE_PATH / "src" / "fava"
# Click group that all commands below attach to via @cli.command().
@group()
def cli() -> None:
    """Various utilities."""
def _env_to_list(
attributes: dict[str, str | tuple[str, str]],
) -> Iterable[str]:
for name in attributes:
yield name[0] if isinstance(name, tuple) else name
@cli.command()
def generate_bql_grammar_json() -> None:
    """Generate a JSON file with BQL grammar attributes.

    The online code editor needs to have the list of available columns,
    functions, and keywords for syntax highlighting and completion.

    Should be run whenever the BQL changes."""
    target_env = query_env.TargetsEnvironment()
    # De-duplicate and sort so the generated file is deterministic.
    data = {
        "columns": sorted(set(_env_to_list(target_env.columns))),
        "functions": sorted(set(_env_to_list(target_env.functions))),
        "keywords": sorted({kw.lower() for kw in query_parser.Lexer.keywords}),
    }
    path = BASE_PATH / "frontend" / "src" / "codemirror" / "bql-grammar.ts"
    # Written as a TypeScript module so the frontend imports it directly.
    path.write_text("export default " + json.dumps(data))
@cli.command()
def download_translations() -> None:
    """Fetch updated translations from POEditor.com.

    Requires the POEDITOR_TOKEN environment variable to be set.
    """
    token = environ.get("POEDITOR_TOKEN")
    if not token:
        raise UsageError(
            "The POEDITOR_TOKEN environment variable needs to be set.",
        )
    # Fetch the .po catalog for every locale that Fava supports.
    for language in LOCALES:
        download_from_poeditor(language, token)
@cli.command()
def upload_translations() -> None:
    """Upload .pot message catalog to POEditor.com.

    Requires the POEDITOR_TOKEN environment variable to be set.
    """
    token = environ.get("POEDITOR_TOKEN")
    if not token:
        raise UsageError(
            "The POEDITOR_TOKEN environment variable needs to be set.",
        )
    path = FAVA_PATH / "translations" / "messages.pot"
    echo(f"Uploading message catalog: {path}")
    # 90283 is the id of the Fava project on POEditor.
    data = {
        "api_token": token,
        "id": 90283,
        "updating": "terms",
    }
    with path.open("rb") as file:
        files = {"file": file}
        request = requests.post(
            "https://api.poeditor.com/v2/projects/upload",
            data=data,
            files=files,
            timeout=10,
        )
    echo("Done: " + str(request.json()["result"]["terms"]))
# For these languages, the name on POEDITOR is off.
POEDITOR_LANGUAGE_NAME = {"zh": "zh-CN", "zh_Hant_TW": "zh-TW"}


def download_from_poeditor(language: str, token: str) -> None:
    """Download .po-file from POEditor and save to disk.

    Args:
        language: The locale to download, e.g. "de".
        token: The POEditor API token.
    """
    echo(f'Downloading .po-file for language "{language}"')
    poeditor_name = POEDITOR_LANGUAGE_NAME.get(language, language)
    data = {
        "api_token": token,
        "id": 90283,
        "language": poeditor_name,
        "type": "po",
    }
    # First request an export; the response contains a download URL.
    request = requests.post(
        "https://api.poeditor.com/v2/projects/export",
        data=data,
        timeout=10,
    )
    url = request.json()["result"]["url"]
    content = requests.get(url, timeout=10).content
    folder = FAVA_PATH / "translations" / language / "LC_MESSAGES"
    if not folder.exists():
        folder.mkdir(parents=True)
    path = folder / "messages.po"
    path.write_bytes(content)
    echo(f'Downloaded to "{path}"')
if __name__ == "__main__":
    # Allow running this file directly as a script.
    cli()
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,464
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_attributes.py
|
from __future__ import annotations
from typing import TYPE_CHECKING
from fava.core.attributes import get_active_years
from fava.util.date import FiscalYearEnd
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.core import FavaLedger
def test_get_active_years(load_doc_entries: list[Directive]) -> None:
    # NOTE: the docstring below is the beancount input consumed by the
    # load_doc_entries fixture - it must not be changed or reworded.
    """
    2010-11-12 * "test"
        Assets:T   4.00 USD
        Expenses:T
    2011-11-12 * "test"
        Assets:T   4.00 USD
        Expenses:T
    2012-12-12 * "test"
        Assets:T   4.00 USD
        Expenses:T
    """
    # Calendar years (fiscal year ends on 12-31).
    assert get_active_years(load_doc_entries, FiscalYearEnd(12, 31)) == [
        "2012",
        "2011",
        "2010",
    ]
    # With a 12-01 year end, the 2012-12-12 entry falls into FY2013.
    assert get_active_years(load_doc_entries, FiscalYearEnd(12, 1)) == [
        "FY2013",
        "FY2011",
        "FY2010",
    ]
    # With an 11-01 year end, all entries shift to the next fiscal year.
    assert get_active_years(load_doc_entries, FiscalYearEnd(11, 1)) == [
        "FY2013",
        "FY2012",
        "FY2011",
    ]
def test_payee_accounts(example_ledger: FavaLedger) -> None:
    """An unknown payee yields the plain account list; a known one is ranked."""
    attributes = example_ledger.attributes
    all_accounts = attributes.accounts
    # Unknown payee: no ranking information, the full list comes back as-is.
    assert attributes.payee_accounts("NOTAPAYEE") == all_accounts
    # Known payee: accounts used with this payee are ranked to the front,
    # but the list still contains every account.
    ranked = attributes.payee_accounts("Verizon Wireless")
    assert ranked[:2] == ["Assets:US:BofA:Checking", "Expenses:Home:Phone"]
    assert len(ranked) == len(all_accounts)
def test_payee_transaction(example_ledger: FavaLedger) -> None:
    """Look up the last transaction for a payee."""
    attributes = example_ledger.attributes
    # No transaction exists for an unknown payee.
    assert attributes.payee_transaction("NOTAPAYEE") is None
    transaction = attributes.payee_transaction("BayBook")
    assert transaction is not None
    assert str(transaction.date) == "2016-05-05"
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,465
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_number.py
|
from __future__ import annotations
from decimal import Decimal
from typing import TYPE_CHECKING
from babel.core import Locale
from fava.core.number import get_locale_format
if TYPE_CHECKING: # pragma: no cover
import pytest
from fava.core import FavaLedger
def test_get_locale_format() -> None:
    """The locale-aware formatter renders with the requested precision."""
    danish = Locale.parse("da_DK")
    one = Decimal("1.00")
    # Precision is capped, so requesting 100 or 14 digits renders the same.
    assert get_locale_format(danish, 100)(one) == "1,00000000000000"
    assert get_locale_format(danish, 14)(one) == "1,00000000000000"
def test_precisions(example_ledger: FavaLedger) -> None:
    """Display precisions inferred for the example ledger's commodities."""
    expected = {
        "ABC": 0,
        "GLD": 0,
        "IRAUSD": 2,
        "ITOT": 0,
        "RGAGX": 3,
        "USD": 2,
        "VACHR": 0,
        "VBMPX": 3,
        "VEA": 0,
        "VHT": 0,
        "VMMXX": 4,
        "XYZ": 0,
    }
    assert example_ledger.format_decimal.precisions == expected
def test_format_decimal(example_ledger: FavaLedger) -> None:
    """Formatting uses the commodity's precision (two digits for USD)."""
    fmt = example_ledger.format_decimal
    cases = [
        (Decimal("12.333"), "USD", "12.33"),
        (Decimal("12.33"), "USD", "12.33"),
        (Decimal("12341234.33"), "USD", "12341234.33"),
        # A missing commodity also renders with two digits here.
        (Decimal("12.333"), None, "12.33"),
    ]
    for number, currency, rendered in cases:
        assert fmt(number, currency) == rendered
def test_format_decimal_locale(
    example_ledger: FavaLedger,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Number formatting follows the ledger's locale option."""
    fmt = example_ledger.format_decimal

    # Indian grouping (lakh separators).
    monkeypatch.setattr(example_ledger.fava_options, "locale", "en_IN")
    fmt.load_file()
    assert fmt(Decimal("1111111.333"), "USD") == "11,11,111.33"
    assert fmt(Decimal("11.333"), "USD") == "11.33"
    assert fmt(Decimal("11.3333"), None) == "11.33"

    # German grouping and decimal comma.
    monkeypatch.setattr(example_ledger.fava_options, "locale", "de_DE")
    fmt.load_file()
    assert fmt(Decimal("1111111.333"), "USD") == "1.111.111,33"
    fmt.load_file()
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,466
|
beancount/fava
|
refs/heads/main
|
/tests/test_core_filters.py
|
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING
import pytest
from beancount.core.account import has_component
from fava.beans import create
from fava.beans.account import get_entry_accounts
from fava.core.filters import AccountFilter
from fava.core.filters import AdvancedFilter
from fava.core.filters import FilterError
from fava.core.filters import FilterSyntaxLexer
from fava.core.filters import Match
from fava.core.filters import TimeFilter
if TYPE_CHECKING: # pragma: no cover
from fava.core import FavaLedger
def test_match() -> None:
    """Match searches for the pattern anywhere in the given string."""
    matches_asdf = Match("asdf")
    assert matches_asdf("asdf")
    assert matches_asdf("asdfasdf")
    assert matches_asdf("aasdfasdf")
    assert not matches_asdf("fdsadfs")
    # "^" anchors the pattern at the start.
    anchored = Match("^asdf")
    assert anchored("asdfasdf")
    assert not anchored("aasdfasdf")
    # An invalid regex like "(((" still matches its own literal text.
    assert Match("(((")("(((")
def test_lexer_basic() -> None:
    """Tags, links, negation and all quoting styles tokenise correctly."""
    lex = FilterSyntaxLexer().lex

    def tokens(data: str) -> list[tuple[str, str]]:
        return [(tok.type, tok.value) for tok in lex(data)]

    assert tokens("#some_tag ^some_link -^some_link") == [
        ("TAG", "some_tag"),
        ("LINK", "some_link"),
        ("-", "-"),
        ("LINK", "some_link"),
    ]
    # Single-quoted, bare and double-quoted all yield the same STRING token.
    assert tokens("'string' string \"string\"") == [("STRING", "string")] * 3
    # "|" is not part of the filter syntax.
    with pytest.raises(FilterError):
        list(lex("|"))
def test_lexer_literals_in_string() -> None:
    """Digits and dashes are allowed inside a bare (unquoted) string."""
    lexer = FilterSyntaxLexer()
    result = [(tok.type, tok.value) for tok in lexer.lex("string-2-2 string")]
    assert result == [("STRING", "string-2-2"), ("STRING", "string")]
def test_lexer_key() -> None:
    """A `key:value` pair produces a KEY token followed by a STRING token."""
    lexer = FilterSyntaxLexer()
    data = 'payee:asdfasdf ^some_link somekey:"testtest" '
    expected = [
        ("KEY", "payee"),
        ("STRING", "asdfasdf"),
        ("LINK", "some_link"),
        ("KEY", "somekey"),
        ("STRING", "testtest"),
    ]
    assert [(tok.type, tok.value) for tok in lexer.lex(data)] == expected
def test_lexer_parentheses() -> None:
    """Parentheses used for grouping are emitted as their own tokens."""
    lexer = FilterSyntaxLexer()
    data = "(payee:asdfasdf ^some_link) (somekey:'testtest')"
    expected = [
        ("(", "("),
        ("KEY", "payee"),
        ("STRING", "asdfasdf"),
        ("LINK", "some_link"),
        (")", ")"),
        ("(", "("),
        ("KEY", "somekey"),
        ("STRING", "testtest"),
        (")", ")"),
    ]
    assert [(tok.type, tok.value) for tok in lexer.lex(data)] == expected
def test_filterexception() -> None:
    """Malformed filters raise FilterError with a descriptive message."""
    # Unterminated quoted string.
    with pytest.raises(FilterError, match='Illegal character """ in filter'):
        AdvancedFilter('who:"fff')
    # Unbalanced parenthesis.
    with pytest.raises(FilterError, match="Failed to parse filter"):
        AdvancedFilter('any(who:"Martin"')
@pytest.mark.parametrize(
    ("string", "number"),
    [
        # (filter expression, expected count of matching entries in the
        # long-example ledger, which contains 1826 entries in total)
        ('any(account:"Assets:US:ETrade")', 48),
        ('all(-account:"Assets:US:ETrade")', 1826 - 48),
        ("#test", 2),
        ("#test,#nomatch", 2),
        ("-#nomatch", 1826),
        ("-#nomatch -#nomatch", 1826),
        ("-#nomatch -#test", 1824),
        ("-#test", 1824),
        ("^test-link", 3),
        ("^test-link,#test", 4),
        ("^test-link -#test", 2),
        ("payee:BayBook", 62),
        ("BayBook", 62),
        ("(payee:BayBook, #test,#nomatch) -#nomatch", 64),
        ('payee:"BayBo.*"', 62),
        ('payee:"baybo.*"', 62),
        (r'number:"\d*"', 3),
        ('not_a_meta_key:".*"', 0),
        ('name:".*ETF"', 4),
        ('name:".*ETF$"', 3),
        ('name:".*etf"', 4),
        ('name:".*etf$"', 3),
        ('any(overage:"GB$")', 1),
    ],
)
def test_advanced_filter(
    example_ledger: FavaLedger,
    string: str,
    number: int,
) -> None:
    """AdvancedFilter keeps exactly the expected number of entries."""
    filter_ = AdvancedFilter(string)
    filtered_entries = filter_.apply(example_ledger.all_entries)
    assert len(filtered_entries) == number
def test_null_meta_posting() -> None:
    """A posting without metadata is not matched by an any(meta:...) filter."""
    meta_filter = AdvancedFilter('any(some_meta:"1")')
    txn = create.transaction(
        {},
        datetime.date(2017, 12, 12),
        "*",
        "",
        "",
        frozenset(),
        frozenset(),
        [create.posting("Assets:ETrade:Cash", "100 USD")],
    )
    # The created posting carries no metadata at all ...
    assert txn.postings[0].meta is None
    # ... so the filter must not match (and must not crash on None).
    assert not meta_filter.apply([txn])
def test_account_filter(example_ledger: FavaLedger) -> None:
    """AccountFilter matches by account component or by regex."""
    by_component = AccountFilter("Assets")
    matched = by_component.apply(example_ledger.all_entries)
    assert len(matched) == 541
    # Every surviving entry must touch at least one Assets account.
    for entry in matched:
        accounts = get_entry_accounts(entry)
        assert any(has_component(account, "Assets") for account in accounts)

    by_regex = AccountFilter(".*US:State")
    assert len(by_regex.apply(example_ledger.all_entries)) == 67
def test_time_filter(example_ledger: FavaLedger) -> None:
    """TimeFilter parses a period string and keeps entries in that range."""
    options = example_ledger.options
    fava_options = example_ledger.fava_options

    year_2017 = TimeFilter(options, fava_options, "2017")
    date_range = year_2017.date_range
    assert date_range is not None
    # The end of the range is exclusive.
    assert date_range.begin == datetime.date(2017, 1, 1)
    assert date_range.end == datetime.date(2018, 1, 1)
    assert len(year_2017.apply(example_ledger.all_entries)) == 83

    # A period before any entry matches nothing.
    year_1000 = TimeFilter(options, fava_options, "1000")
    assert not year_1000.apply(example_ledger.all_entries)

    # An unparseable period string is rejected up front.
    with pytest.raises(FilterError):
        TimeFilter(options, fava_options, "no_date")
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,467
|
beancount/fava
|
refs/heads/main
|
/src/fava/core/accounts.py
|
"""Account close date and metadata."""
from __future__ import annotations
import datetime
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from typing import TYPE_CHECKING
from fava.beans.abc import Balance
from fava.beans.abc import Close
from fava.beans.flags import FLAG_UNREALIZED
from fava.beans.funcs import hash_entry
from fava.core.conversion import units
from fava.core.group_entries import group_entries_by_account
from fava.core.group_entries import TransactionPosting
from fava.core.module_base import FavaModule
from fava.core.tree import Tree
if TYPE_CHECKING: # pragma: no cover
from fava.beans.abc import Directive
from fava.beans.abc import Meta
from fava.core.tree import TreeNode
def get_last_entry(
    txn_postings: list[Directive | TransactionPosting],
) -> Directive | None:
    """Return the account's last entry, ignoring unrealized-gain transactions.

    Args:
        txn_postings: The entries/postings of the account, in date order.

    Returns:
        The last real entry (a transaction is unwrapped from its
        TransactionPosting), or None if there is none.
    """
    for item in reversed(txn_postings):
        if isinstance(item, TransactionPosting):
            # Synthetic unrealized-gain transactions do not count.
            if item.transaction.flag == FLAG_UNREALIZED:
                continue
            return item.transaction
        return item
    return None
def uptodate_status(
    txn_postings: list[Directive | TransactionPosting],
) -> str | None:
    """Status of the last balance or transaction.

    Args:
        txn_postings: The TransactionPosting for the account.

    Returns:
        "green" for a passing balance check, "red" for a failing one,
        "yellow" when the most recent relevant entry is a regular
        transaction, and None when there is neither.
    """
    for item in reversed(txn_postings):
        if isinstance(item, Balance):
            # diff_amount is only set when the balance check failed.
            return "red" if item.diff_amount else "green"
        is_regular_txn = (
            isinstance(item, TransactionPosting)
            and item.transaction.flag != FLAG_UNREALIZED
        )
        if is_regular_txn:
            return "yellow"
    return None
def balance_string(tree_node: TreeNode) -> str:
    """Render balance directives for the given account for today.

    Args:
        tree_node: The account's tree node.

    Returns:
        One `balance` directive line per commodity in the node's balance
        (in units), each dated today.
    """
    account = tree_node.name
    today = datetime.date.today()
    # Build via join instead of repeated string concatenation.
    return "".join(
        f"{today} balance {account:<28} {pos.number:>15} {pos.currency}\n"
        for pos in units(tree_node.balance).amounts()
    )
@dataclass(frozen=True)
class LastEntry:
    """Date and hash of the last entry for an account."""

    #: The entry date.
    date: datetime.date

    #: The entry hash (used to link to the entry in the frontend).
    entry_hash: str
@dataclass
class AccountData:
    """Holds information about an account."""

    #: The date on which this account is closed (None if it is not closed).
    close_date: datetime.date | None = None

    #: The metadata of the Open entry of this account.
    meta: Meta = field(default_factory=dict)

    #: Uptodate status. Is only computed if the account has a
    #: "fava-uptodate-indication" meta attribute.
    uptodate_status: str | None = None

    #: Balance directive if this account has an uptodate status.
    balance_string: str | None = None

    #: The last entry of the account (unless it is a close Entry)
    last_entry: LastEntry | None = None
class AccountDict(FavaModule, Dict[str, AccountData]):
    """Account info dictionary.

    Maps account names to their :class:`AccountData`; looking up an
    unknown account yields a shared empty instance instead of raising.
    """

    #: Shared fallback for unknown accounts - must never be mutated.
    EMPTY = AccountData()

    def __missing__(self, key: str) -> AccountData:
        # Unlike defaultdict, this does NOT insert the key: all unknown
        # accounts share the single EMPTY instance.
        return self.EMPTY

    def setdefault(
        self,
        key: str,
        _: AccountData | None = None,
    ) -> AccountData:
        """Get the account of the given name, insert one if it is missing."""
        # dict.setdefault cannot be used here: __missing__ would hand back
        # the shared EMPTY object without inserting a fresh, mutable one.
        if key not in self:
            self[key] = AccountData()
        return self[key]

    def load_file(self) -> None:
        """Rebuild the account data from the ledger's entries."""
        self.clear()
        entries_by_account = group_entries_by_account(self.ledger.all_entries)
        tree = Tree(self.ledger.all_entries)
        for open_entry in self.ledger.all_entries_by_type.Open:
            meta = open_entry.meta
            account_data = self.setdefault(open_entry.account)
            account_data.meta = meta
            txn_postings = entries_by_account[open_entry.account]
            last = get_last_entry(txn_postings)
            if last is not None and not isinstance(last, Close):
                account_data.last_entry = LastEntry(
                    date=last.date,
                    entry_hash=hash_entry(last),
                )
            # The uptodate indicator is opt-in via account metadata.
            if meta.get("fava-uptodate-indication"):
                account_data.uptodate_status = uptodate_status(txn_postings)
                # Offer a correcting balance directive unless the last
                # balance check already passed.
                if account_data.uptodate_status != "green":
                    account_data.balance_string = balance_string(
                        tree.get(open_entry.account),
                    )
        for close in self.ledger.all_entries_by_type.Close:
            self.setdefault(close.account).close_date = close.date

    def all_balance_directives(self) -> str:
        """Balance directives for all accounts."""
        return "".join(
            account_details.balance_string
            for account_details in self.values()
            if account_details.balance_string
        )
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,468
|
beancount/fava
|
refs/heads/main
|
/tests/test_templates.py
|
from __future__ import annotations
from typing import TYPE_CHECKING
from flask import get_template_attribute
from fava.beans import create
if TYPE_CHECKING: # pragma: no cover
from flask import Flask
from fava.core import FavaLedger
def test_render_amount(app: Flask, example_ledger: FavaLedger) -> None:
    """render_amount produces a span with the currency name as its title."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        macro = get_template_attribute(
            "macros/_commodity_macros.html",
            "render_amount",
        )
        # Known currency: formatted number plus the currency's full name.
        assert (
            macro(example_ledger, create.amount("10 USD"))
            == '<span class="num" title="US Dollar">10.00 USD</span>'
        )
        # No amount: an empty span.
        assert macro(example_ledger, None) == '<span class="num"></span>'
        # Unknown currency: the code itself is used as the title.
        assert (
            macro(example_ledger, create.amount("10 TEST"))
            == '<span class="num" title="TEST">10.00 TEST</span>'
        )
def test_account_indicator(app: Flask, example_ledger: FavaLedger) -> None:
    """The uptodate indicator is only rendered for known accounts."""
    with app.test_request_context(""):
        macro = get_template_attribute(
            "macros/_account_macros.html",
            "indicator",
        )
        assert not macro(example_ledger, "NONEXISTING")
        # The example checking account has a "yellow" uptodate status.
        rendered = macro(example_ledger, "Assets:US:BofA:Checking")
        assert "yellow" in rendered
def test_account_name(app: Flask, example_ledger: FavaLedger) -> None:
    """account_name renders a link to the account's page."""
    with app.test_request_context("/long-example/"):
        app.preprocess_request()
        macro = get_template_attribute(
            "macros/_account_macros.html",
            "account_name",
        )
        expected = (
            '<a href="/long-example/account/NONEXISTING/"'
            ' class="account">NONEXISTING</a>'
        )
        assert macro(example_ledger, "NONEXISTING") == expected
|
{"/tests/test_application.py": ["/tests/conftest.py"], "/tests/test_serialisation.py": ["/tests/conftest.py"], "/tests/test_json_api.py": ["/tests/conftest.py"], "/tests/test_core_file.py": ["/tests/conftest.py"], "/tests/test_extensions.py": ["/tests/conftest.py"], "/tests/test_core_query_shell.py": ["/tests/conftest.py"], "/tests/test_core_ingest.py": ["/tests/conftest.py"], "/tests/test_core_tree.py": ["/tests/conftest.py"], "/tests/test_internal_api.py": ["/tests/conftest.py"], "/tests/test_core_charts.py": ["/tests/conftest.py"]}
|
31,473
|
tinystone21/cooking_blog
|
refs/heads/master
|
/main/packages.py
|
from django.contrib.auth import authenticate, login, get_user_model
from django.forms import modelformset_factory
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.contrib import messages
from django.urls import reverse_lazy
from django.views.generic import (
ListView, DetailView, DeleteView, FormView,
CreateView, UpdateView, TemplateView
)
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,474
|
tinystone21/cooking_blog
|
refs/heads/master
|
/main/admin.py
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from main.models import Image, Recipe, Category, Comment
class ImageInlineAdmin(admin.TabularInline):
    """Inline editor for a recipe's images (at most five)."""

    model = Image
    fields = ('image', )
    max_num = 5
@admin.register(Recipe)
class RecipeAdmin(admin.ModelAdmin):
    """Recipe admin with its images editable inline."""

    inlines = [ImageInlineAdmin]


# Plain registrations using the default ModelAdmin.
admin.site.register(Category)
admin.site.register(Image)
admin.site.register(Comment)
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,475
|
tinystone21/cooking_blog
|
refs/heads/master
|
/main/service.py
|
from main.forms import UserRegistrationForm
class UserRegistrationService:
    """Creates user accounts from a registration request."""

    @classmethod
    def create_user(cls, request):
        """Validate the posted registration form and create the user.

        Args:
            request: The HTTP request carrying the registration POST data.

        Returns:
            The newly created user, or None when the form did not validate
            (previously an invalid form failed completely silently).
        """
        form = UserRegistrationForm(request.POST)
        if not form.is_valid():
            return None
        # commit=False: the password must be hashed before the row is saved.
        # (email/username are already populated by the ModelForm itself.)
        user = form.save(commit=False)
        user.set_password(form.cleaned_data['password'])
        user.save()
        return user
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,476
|
tinystone21/cooking_blog
|
refs/heads/master
|
/main/urls.py
|
from django.urls import path
from .views import (
MainPageView, CategoryDetailView,
RecipeDetailView, add_recipe, RecipeUpdateView,
RecipeDeleteView, UserRegistrationView, AuthorizationView
)
# Route table for the main app; the `name`s are referenced from templates
# via {% url %} and from views via reverse()/reverse_lazy().
urlpatterns = [
    path('', MainPageView.as_view(), name='home'),
    path(
        'category/<str:slug>/', CategoryDetailView.as_view(),
        name='category'
    ),
    path('recipe-detail/<int:pk>/', RecipeDetailView.as_view(), name='detail'),
    path('add-recipe/', add_recipe, name='add-recipe'),
    path(
        'update-recipe/<int:pk>/',
        RecipeUpdateView.as_view(), name='update-recipe'
    ),
    path(
        'delete-recipe/<int:pk>/', RecipeDeleteView.as_view(),
        name='delete-recipe'
    ),
    path(
        'registration-detail/', UserRegistrationView.as_view(),
        name='registration'
    ),
    path('login/', AuthorizationView.as_view(), name='login')
]
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,477
|
tinystone21/cooking_blog
|
refs/heads/master
|
/account/models.py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom user that authenticates with the email address."""

    # unique=True is required because this is the USERNAME_FIELD.
    email = models.EmailField(unique=True)
    # Display name only - not used for login, so no unique constraint.
    username = models.CharField(max_length=44)
    # NOTE(review): no blank/null here, so forms will require an avatar
    # upload - confirm that is intended.
    image = models.ImageField(upload_to='users')

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']

    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def __str__(self):
        return self.username
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,478
|
tinystone21/cooking_blog
|
refs/heads/master
|
/main/forms.py
|
from datetime import datetime
from django import forms
from django.contrib.auth import get_user_model
from main.models import Recipe, Image, Comment
class RecipeForm(forms.ModelForm):
    """Form for creating/editing a recipe."""

    # BUG FIX: `initial` must be a callable - a plain datetime.now() result
    # is evaluated once at import time, so a long-running process would keep
    # pre-filling a stale date. Django calls the lambda per form instance.
    created = forms.DateTimeField(
        initial=lambda: datetime.now().strftime('%Y-%m-%d'),
        required=False
    )

    class Meta:
        model = Recipe
        fields = '__all__'
class ImageForm(forms.ModelForm):
    """Single image upload; combined into a formset by the add-recipe view."""

    class Meta:
        model = Image
        fields = ('image', )


class CommentForm(forms.ModelForm):
    """Comment text only; recipe and user are filled in by the view."""

    class Meta:
        model = Comment
        fields = ('text', )


class UserRegistrationForm(forms.ModelForm):
    """Registration form for the custom user model.

    NOTE(review): `password` is exposed as a plain model field, so it is
    neither masked nor confirmed; hashing happens in the service layer.
    """

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'username')


class AuthorizationForm(forms.Form):
    """Plain (non-model) email/password login form."""

    email = forms.EmailField()
    password = forms.CharField(max_length=100)
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,479
|
tinystone21/cooking_blog
|
refs/heads/master
|
/main/views.py
|
from main.packages import *
from main.forms import (
RecipeForm, ImageForm, CommentForm,
UserRegistrationForm, AuthorizationForm
)
from main.models import Recipe, Comment, Category, Image
from main.service import UserRegistrationService
User = get_user_model()
class MainPageView(ListView):
    """Landing page listing all recipes."""

    model = Recipe
    template_name = 'index.html'
    context_object_name = 'recipes'
class CategoryDetailView(DetailView):
    """Category page with the recipes that belong to it."""

    model = Category
    template_name = 'category-detail.html'
    context_object_name = 'category'

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Remember the slug so get_context_data can filter the recipes.
        self.slug = kwargs.get('slug', None)
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): filtering category_id by the URL *slug* only works
        # if the category's primary key is the slug - verify the model.
        context['recipes'] = Recipe.objects.filter(category_id=self.slug)
        return context
class RecipeDetailView(DetailView, FormView):
    """Recipe page with its image gallery and a comment form."""

    model = Recipe
    template_name = 'recipe-detail.html'
    context_object_name = 'recipe'
    form_class = CommentForm

    def post(self, request, *args, **kwargs):
        """Attach a new comment to this recipe, then reload the page."""
        # (Removed a leftover debug print of the raw POST data.)
        recipe = self.get_object()
        Comment.objects.create(
            recipe=recipe, user=request.user,
            text=request.POST['text']
        )
        return HttpResponseRedirect(recipe.get_absolute_url())

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        recipe = self.get_object()
        # The cover image is rendered separately; keep it out of the gallery.
        cover = recipe.get_image
        context['images'] = recipe.images.exclude(id=cover.id)
        return context
def add_recipe(request):
    """Create a recipe together with up to five uploaded images."""
    ImageFormSet = modelformset_factory(Image, form=ImageForm, max_num=5)
    if request.method == 'POST':
        recipe_form = RecipeForm(request.POST)
        formset = ImageFormSet(
            request.POST, request.FILES,
            queryset=Image.objects.none()
        )
        if recipe_form.is_valid() and formset.is_valid():
            recipe = recipe_form.save()
            for form_data in formset.cleaned_data:
                # BUG FIX: unfilled extra forms yield empty dicts, which
                # previously raised KeyError on form['image'].
                if form_data:
                    Image.objects.create(image=form_data['image'], recipe=recipe)
            return redirect(recipe.get_absolute_url())
    else:
        recipe_form = RecipeForm()
        formset = ImageFormSet(queryset=Image.objects.none())
    # Explicit context instead of the fragile locals() shortcut; the names
    # match what the template previously received.
    return render(request, 'add-recipe.html', {
        'recipe_form': recipe_form,
        'formset': formset,
    })
class RecipeUpdateView(UpdateView):
    """Edit an existing recipe."""

    model = Recipe
    template_name = 'update-recipe.html'
    fields = '__all__'


class RecipeDeleteView(DeleteView):
    """Delete a recipe, flash a success message, then go home."""

    model = Recipe
    template_name = 'delete-recipe.html'
    success_url = reverse_lazy('home')

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        success_url = self.get_success_url()
        self.object.delete()
        messages.add_message(
            request, messages.SUCCESS,
            'Successfully deleted'
        )
        return HttpResponseRedirect(success_url)


class CommentCreateView(FormView):
    """NOTE(review): apparently unused - no form_class/success_url and it
    is not wired up in urls.py. Consider removing."""

    model = Comment
    template_name = 'add-comment.html'
class UserRegistrationView(CreateView):
    """Sign-up page.

    NOTE(review): post() delegates to UserRegistrationService and always
    redirects home, so the form_valid/form_invalid overrides below are
    effectively dead code - and form_invalid even calls form.save() on an
    *invalid* form, which would raise if ever reached. Consider removing
    both overrides.
    """

    form_class = UserRegistrationForm
    template_name = 'registration-detail.html'
    success_url = reverse_lazy('home')

    def post(self, request, *args, **kwargs):
        UserRegistrationService.create_user(request)
        return HttpResponseRedirect(reverse_lazy('home'))

    def form_valid(self, form):
        super().form_valid(form)
        return redirect('home')

    def form_invalid(self, form):
        form.save()
        super().form_invalid(form)
        return redirect('home')
class AuthorizationView(TemplateView):
    """Login page for the email/password AuthorizationForm."""

    template_name = 'login.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['form'] = AuthorizationForm()
        return context

    def post(self, request):
        """Authenticate and log the user in.

        On success redirect home; on any failure (bad credentials, inactive
        account or invalid form) flash an error and return to the login page.
        """
        form = AuthorizationForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            user = authenticate(email=cd['email'], password=cd['password'])
            if user is not None and user.is_active:
                login(request, user)
                return redirect('home')
        # BUG FIX: a failed authenticate() (user is None) used to fall
        # through and silently redirect home; now every failure path
        # reports an error and stays on the login page.
        messages.add_message(request, messages.ERROR,
                             'Incorrect login or password')
        return redirect('login')
|
{"/main/service.py": ["/main/forms.py"], "/main/urls.py": ["/main/views.py"], "/main/views.py": ["/main/packages.py", "/main/forms.py", "/main/service.py"]}
|
31,482
|
Niraj-Kamdar/badger-rewards-bot
|
refs/heads/main
|
/bot.py
|
# bot.py
import json
import logging
import os
from discord.ext import commands, tasks
from web3 import Web3
from utils import fetch_rewards_tree, formatter, summary
# How often the background task polls the contract for a new cycle.
UPDATE_INTERVAL_SECONDS = 300
# ENV=TEST switches utils to local fixture data instead of S3.
TEST = True if os.getenv("ENV") == "TEST" else False
WEB3_INFURA_HTTP_URL = os.getenv("WEB3_INFURA_HTTP_URL")
logging.basicConfig(
    level=logging.INFO
)
bot = commands.Bot(command_prefix="/")
logger = logging.getLogger("rewards-bot")
web3 = Web3(
    Web3.HTTPProvider(WEB3_INFURA_HTTP_URL)
)
# BadgerTree contract handle, built from its bundled ABI.
with open("./abis/BadgerTreeV2.json") as badger_tree_abi_file:
    badger_tree_abi = json.load(badger_tree_abi_file)
badger_tree_address = os.getenv("BADGER_TREE_ADDRESS")
contract = web3.eth.contract(
    address=web3.toChecksumAddress(badger_tree_address),
    abi=badger_tree_abi,
)
# NOTE(review): this RootUpdated event filter is created but never polled
# below - the update loop compares cycle numbers instead.
event_filter = contract.events.RootUpdated.createFilter(fromBlock="0x0")
# In-process cache shared by the command handler and the update task.
cache = {}
def _parse_merkle_data(
    cycle, root, contentHash, startBlock, endBlock, timestamp, blockNumber
):
    """Normalise on-chain merkle data and refresh the module-level cache.

    Side effects: populates cache["current_merkle_data"],
    cache["reward_dist_summary"] and cache["formatted_data"] (a Discord
    embed ready to send).
    """
    current_merkle_data = dict(
        cycle=cycle,
        # Raw 32-byte values become hex strings for display and S3 lookup.
        root=web3.toHex(root),
        contentHash=web3.toHex(contentHash),
        startBlock=startBlock,
        endBlock=endBlock,
        timestamp=timestamp,
        blockNumber=blockNumber,
    )
    cache["current_merkle_data"] = current_merkle_data
    current_rewards_tree = fetch_rewards_tree(current_merkle_data, test=TEST)
    cache["reward_dist_summary"] = summary(current_rewards_tree)
    cache["formatted_data"] = formatter(
        {**current_merkle_data, **cache["reward_dist_summary"]}
    )
@bot.command(name="rewards")
async def rewards(ctx):
formatted_data = cache["formatted_data"]
await ctx.send(embed=formatted_data)
@bot.event
async def on_ready():
    """Log the bot's identity once the Discord connection is up."""
    user = bot.user
    logger.info(f"Logged in as {user.name} {user.id}")
@tasks.loop(seconds=UPDATE_INTERVAL_SECONDS)
async def update_rewards():
    """Poll the contract; on a new cycle, refresh the cache and announce it."""
    cycle = contract.functions.currentCycle().call()
    # Only do the expensive tree download/summary when the cycle changed.
    if cache["current_merkle_data"]["cycle"] != cycle:
        rewards_data = contract.functions.getCurrentMerkleData().call()
        _parse_merkle_data(cycle, *rewards_data)
        logger.info(f"New merkle tree: {cache['current_merkle_data']}")
        formatted_data = cache["formatted_data"]
        channel = bot.get_channel(int(os.getenv("DISCORD_CHANNEL_ID")))
        await channel.send(embed=formatted_data)
def start():
    """Prime the cache from the chain, start the poll loop and run the bot."""
    rewards_data = contract.functions.getCurrentMerkleData().call()
    cycle = contract.functions.currentCycle().call()
    _parse_merkle_data(cycle, *rewards_data)
    update_rewards.start()
    # Blocks until the bot is shut down.
    bot.run(os.getenv("BOT_TOKEN_REWARDS"))


if __name__ == "__main__":
    start()
|
{"/bot.py": ["/utils.py"], "/utils.py": ["/cgMapping.py"]}
|
31,483
|
Niraj-Kamdar/badger-rewards-bot
|
refs/heads/main
|
/utils.py
|
import json
import os
from collections import Counter, defaultdict
import boto3
import discord
from pycoingecko import CoinGeckoAPI
from cgMapping import cgMapping
cg = CoinGeckoAPI()
def download_tree(fileName, test=False):
    """Fetch a rewards-tree JSON document - from disk in test mode, else S3."""
    if test:
        # Local fixture used when ENV=TEST.
        with open("data/rewards.json") as fixture:
            return json.load(fixture)
    client = boto3.client(
        "s3",
        aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
    )
    response = client.get_object(
        Bucket="badger-json", Key="rewards/" + fileName
    )
    body = response["Body"].read().decode("utf-8")
    return json.loads(body)
def fetch_rewards_tree(merkle, test=False):
    """Download and sanity-check the rewards tree for the given merkle data.

    NOTE(review): validation uses bare asserts, which vanish under
    `python -O`; explicit exceptions would be safer.
    """
    pastFile = f"rewards-1-{merkle['contentHash']}.json"
    currentTree = download_tree(pastFile, test)
    if not test:
        # The downloaded file's root must match what is on chain.
        assert currentTree["merkleRoot"] == merkle["root"]
        lastUpdateOnChain = merkle["blockNumber"]
        lastUpdate = int(currentTree["endBlock"])
        # Ensure file tracks block within 1 day of upload
        # (~6500 Ethereum blocks per day).
        assert abs(lastUpdate - lastUpdateOnChain) < 6500
        # Ensure upload was after file tracked
        assert lastUpdateOnChain >= lastUpdate
    return currentTree
def formatter(merkle_data):
    """Build the Discord embed summarising a rewards cycle.

    NOTE(review): this renders the merged merkle/summary dict into aligned
    text and then re-parses it with positional split() calls (including a
    hard-coded word index 29). Any change to key names, dict order or
    padding breaks it; reading values directly from `merkle_data` would be
    far more robust.
    """
    # Render "key<33-wide> value" lines from the dict.
    prepare_data = "\n".join(map(lambda x: f"{x[0]: <33} {x[1]}", merkle_data.items()))
    disc = prepare_data.split("\n")
    # Slice each value back out of its rendered line.
    cycle = disc[0].split("cycle")
    spaceCycle = cycle[1].split(" ")
    cutCycle = "Cycle:" + spaceCycle[29]
    root = disc[1].split("root")
    conHash = disc[2].split("contentHash")[1]
    startBlock = disc[3].split("startBlock")[1]
    endBlock = disc[4].split("endBlock")[1]
    timestamp = disc[5].split("timestamp")[1]
    blockNumber = disc[6].split("blockNumber")[1]
    # Lines 7/8 carry the rendered badger / defidollar summary Counters.
    badger = disc[7].split(",")
    defiDollar = disc[8].split(",")
    defiCount = round(float(defiDollar[0].split(":")[1]), 2)
    defiSumUsd = round(float(defiDollar[1].split(" ")[2]), 2)
    defiSum = round(float(defiDollar[2].split(" ")[2]), 2)
    defiAverageUsd = round(float(defiDollar[3].split(" ")[2]), 2)
    dColon = defiDollar[4].split(" ")
    dAverage = round(float(dColon[2].split("}")[0]), 2)
    count = round(float(badger[0].split(":")[1]), 2)
    bSumUsd = round(float(badger[1].split(" ")[2]), 2)
    bSum = round(float(badger[2].split(" ")[2]), 2)
    bAverageUsd = round(float(badger[3].split(" ")[2]), 2)
    bColon = badger[4].split(" ")
    bAverage = round(float(bColon[2].split("}")[0]), 2)
    formatted_data = discord.Embed(title=cutCycle, color=0xE0A308)
    formatted_data.add_field(name="Root", value=root[1], inline=False)
    formatted_data.add_field(name="ContentHash", value=conHash, inline=False)
    formatted_data.add_field(name="StartBlock", value=startBlock, inline=True)
    formatted_data.add_field(name="EndBlock", value=endBlock, inline=True)
    # The "\u200b" fields are zero-width spacers used for embed layout.
    formatted_data.add_field(name="\u200b", value="\u200b", inline=False)
    formatted_data.add_field(
        name="Total Badger Distributed (BADGER)", value=bSum, inline=True
    )
    formatted_data.add_field(
        name="Total Badger Distributed (USD)", value="$" + str(bSumUsd), inline=True
    )
    formatted_data.add_field(name="\u200b", value="\u200b", inline=False)
    formatted_data.add_field(
        name="Average Badger Distributed (BADGER)", value=bAverage, inline=True
    )
    formatted_data.add_field(
        name="Average Badger Distributed (USD)",
        value="$" + str(bAverageUsd),
        inline=True,
    )
    formatted_data.add_field(name="\u200b", value="\u200b", inline=False)
    formatted_data.add_field(
        name="Total DefiDollar Distributed (USD)",
        value="$" + str(defiSumUsd),
        inline=True,
    )
    formatted_data.add_field(
        name="Average DefiDollar Distributed (USD)",
        value="$" + str(defiAverageUsd),
        inline=True,
    )
    return formatted_data
def summary(rewards_tree):
    """Aggregate per-token distribution statistics from a rewards tree.

    Returns a dict mapping token name -> Counter with keys
    count / sum / sum(usd) / mean / mean(usd).
    """
    # sett -> token address -> list of raw (smallest-unit) amounts.
    token_dist_data = defaultdict(lambda: defaultdict(list))
    summary = defaultdict(Counter)
    for sett, settDist in rewards_tree["userData"].items():
        for userDist in settDist.values():
            for token in userDist["totals"]:
                # Todo: add support for digg rewards
                # Need to fetch actual reward in digg from the share
                if token != "0x798D1bE841a82a273720CE31c822C61a67a601C3":
                    token_dist_data[sett][token].append(userDist["totals"][token])
    for sett, value in token_dist_data.items():
        for token in value:
            # Counter addition accumulates the per-sett partial summaries.
            summary[cgMapping[token]["name"]] += _list_summary(
                token_dist_data[sett][token],
                cgMapping[token]["id"],
                cgMapping[token]["decimals"],
            )
    # Derive the means once all setts have been folded in.
    for token in summary:
        summary[token]["mean"] = summary[token]["sum"] / summary[token]["count"]
        summary[token]["mean(usd)"] = (
            summary[token]["sum(usd)"] / summary[token]["count"]
        )
    return summary
def _list_summary(array, cgTokenId, decimals):
    """Summarise one sett's raw token amounts (count, sum and USD sum)."""
    # Convert from the token's smallest unit to whole tokens.
    scale = 10 ** decimals
    amounts = [raw / scale for raw in array]
    # Spot price via CoinGecko (network call).
    price_data = cg.get_price(ids=cgTokenId, vs_currencies="usd")
    usd_price = price_data[cgTokenId]["usd"]
    total = sum(amounts)
    return Counter({
        "count": len(amounts),
        "sum": total,
        "sum(usd)": total * usd_price,
    })
|
{"/bot.py": ["/utils.py"], "/utils.py": ["/cgMapping.py"]}
|
31,484
|
Niraj-Kamdar/badger-rewards-bot
|
refs/heads/main
|
/cgMapping.py
|
# Token contract address -> CoinGecko id, on-chain decimals and display name.
# Used by utils.summary to price and label reward distributions.
cgMapping = {
    "0x3472A5A71965499acd81997a54BBA8D852C6E53d": {
        "id": "badger-dao",
        "decimals": 18,
        "name": "badger",
    },
    "0x798D1bE841a82a273720CE31c822C61a67a601C3": {
        "id": "digg",
        "decimals": 9,
        "name": "digg",
    },
    "0x20c36f062a31865bED8a5B1e512D9a1A20AA333A": {
        "id": "defidollar",
        "decimals": 18,
        "name": "defidollar",
    },
}
|
{"/bot.py": ["/utils.py"], "/utils.py": ["/cgMapping.py"]}
|
31,497
|
orena1/neuron_reduce
|
refs/heads/master
|
/example/example.py
|
#reduction of L5_PC using Neuron_Reduce
from __future__ import division
import os
import logging
from neuron import gui,h
import numpy as np
import neuron_reduce
import time
import matplotlib.pyplot as plt
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
#Create a L5_PC model
h.load_file('L5PCbiophys3.hoc')
h.load_file("import3d.hoc")
h.load_file('L5PCtemplate.hoc')
complex_cell = h.L5PCtemplate('cell1.asc')
h.celsius = 37
h.v_init = complex_cell.soma[0].e_pas
#Add synapses to the model
synapses_list, netstims_list, netcons_list, randoms_list = [], [], [] ,[]
all_segments = [i for j in map(list,list(complex_cell.apical)) for i in j] + [i for j in map(list,list(complex_cell.basal)) for i in j]
len_per_segment = np.array([seg.sec.L/seg.sec.nseg for seg in all_segments])
rnd = np.random.RandomState(10)
for i in range(10000):
seg_for_synapse = rnd.choice(all_segments, p=len_per_segment/sum(len_per_segment))
synapses_list.append(h.Exp2Syn(seg_for_synapse))
if rnd.uniform()<0.85:
e_syn, tau1, tau2, spike_interval, syn_weight = 0, 0.3, 1.8, 1000/2.5, 0.0016
else:
e_syn, tau1, tau2, spike_interval, syn_weight = -86, 1, 8, 1000/15.0, 0.0008
synapses_list[i].e, synapses_list[i].tau1, synapses_list[i].tau2 = e_syn, tau1, tau2
netstims_list.append(h.NetStim())
netstims_list[i].interval, netstims_list[i].number, netstims_list[i].start, netstims_list[i].noise = spike_interval, 9e9, 100, 1
randoms_list.append(h.Random())
randoms_list[i].Random123(i)
randoms_list[i].negexp(1)
netstims_list[i].noiseFromRandom(randoms_list[i])
netcons_list.append(h.NetCon(netstims_list[i], synapses_list[i] ))
netcons_list[i].delay, netcons_list[i].weight[0] = 0, syn_weight
#Simulate the full neuron for 1 seconds
soma_v = h.Vector()
soma_v.record(complex_cell.soma[0](0.5)._ref_v)
time_v = h.Vector()
time_v.record(h._ref_t)
h.tstop = 1000
st = time.time()
h.run()
print('complex cell simulation time {:.4f}'.format(time.time()-st))
complex_cell_v = list(soma_v)
#apply Neuron_Reduce to simplify the cell
reduced_cell, synapses_list, netcons_list = neuron_reduce.subtree_reductor(complex_cell, synapses_list, netcons_list, reduction_frequency=0)
for r in randoms_list:r.seq(1) #reset random
#Running the simulation again but now on the reduced cell
st = time.time()
h.run()
print('reduced cell simulation time {:.4f}'.format(time.time()-st))
reduced_cell_v = list(soma_v)
#plotting the results
plt.figure()
plt.plot(time_v, complex_cell_v, label='complex cell')
plt.plot(time_v, reduced_cell_v, label='redcued cell')
plt.show()
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,498
|
orena1/neuron_reduce
|
refs/heads/master
|
/tests/TestsFiles/Test_8_C1_Marasco/Morphology_replace_user05.py
|
name = 'geo5038801mod'
apical_dendriteEnd = 79
total_user5 = 70
f = open(name + '.hoc','r')
new_ls = ''
for line in f:
if 'user5[' in line and 'create' not in line and 'append' not in line:
parts = line.split('user5[')
#sdfs
if '{user5[51] connect user5[52](0), 1}' in line:
pass
#asdas
pass
for i in range(len(parts)):
if i in range(1,len(parts)):
#asdas
parts[i]
num = int(parts[i][:parts[i].index(']')])
num = num + apical_dendriteEnd
new_ls += 'apic[' + str(num) + ']' + parts[i][parts[i].index(']')+1:].replace('apical_dendrite','apic')
else:
new_ls += parts[i].replace('apical_dendrite','apic')
elif 'create user5' in line:
new_ls +='\n'
elif 'user5al.append()' in line:
new_ls +=' for i=' + str(apical_dendriteEnd) + ', ' + \
str(apical_dendriteEnd + total_user5-1) +' apic[i] user5al.append()\n'
elif 'user5[i] all.append()' in line:
new_ls +='\n'
elif 'apical_dendrite[i] all.append()' in line:
new_ls += ' for i=0, '+ str(apical_dendriteEnd + total_user5-1) +' apic[i] all.append()'
elif 'apical_dendrite' in line and 'create' not in line:
new_ls += line.replace('apical_dendrite','apic').replace('apical_dendrite','apic').replace('apical_dendrite','apic')
elif 'create apical_dendrite' in line:
parts = line.split('apical_dendrite[')
new_ls += parts[0] + 'apic[' + str(apical_dendriteEnd+total_user5) + parts[1][parts[1].index(']'):]
elif 'template' in line:
new_ls += line.replace(name, name + 'Mod')
else:
new_ls += line
f.close()
f1 = open(name + 'Mod.hoc', 'w')
f1.write(new_ls)
f1.close()
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,499
|
orena1/neuron_reduce
|
refs/heads/master
|
/tests/run_all_tests.py
|
#!/usr/bin/env python
import os
WRITE_UNIT_TEST_VECTORS = False
PLOT_VOLTAGES = False
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
TESTDATA_PATH = os.path.join(BASE_PATH, 'TestsFiles')
def run_reduce(morphology_file,
model_file,
frequency,
synapse_file,
voltage_file,
create_type,
celsius,
write_unit_test_vectors=WRITE_UNIT_TEST_VECTORS,
plot_voltages=PLOT_VOLTAGES,
reduced_model_file='model.hoc',
manual_total_nsegs=-1):
args = ("python " +
os.path.join(BASE_PATH, "test_script_helper.py ") +
' '.join([str(a) for a in (morphology_file,
model_file,
reduced_model_file,
frequency,
manual_total_nsegs,
synapse_file,
voltage_file,
write_unit_test_vectors,
plot_voltages,
create_type,
celsius)]))
assert os.system(args) == 0
def test1():
'''Test 1 passive neuron'''
path = os.path.join(TESTDATA_PATH, 'Test_1')
kwargs = dict(morphology_file=os.path.join(path, "2013_03_06_cell08_876_H41_05_Cell2.ASC"),
model_file=os.path.join(path, "model.hoc"),
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
create_type='basic',
celsius=37)
for frequency in (0, 10, 38, 200):
kwargs['frequency'] = frequency
kwargs['voltage_file'] = os.path.join(path, "voltage_vectors_for_unit_test_%s.txt" % frequency)
run_reduce(**kwargs)
def test2():
'''Test 2 passive neuron not deleting the axon'''
path = os.path.join(TESTDATA_PATH, 'Test_2')
run_reduce(morphology_file=os.path.join(path, "2013_03_06_cell08_876_H41_05_Cell2.ASC"),
model_file=os.path.join(path, "model.hoc"),
frequency=38,
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='basic',
celsius=37,
)
def test3_a71075_passive():
path = os.path.join(TESTDATA_PATH, 'Test_3')
run_reduce(morphology_file=os.path.join(path, "dend-C050800E2_cor_axon-C120398A-P2_-_Scale_x1.000_y1.050_z1.000_-_Clone_81.asc"),
model_file=os.path.join(path, "cADpyr230_L4_SS_4_dend_C050800E2_cor_axon_C120398A_P2___Scale_x1_000_y1_050_z1_000___Clone_81.hoc"),
frequency=38,
synapse_file=os.path.join(path, "synapse_fromh5a71075.txt"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='bbp',
celsius=34,
)
def test4_amsalem_2016():
path = os.path.join(TESTDATA_PATH, 'Test_4_LBC_amsalem/')
run_reduce(morphology_file=os.path.join(path, "C230300D1.asc"),
model_file=os.path.join(path, "cNAC187_L23_LBC_3_C230300D1_new_new_fit.hoc"),
frequency=9,
synapse_file=os.path.join(path, "synapse_fromh5a71075.txt"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='bbpactive',
celsius=34
)
def test5_Hay_2011_active_dendrite():
path = os.path.join(TESTDATA_PATH, 'Test_5_Hay_2011/')
run_reduce(morphology_file=os.path.join(path, "cell1.asc"),
model_file=os.path.join(path, "L5PCtemplate.hoc"),
frequency=38,
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='hay',
celsius=37)
def test6_L4_LBC_cNAC187_5_for_run():
path = os.path.join(TESTDATA_PATH, 'L4_LBC_cNAC187_5_for_run/')
run_reduce(morphology_file=os.path.join(path, "2013_03_06_cell08_876_H41_05_Cell2.ASC"),
model_file=os.path.join(path, "cNAC187_L4_LBC_8e834c24cb.hoc"),
frequency=0,
synapse_file=os.path.join(path, "1487081844_732516.txt"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test_0.txt"),
create_type='bbpnew',
celsius=34,
)
def test7_Almog_Korngreen_2014():
path = os.path.join(TESTDATA_PATH, 'Test_7_Almog/')
run_reduce(morphology_file=path,
model_file=os.path.join(path, "A140612_1.hoc"),
frequency=0,
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test_0.txt"),
create_type='almog',
celsius=34,
)
def test8_Marasco_Limongiello_Migliore_2012():
'''Test 8 Marasco Limongiello Migliore 2012'''
path = os.path.join(TESTDATA_PATH, 'Test_8_C1_Marasco/')
run_reduce(morphology_file=path,
model_file=os.path.join(path, "geo5038801modMod.hoc"),
frequency=0,
synapse_file=os.path.join(path, "1487081844_732516.txt"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test_0.txt"),
create_type='almog',
celsius=34,
)
def test9_model_48310820():
'''Test 9 model 48310820 (L5PC) from the Allen celltypes data base'''
path = os.path.join(TESTDATA_PATH, 'Test_9_Allen_483108201/')
run_reduce(morphology_file=os.path.join(path, "reconstruction.swc"),
model_file=os.path.join(path, "AllenTemplate.hoc"),
frequency=0,
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='allen',
celsius=34,
)
def test10_model_47804508():
'''Test 10 model 47804508 (L1) from the Allen celltypes data base'''
path = os.path.join(TESTDATA_PATH, 'Test_10_Allen_47804508/')
run_reduce(morphology_file=os.path.join(path, "reconstruction.swc"),
model_file=os.path.join(path, "AllenTemplate.hoc"),
frequency=0,
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='allen',
celsius=34,
)
def test11_human_Eyal_2016():
'''Test 11 model Human L2/3 Cell from Eyal et al 2016'''
path = os.path.join(TESTDATA_PATH, 'Test_11_Human_L2_3_Eyal/')
run_reduce(morphology_file=os.path.join(path, "2013_03_06_cell08_876_H41_05_Cell2.ASC"),
model_file=os.path.join(path, "model_0603_cell08_cm045.hoc"),
frequency=0,
synapse_file=os.path.join(path, "origRandomSynapses-10000"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='human',
celsius=37,
)
def test12_TPC_Markram_2016():
'''Test 12 Tufted Pyramidal Cell (L6) Markram et al. Cell (2015) ----'''
path = os.path.join(TESTDATA_PATH, 'Test_12_TPC_L6_Markram/')
run_reduce(morphology_file=os.path.join(path, "dend-tkb070125a3_ch1_cc2_b_hw_60x_1_axon-tkb060223b3_ch1_cc2_o_ps_60x_1_-_Clone_5.asc"),
model_file=os.path.join(path, "cADpyr231_L6_TPC_L1_44f2206f70.hoc"),
frequency=0,
synapse_file=os.path.join(path, "synapses_location.txt"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type='bbpnew',
celsius=34,
)
def test13_dbc_Markram_2015():
'''Test 13 Double Bouquet Cell (L4) Markram et al. Cell (2015)'''
path = os.path.join(TESTDATA_PATH, 'Test_13_DBC_L4_Markram/')
run_reduce(morphology_file=os.path.join(path, "C140600C-I1_-_Clone_2.asc"),
model_file=os.path.join(path, "cNAC187_L4_DBC_23ffe29c8b.hoc"),
frequency=0,
synapse_file=os.path.join(path, "synapses_locations.txt"),
voltage_file=os.path.join(path, "voltage_vectors_for_unit_test.txt"),
create_type ='bbpnew',
celsius=34,
)
if __name__ == '__main__':
test1()
test2()
test3_a71075_passive()
test4_amsalem_2016()
test5_Hay_2011_active_dendrite()
test6_L4_LBC_cNAC187_5_for_run()
test7_Almog_Korngreen_2014()
test8_Marasco_Limongiello_Migliore_2012()
test9_model_48310820()
test10_model_47804508()
test11_human_Eyal_2016()
test12_TPC_Markram_2016()
test13_dbc_Markram_2015()
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,500
|
orena1/neuron_reduce
|
refs/heads/master
|
/neuron_reduce/__init__.py
|
from .subtree_reductor_func import subtree_reductor
import os
def run_tests():
pp = os.path.realpath(__file__)
pp = os.path.dirname(pp)
print(pp)
execfile(pp + '/../tests/run_all_tests.py')
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,501
|
orena1/neuron_reduce
|
refs/heads/master
|
/neuron_reduce/subtree_reductor_func.py
|
'''
function subtree_reductor():
which reduces a morphologically detailed cell instance into a morphologically
simplified cell instance, according to NeuroReduce and merges synapses of the
same type (same reverse potential, tau1, and tau2) that are mapped to the same
segment. (see more in Readme on tool and usage)
usage: For details, see comments in function
outputs: reduced cell instance, a new synapses_list, and the netcons_list,
which now corresponds to the new synapses.
- The model template file must have an init() function (see example in the
attached model.hoc file) and the following public definitions specifying
sections and section lists accordingly:
public soma, dend, apic ; public all, somatic, apical, basal
- Supports numerous types of synapses (two synapses are considered to be of
different types if they are different from each other in at least one of the
following values: reverse potential, tau1, tau2)
'''
import collections
import itertools as it
import logging
import math
import re
import cmath
import numpy as np
import neuron
from neuron import h
h.load_file("stdrun.hoc")
from .reducing_methods import (reduce_subtree,
reduce_synapse,
measure_input_impedance_of_subtree,
CableParams,
SynapseLocation,
push_section,
)
logger = logging.getLogger(__name__)
SOMA_LABEL = "soma"
EXCLUDE_MECHANISMS = ('pas', 'na_ion', 'k_ion', 'ca_ion', 'h_ion', 'ttx_ion', )
def create_sections_in_hoc(type_of_section, num, instance_as_str):
'''creates sections in the hoc world according to the given section type and number of sections
in the instance whose name is given as a string
'''
h("strdef string")
h.string = type_of_section
h('{sprint(string, "create %s[%d]", string, ' + str(num) + ') }')
h("{execute(string, " + instance_as_str + ")}")
def append_to_section_lists(section, type_of_sectionlist, instance_as_str):
''' appends given section to the sectionlist of the given type and to the "all" sectionlist
in the hoc world in the instance whose name is given as a string
'''
h("strdef string")
h.string = section + " " + type_of_sectionlist + ".append()"
h("{execute(string, " + instance_as_str + ")}")
h.string = section + " all.append()"
h("{execute(string, " + instance_as_str + ")}")
def find_section_number(section):
''' extracts and returns the section number from the given section object '''
sec_name = h.secname(sec=section)
ints_in_name = re.findall(r'\d+', sec_name)
sec_num = ints_in_name[len(ints_in_name) - 1] # extracts section number
return sec_num
def calculate_nsegs_from_manual_arg(new_cable_properties, total_segments_wanted):
'''Calculates the number of segments for each section in the reduced model
according to the given total_segments_wanted and the given
new_dends_electrotonic_length (the electrotonic lengths of all the new
sections). Called when the user chooses to give to the program the
approximate total number of segments that the reduced model should have
(non-default calculation).
'''
# minus one for the one segment of the soma:
total_segments_in_dendrites = total_segments_wanted - 1
# total electrotonic length of reduced dendritic cables
sum_of_lengths = sum(prop.electrotonic_length
for prop in new_cable_properties)
# the num of segments assigned to each section is in proportion to the
# section's relative contribution to the total electrotonic length in the
# model
dends_nsegs = []
for prop in new_cable_properties:
new_length = prop.electrotonic_length
new_nseg_to_put = int(round((float(new_length) / sum_of_lengths) *
total_segments_in_dendrites))
if new_nseg_to_put < 1:
new_nseg_to_put = 1
dends_nsegs.append(new_nseg_to_put)
return dends_nsegs
def calculate_nsegs_from_lambda(new_cable_properties):
'''calculate the number of segments for each section in the reduced model
according to the length (in microns) and space constant (= lambda - in
microns) that were previously calculated for each section and are given in
subtree_dimensions. According to this calculation, a segment is formed for
every 0.1 * lambda in a section. (lambda = space constant = electrotonic length unit).
'''
dends_nsegs = []
for cable in new_cable_properties:
# for every unit of electronic length (length/space_constant such units)
# ~10 segments are formed
dends_nsegs.append(int((float(cable.length) / cable.space_const) * 10 / 2) * 2 + 1)
return dends_nsegs
def mark_subtree_sections_with_subtree_index(sections_to_delete,
section_per_subtree_index,
root_sec_of_subtree,
mapping_sections_to_subtree_index,
section_type,
subtree_index):
'''Recursively marks all sections in the subtree as belonging to the given subtree_index
using the given dict mapping_sections_to_subtree_index, as follows:
mapping_sections_to_subtree_index[(<section_type>, <section_number>)] = given subtree_index
'''
sections_to_delete.append(root_sec_of_subtree)
section_per_subtree_index.setdefault(subtree_index, [])
section_per_subtree_index[subtree_index].append(root_sec_of_subtree)
section_num = find_section_number(root_sec_of_subtree)
for child in root_sec_of_subtree.children():
mark_subtree_sections_with_subtree_index(sections_to_delete,
section_per_subtree_index,
child,
mapping_sections_to_subtree_index,
section_type,
subtree_index)
mapping_sections_to_subtree_index[(section_type, section_num)] = subtree_index
def find_synapse_loc(synapse_or_segment, mapping_sections_to_subtree_index):
''' Returns the location of the given synapse object'''
if not isinstance(synapse_or_segment, neuron.nrn.Segment):
synapse_or_segment = synapse_or_segment.get_segment()
x = synapse_or_segment.x
with push_section(synapse_or_segment.sec):
# extracts the section type ("soma", "apic", "dend") and the section number
# out of the section name
full_sec_name = h.secname()
sec_name_as_list = full_sec_name.split(".")
short_sec_name = sec_name_as_list[len(sec_name_as_list) - 1]
section_type = short_sec_name.split("[")[0]
section_num = re.findall(r'\d+', short_sec_name)[0]
# finds the index of the subtree that this synapse belongs to using the
# given mapping_sections_to_subtree_index which maps sections to the
# subtree indexes that they belong to
if section_type == "apic":
subtree_index = mapping_sections_to_subtree_index[("apic", section_num)]
elif section_type == "dend":
subtree_index = mapping_sections_to_subtree_index[("basal", section_num)]
else: # somatic synapse
subtree_index, section_num, x = SOMA_LABEL, 0, 0
return SynapseLocation(subtree_index, int(section_num), x)
def find_and_disconnect_axon(soma_ref):
'''Searching for an axon, it can be a child of the soma or a parent of the soma.'''
axon_section, axon_parent, soma_axon_x = [], False, None
for sec in soma_ref.child:
name = sec.hname().lower()
if 'axon' in name or 'hill' in name:
axon_section.append(sec)
# disconnect axon
soma_axon_x = sec.parentseg().x
sec.push()
h.disconnect()
h.define_shape()
if soma_ref.has_parent():
name = soma_ref.parent().sec.hname().lower()
if 'axon' in name or 'hill' in name:
axon_section.append(soma_ref.parent())
axon_parent = True
soma_axon_x = None
soma_ref.push()
h.disconnect()
else:
raise Exception('Soma has a parent which is not an axon')
if len(axon_section) > 1:
raise Exception('Soma has a two axons')
return axon_section, axon_parent, soma_axon_x
def create_segments_to_mech_vals(sections_to_delete,
remove_mechs=True,
exclude=EXCLUDE_MECHANISMS):
'''This function copy the create a mapping between a dictionary and the mechanisms that it have
plus the values of those mechanisms. It also remove the mechanisms from the model in order to
create a passive model
Arguments:
remove_mechs - False|True
if True remove the mechs after creating the mapping, False - keep the mechs
exclude - List of all the mechs name that should not be removed
'''
exclude = set(exclude)
segment_to_mech_vals, mech_names = {}, set()
for seg in it.chain.from_iterable(sections_to_delete):
segment_to_mech_vals[seg] = {}
for mech in seg:
mech_name = mech.name()
segment_to_mech_vals[seg][mech_name] = {}
for n in dir(mech):
if n.startswith('__') or n in ('next', 'name', 'is_ion', 'segment', ):
continue
if not n.endswith('_' + mech_name) and not mech_name.endswith('_ion'):
n += '_' + mech_name
segment_to_mech_vals[seg][mech_name][n] = getattr(seg, n)
mech_names.add(mech_name)
mech_names -= exclude
if remove_mechs: # Remove all the mechs from the sections
for sec in sections_to_delete:
with push_section(sec):
for mech in mech_names:
h("uninsert " + mech)
return segment_to_mech_vals
def create_seg_to_seg(original_cell,
section_per_subtree_index,
roots_of_subtrees,
mapping_sections_to_subtree_index,
new_cable_properties,
has_apical,
apic,
basals,
subtree_ind_to_q,
mapping_type,
reduction_frequency):
'''create mapping between segments in the original model to segments in the reduced model
if mapping_type == impedance the mapping will be a response to the
transfer impedance of each segment to the soma (like the synapses)
if mapping_type == distance the mapping will be a response to the
distance of each segment to the soma (like the synapses) NOT IMPLEMENTED
YET
'''
assert mapping_type == 'impedance', 'distance mapping not implemented yet'
# the keys are the segments of the original model, the values are the
# segments of the reduced model
original_seg_to_reduced_seg = {}
reduced_seg_to_original_seg = collections.defaultdict(list)
for subtree_index in section_per_subtree_index:
for sec in section_per_subtree_index[subtree_index]:
for seg in sec:
synapse_location = find_synapse_loc(seg, mapping_sections_to_subtree_index)
imp_obj, subtree_input_impedance = measure_input_impedance_of_subtree(
roots_of_subtrees[subtree_index], reduction_frequency)
# if synapse is on the apical subtree
on_basal_subtree = not (has_apical and subtree_index == 0)
mid_of_segment_loc = reduce_synapse(
original_cell,
synapse_location,
on_basal_subtree,
imp_obj,
subtree_input_impedance,
new_cable_properties[subtree_index].electrotonic_length,
subtree_ind_to_q[subtree_index])
if on_basal_subtree:
if has_apical:
new_section_for_synapse = basals[subtree_index - 1]
else:
new_section_for_synapse = basals[subtree_index]
else:
new_section_for_synapse = apic
reduced_seg = new_section_for_synapse(mid_of_segment_loc)
original_seg_to_reduced_seg[seg] = reduced_seg
reduced_seg_to_original_seg[reduced_seg].append(seg)
return original_seg_to_reduced_seg, dict(reduced_seg_to_original_seg)
def copy_dendritic_mech(original_seg_to_reduced_seg,
reduced_seg_to_original_seg,
apic,
basals,
segment_to_mech_vals,
mapping_type='impedance'):
''' copies the mechanisms from the original model to the reduced model'''
# copy mechanisms
# this is needed for the case where some segements were not been mapped
mech_names_per_segment = collections.defaultdict(list)
vals_per_mech_per_segment = {}
for reduced_seg, original_segs in reduced_seg_to_original_seg.items():
vals_per_mech_per_segment[reduced_seg] = collections.defaultdict(list)
for original_seg in original_segs:
for mech_name, mech_params in segment_to_mech_vals[original_seg].items():
for param_name, param_value in mech_params.items():
vals_per_mech_per_segment[reduced_seg][param_name].append(param_value)
mech_names_per_segment[reduced_seg].append(mech_name)
reduced_seg.sec.insert(mech_name)
for param_name, param_values in vals_per_mech_per_segment[reduced_seg].items():
setattr(reduced_seg, param_name, np.mean(param_values))
all_segments = []
if apic is not None:
all_segments.extend(list(apic))
for bas in basals:
all_segments.extend(list(bas))
if len(all_segments) != len(reduced_seg_to_original_seg):
logger.warning('There is no segment to segment copy, it means that some segments in the'
'reduced model did not receive channels from the original cell.'
'Trying to compensate by copying channels from neighboring segments')
handle_orphan_segments(original_seg_to_reduced_seg,
all_segments,
vals_per_mech_per_segment,
mech_names_per_segment)
def handle_orphan_segments(original_seg_to_reduced_seg,
all_segments,
vals_per_mech_per_segment,
mech_names_per_segment):
''' This function handle reduced segments that did not had original segments mapped to them'''
# Get all reduced segments that have been mapped by a original model segment
all_mapped_control_segments = original_seg_to_reduced_seg.values()
non_mapped_segments = set(all_segments) - set(all_mapped_control_segments)
for reduced_seg in non_mapped_segments:
seg_secs = list(reduced_seg.sec)
# find valid parent
parent_seg_index = seg_secs.index(reduced_seg) - 1
parent_seg = None
while parent_seg_index > -1:
if seg_secs[parent_seg_index] in all_mapped_control_segments:
parent_seg = seg_secs[parent_seg_index]
break
else:
parent_seg_index -= 1
# find valid child
child_seg_index = seg_secs.index(reduced_seg) + 1
child_seg = None
while child_seg_index < len(seg_secs):
if seg_secs[child_seg_index] in all_mapped_control_segments:
child_seg = seg_secs[child_seg_index]
break
else:
child_seg_index += 1
if not parent_seg and not child_seg:
raise Exception("no child seg nor parent seg, with active channels, was found")
if parent_seg and not child_seg:
for mech in mech_names_per_segment[parent_seg]:
reduced_seg.sec.insert(mech)
for n in vals_per_mech_per_segment[parent_seg]:
setattr(reduced_seg, n, np.mean(vals_per_mech_per_segment[parent_seg][n]))
if not parent_seg and child_seg:
for mech in mech_names_per_segment[child_seg]:
reduced_seg.sec.insert(mech)
for n in vals_per_mech_per_segment[child_seg]:
setattr(reduced_seg, n, np.mean(vals_per_mech_per_segment[child_seg][n]))
# if both parent and child were found, we add to the segment all the mech in both
# this is just a decision
if parent_seg and child_seg:
for mech in set(mech_names_per_segment[child_seg]) & set(mech_names_per_segment[parent_seg]):
reduced_seg.sec.insert(mech)
for n in vals_per_mech_per_segment[child_seg]:
child_mean = np.mean(vals_per_mech_per_segment[child_seg][n])
if n in vals_per_mech_per_segment[parent_seg]:
parent_mean = np.mean(vals_per_mech_per_segment[parent_seg][n])
setattr(reduced_seg, n, (child_mean + parent_mean) / 2)
else:
setattr(reduced_seg, n, child_mean)
for n in vals_per_mech_per_segment[parent_seg]:
parent_mean = np.mean(vals_per_mech_per_segment[parent_seg][n])
if n in vals_per_mech_per_segment[child_seg]:
child_mean = np.mean(vals_per_mech_per_segment[child_seg][n])
setattr(reduced_seg, n, (child_mean + parent_mean) / 2)
else:
setattr(reduced_seg, n, parent_mean)
def add_PP_properties_to_dict(PP, PP_params_dict):
'''
add the propeties of a point process to PP_params_dict.
The only propeties added to the dictionary are those worth comparing
'''
skipped_params = {"Section", "allsec", "baseattr", "cas", "g", "get_loc", "has_loc", "hname",
'hocobjptr', "i", "loc", "next", "ref", "same", "setpointer", "state",
"get_segment",
}
PP_params = []
for param in dir(PP):
if param.startswith("__") or param in skipped_params:
continue
PP_params.append(param)
PP_params_dict[type_of_point_process(PP)] = PP_params
def type_of_point_process(PP):
s = PP.hname()
ix = PP.hname().find("[")
return s[:ix]
def apply_params_to_section(name, type_of_sectionlist, instance_as_str, section, cable_params, nseg):
section.L = cable_params.length
section.diam = cable_params.diam
section.nseg = nseg
append_to_section_lists(name, type_of_sectionlist, instance_as_str)
section.insert('pas')
section.cm = cable_params.cm
section.g_pas = 1.0 / cable_params.rm
section.Ra = cable_params.ra
section.e_pas = cable_params.e_pas
def calculate_subtree_q(root, reduction_frequency):
rm = 1.0 / root.g_pas
rc = rm * (float(root.cm) / 1000000)
angular_freq = 2 * math.pi * reduction_frequency
q_imaginary = angular_freq * rc
q_subtree = complex(1, q_imaginary) # q=1+iwRC
q_subtree = cmath.sqrt(q_subtree)
return q_subtree
def synapse_properties_match(synapse, PP, PP_params_dict):
if PP.hname()[:PP.hname().rindex('[')] != synapse.hname()[:synapse.hname().rindex('[')]:
return False
for param in PP_params_dict[type_of_point_process(PP)]:
if(param not in ['rng'] and # https://github.com/neuronsimulator/nrn/issues/136
str(type(getattr(PP, param))) != "<type 'hoc.HocObject'>" and # ignore hoc objects
getattr(PP, param) != getattr(synapse, param)):
return False
return True
def load_model(model_filename):
model_obj_name = model_filename.split(".")[0].split('/')[-1]
if h.name_declared(model_obj_name) == 0:
logger.debug("loading template '%s'" % model_obj_name)
if model_filename == 'model.hoc':
logger.debug("loading default reduced model")
load_default_model()
else:
h.load_file(model_filename)
else:
logger.info("The template '%s' is already defined... not loading." % model_obj_name)
return model_obj_name
def gather_subtrees(soma_ref):
'''get all the subtrees of the soma
assumes the axon is already disconnected
return (list(roots_of_subtrees), list(num_of_subtrees))
where:
roots_of_subtrees holds the root sections of each of the soma's subtrees
note: The apical, if it exists, has been moved to the front
num_of_subtrees correctly the number of subtrees, excluding the axon
'''
roots_of_subtrees = []
num_of_subtrees = []
for i in range(int(soma_ref.nchild())):
if 'soma' in str(soma_ref.child[i]):
logger.warning("soma is child, ignore - not tested yet")
continue
num_of_subtrees.append(i)
roots_of_subtrees.append(soma_ref.child[i])
# assuming up to one apical tree
ix_of_apical = None
for i in num_of_subtrees:
if 'apic' in roots_of_subtrees[i].hname():
assert ix_of_apical is None, 'Multiple apical dendrites not supported'
ix_of_apical = i
if ix_of_apical is not None:
roots_of_subtrees = ([roots_of_subtrees[ix_of_apical]] +
roots_of_subtrees[:ix_of_apical] +
roots_of_subtrees[ix_of_apical + 1:])
return roots_of_subtrees, num_of_subtrees
def gather_cell_subtrees(roots_of_subtrees):
# dict that maps section indexes to the subtree index they are in: keys are
# string tuples: ("apic"/"basal", orig_section_index) , values are ints:
# subtree_instance_index
sections_to_delete = []
section_per_subtree_index = {}
mapping_sections_to_subtree_index = {}
for i, soma_child in enumerate(roots_of_subtrees):
# inserts each section in this subtree into the above dict, which maps
# it to the subtree index
if 'apic' in soma_child.hname():
assert i == 0, ('The apical is not the first child of the soma! '
'a code refactoring is needed in order to accept it')
mark_subtree_sections_with_subtree_index(sections_to_delete,
section_per_subtree_index,
soma_child,
mapping_sections_to_subtree_index,
"apic",
i)
elif 'dend' in soma_child.hname() or 'basal' in soma_child.hname():
mark_subtree_sections_with_subtree_index(sections_to_delete,
section_per_subtree_index,
soma_child,
mapping_sections_to_subtree_index,
"basal",
i)
return sections_to_delete, section_per_subtree_index, mapping_sections_to_subtree_index
def create_reduced_cell(soma_cable,
has_apical,
original_cell,
model_obj_name,
new_cable_properties,
new_cables_nsegs,
subtrees_xs):
h("objref reduced_cell")
h("reduced_cell = new " + model_obj_name + "()")
create_sections_in_hoc("soma", 1, "reduced_cell")
soma = original_cell.soma[0] if original_cell.soma.hname()[-1] == ']' else original_cell.soma
append_to_section_lists("soma[0]", "somatic", "reduced_cell")
if has_apical: # creates reduced apical cable if apical subtree existed
create_sections_in_hoc("apic", 1, "reduced_cell")
apic = h.reduced_cell.apic[0]
num_of_basal_subtrees = len(new_cable_properties) - 1
cable_params = new_cable_properties[0]
nseg = new_cables_nsegs[0]
apply_params_to_section("apic[0]", "apical", "reduced_cell",
apic, cable_params, nseg)
apic.connect(soma, subtrees_xs[0], 0)
else:
apic = None
num_of_basal_subtrees = len(new_cable_properties)
# creates reduced basal cables
create_sections_in_hoc("dend", num_of_basal_subtrees, "reduced_cell")
basals = [h.reduced_cell.dend[i] for i in range(num_of_basal_subtrees)]
for i in range(num_of_basal_subtrees):
if has_apical:
index_in_reduced_cables_dimensions = i + 1
else:
index_in_reduced_cables_dimensions = i
cable_params = new_cable_properties[index_in_reduced_cables_dimensions]
nseg = new_cables_nsegs[index_in_reduced_cables_dimensions]
apply_params_to_section("dend[" + str(i) + "]", "basal", "reduced_cell",
basals[i], cable_params, nseg)
basals[i].connect(soma, subtrees_xs[index_in_reduced_cables_dimensions], 0)
# create cell python template
cell = Neuron(h.reduced_cell)
cell.soma = original_cell.soma
cell.apic = apic
return cell, basals
def merge_and_add_synapses(num_of_subtrees,
                           new_cable_properties,
                           PP_params_dict,
                           synapses_list,
                           mapping_sections_to_subtree_index,
                           netcons_list,
                           has_apical,
                           roots_of_subtrees,
                           original_cell,
                           basals,
                           cell,
                           reduction_frequency):
    '''Map every synapse onto the reduced model and merge compatible ones.

    Somatic synapses keep their exact segment; dendritic synapses are mapped
    onto their subtree's reduced cable via the impedance rule in
    reduce_synapse().  Synapses that land on the same segment and whose point
    process properties match (per PP_params_dict) are merged: they share one
    synapse object but keep their individual NetCons, which are re-pointed
    with setpost().

    Returns (new_synapses_list, subtree_ind_to_q), the surviving synapse
    objects and a map from subtree index to its complex frequency factor q.
    '''
    # dividing the original synapses into baskets, so that all synapses that are
    # on the same subtree will be together in the same basket
    # a list of baskets of synapses, each basket in the list will hold the
    # synapses of the subtree of the corresponding basket index
    baskets = [[] for _ in num_of_subtrees]
    soma_synapses_syn_to_netcon = {}
    for syn_index, synapse in enumerate(synapses_list):
        synapse_location = find_synapse_loc(synapse, mapping_sections_to_subtree_index)
        # for a somatic synapse
        # TODO: 'axon' is never returned by find_synapse_loc...
        if synapse_location.subtree_index in (SOMA_LABEL, 'axon'):
            soma_synapses_syn_to_netcon[synapse] = netcons_list[syn_index]
        else:
            baskets[synapse_location.subtree_index].append((synapse, synapse_location, syn_index))
    # mapping (non-somatic) synapses to their new location on the reduced model
    # (the new location is the exact location of the middle of the segment they
    # were mapped to, in order to enable merging)
    new_synapses_list, subtree_ind_to_q = [], {}
    for subtree_index in num_of_subtrees:
        imp_obj, subtree_input_impedance = measure_input_impedance_of_subtree(
            roots_of_subtrees[subtree_index], reduction_frequency)
        subtree_ind_to_q[subtree_index] = calculate_subtree_q(
            roots_of_subtrees[subtree_index], reduction_frequency)
        # iterates over the synapses in the curr basket
        for synapse, synapse_location, syn_index in baskets[subtree_index]:
            # subtree 0 is the apical subtree when the cell has one
            on_basal_subtree = not (has_apical and subtree_index == 0)
            # "reduces" the synapse - finds this synapse's new "merged"
            # location on its corresponding reduced cable
            x = reduce_synapse(original_cell,
                               synapse_location,
                               on_basal_subtree,
                               imp_obj,
                               subtree_input_impedance,
                               new_cable_properties[subtree_index].electrotonic_length,
                               subtree_ind_to_q[subtree_index])
            # find the section of the synapse
            if on_basal_subtree:
                if has_apical:
                    section_for_synapse = basals[subtree_index - 1]
                else:
                    section_for_synapse = basals[subtree_index]
            else:
                section_for_synapse = cell.apic
            # go over all point processes in this segment and see whether one
            # of them has the same properties as this synapse.
            # If there's such a synapse link the original NetCon with this point process;
            # if not, move the synapse to this segment.
            for PP in section_for_synapse(x).point_processes():
                if type_of_point_process(PP) not in PP_params_dict:
                    add_PP_properties_to_dict(PP, PP_params_dict)
                if synapse_properties_match(synapse, PP, PP_params_dict):
                    netcons_list[syn_index].setpost(PP)
                    break
            else:  # If the for finishes the loop -> first appearance of this synapse
                synapse.loc(x, sec=section_for_synapse)
                new_synapses_list.append(synapse)
    # merging somatic and axonal synapses
    synapses_per_seg = collections.defaultdict(list)
    for synapse in soma_synapses_syn_to_netcon:
        seg_pointer = synapse.get_segment()
        for PP in synapses_per_seg[seg_pointer]:
            if type_of_point_process(PP) not in PP_params_dict:
                add_PP_properties_to_dict(PP, PP_params_dict)
            if synapse_properties_match(synapse, PP, PP_params_dict):
                soma_synapses_syn_to_netcon[synapse].setpost(PP)
                break
        else:  # If the for finishes the loop -> first appearance of this synapse
            synapse.loc(seg_pointer.x, sec=seg_pointer.sec)
            new_synapses_list.append(synapse)
            synapses_per_seg[seg_pointer].append(synapse)
    return new_synapses_list, subtree_ind_to_q
def textify_seg_to_seg(segs):
    '''Return a copy of the segment mapping with keys and values stringified.'''
    return {str(original): str(reduced) for original, reduced in segs.items()}
def subtree_reductor(original_cell,
                     synapses_list,
                     netcons_list,
                     reduction_frequency,
                     model_filename='model.hoc',
                     total_segments_manual=-1,
                     PP_params_dict=None,
                     mapping_type='impedance',
                     return_seg_to_seg=False
                     ):
    '''
    Receives an instance of a cell with a loaded full morphology, a list of
    synapse objects, a list of NetCon objects (the i'th netcon in the list
    should correspond to the i'th synapse), the filename (string) of the model
    template hoc file that the cell was instantiated from, the desired
    reduction frequency as a float, optional parameter for the approximate
    desired number of segments in the new model (if this parameter is empty,
    the number of segments will be such that there is a segment for every 0.1
    lambda), and an optional param for the point process to be compared before
    deciding on whether to merge a synapse or not and reduces the cell (using
    the given reduction_frequency). Creates a reduced instance using the model
    template in the file whose filename is given as a parameter, and merges
    synapses of the same type that get mapped to the same segment
    (same "reduced" synapse object for them all, but different NetCon objects).
    model_filename : model.hoc will use a default template
    total_segments_manual: sets the number of segments in the reduced model
                           can be either -1, a float between 0 to 1, or an int
                           if total_segments_manual = -1 will do automatic segmentation
                           if total_segments_manual>1 will set the number of segments
                           in the reduced model to total_segments_manual
                           if 0>total_segments_manual>1 will automatically segment the model
                           but if the automatic segmentation will produce a segment number that
                           is lower than original_number_of_segments*total_segments_manual it
                           will set the number of segments in the reduced model to:
                           original_number_of_segments*total_segments_manual
    return_seg_to_seg: if True the function will also return a textify version of the mapping
                       between the original segments to the reduced segments
    Returns the new reduced cell, a list of the new synapses, and the list of
    the inputted netcons which now have connections with the new synapses.
    Notes:
    1) The original cell instance, synapses and Netcons given as arguments are altered
    by the function and cannot be used outside of it in their original context.
    2) Synapses are determined to be of the same type and mergeable if their reverse
    potential, tau1 and tau2 values are identical.
    3) Merged synapses are assigned a single new synapse object that represents them
    all, but keep their original NetCon objects. Each such NetCon now connects the
    original synapse's NetStim with
    the reduced synapse.
    '''
    if PP_params_dict is None:
        PP_params_dict = {}
    h.init()
    model_obj_name = load_model(model_filename)
    # finds soma properties
    soma = original_cell.soma[0] if original_cell.soma.hname()[-1] == ']' else original_cell.soma
    soma_cable = CableParams(length=soma.L, diam=soma.diam, space_const=None,
                             cm=soma.cm, rm=1.0 / soma.g_pas, ra=soma.Ra, e_pas=soma.e_pas,
                             electrotonic_length=None)
    has_apical = len(list(original_cell.apical)) != 0
    soma_ref = h.SectionRef(sec=soma)
    axon_section, axon_is_parent, soma_axon_x = find_and_disconnect_axon(soma_ref)
    roots_of_subtrees, num_of_subtrees = gather_subtrees(soma_ref)
    sections_to_delete, section_per_subtree_index, mapping_sections_to_subtree_index = \
        gather_cell_subtrees(roots_of_subtrees)
    # preparing for reduction
    # remove active conductances and get seg_to_mech dictionary
    segment_to_mech_vals = create_segments_to_mech_vals(sections_to_delete)
    # disconnects all the subtrees from the soma
    subtrees_xs = []
    for subtree_root in roots_of_subtrees:
        subtrees_xs.append(subtree_root.parentseg().x)
        h.disconnect(sec=subtree_root)
    # reducing the subtrees
    new_cable_properties = [reduce_subtree(roots_of_subtrees[i], reduction_frequency)
                            for i in num_of_subtrees]
    if total_segments_manual > 1:
        new_cables_nsegs = calculate_nsegs_from_manual_arg(new_cable_properties,
                                                           total_segments_manual)
    else:
        new_cables_nsegs = calculate_nsegs_from_lambda(new_cable_properties)
        if total_segments_manual > 0:
            # fractional setting: enforce a floor of
            # total_segments_manual * (number of segments in the original cell)
            original_cell_seg_n = (sum(i.nseg for i in list(original_cell.basal)) +
                                   sum(i.nseg for i in list(original_cell.apical))
                                   )
            min_reduced_seg_n = int(round((total_segments_manual * original_cell_seg_n)))
            if sum(new_cables_nsegs) < min_reduced_seg_n:
                logger.debug("number of segments calculated using lambda is {}, "
                             "the original cell had {} segments. "
                             "The min reduced segments is set to {}% of reduced cell segments".format(
                                 sum(new_cables_nsegs),
                                 original_cell_seg_n,
                                 total_segments_manual * 100))
                logger.debug("the reduced cell nseg is set to %s" % min_reduced_seg_n)
                new_cables_nsegs = calculate_nsegs_from_manual_arg(new_cable_properties,
                                                                   min_reduced_seg_n)
    # builds the reduced skeleton: original soma + one reduced cable per subtree
    cell, basals = create_reduced_cell(soma_cable,
                                       has_apical,
                                       original_cell,
                                       model_obj_name,
                                       new_cable_properties,
                                       new_cables_nsegs,
                                       subtrees_xs)
    # maps synapses onto the reduced cables, merging identical ones per segment
    new_synapses_list, subtree_ind_to_q = merge_and_add_synapses(
        num_of_subtrees,
        new_cable_properties,
        PP_params_dict,
        synapses_list,
        mapping_sections_to_subtree_index,
        netcons_list,
        has_apical,
        roots_of_subtrees,
        original_cell,
        basals,
        cell,
        reduction_frequency)
    # create segment to segment mapping
    original_seg_to_reduced_seg, reduced_seg_to_original_seg = create_seg_to_seg(
        original_cell,
        section_per_subtree_index,
        roots_of_subtrees,
        mapping_sections_to_subtree_index,
        new_cable_properties,
        has_apical,
        cell.apic,
        basals,
        subtree_ind_to_q,
        mapping_type,
        reduction_frequency)
    # copy active mechanisms
    copy_dendritic_mech(original_seg_to_reduced_seg,
                        reduced_seg_to_original_seg,
                        cell.apic,
                        basals,
                        segment_to_mech_vals,
                        mapping_type)
    if return_seg_to_seg:
        original_seg_to_reduced_seg_text = textify_seg_to_seg(original_seg_to_reduced_seg)
    # Connect axon back to the soma
    if len(axon_section) > 0:
        if axon_is_parent:
            soma.connect(axon_section[0])
        else:
            axon_section[0].connect(soma, soma_axon_x)
    # Now we delete the original model
    for section in sections_to_delete:
        with push_section(section):
            h.delete_section()
    cell.axon = axon_section
    cell.dend = cell.hoc_model.dend
    # NOTE(review): this deletes the reduced template's own soma[0];
    # cell.soma was already pointed at the original soma in create_reduced_cell
    with push_section(cell.hoc_model.soma[0]):
        h.delete_section()
    if return_seg_to_seg:
        return cell, new_synapses_list, netcons_list, original_seg_to_reduced_seg_text
    else:
        return cell, new_synapses_list, netcons_list
class Neuron(object):
    """Lightweight Python wrapper around an instantiated hoc cell model.

    Keeps a handle to the hoc object plus the four canonical morphology
    attributes, which start out unset and are filled in by the reduction code.
    """

    def __init__(self, model):
        # handle to the underlying hoc template instance
        self.hoc_model = model
        # morphology handles; assigned later (e.g. by subtree_reductor)
        self.soma = None
        self.dend = None
        self.apic = None
        self.axon = None
def load_default_model():
    '''Install the default `model` hoc template into the hoc interpreter.

    The template builds a passive cell (constants from Eyal 2015 and
    Benavides-Piccione 2013, per the embedded hoc comments) and exposes
    complete_full_model_creation(), which segments the morphology, deletes
    the axon and applies spine-corrected passive biophysics.
    '''
    h('''begintemplate model
public init, biophys, geom_nseg, delete_axon, finish_creating_model_after_loading_morphology
public soma, dend, apic, axon // sections
public all, somatic, apical, axonal, basal // section lists
objref all, somatic, apical, axonal, basal, this
proc init() {
    all = new SectionList()
    somatic = new SectionList()
    basal = new SectionList()
    apical = new SectionList()
    axonal = new SectionList()
    forall delete_section()
    StepDist = 60 // human cells have no spines in their first 60 um
                  // from soma - see Benavides-Piccione 2013
    F_Spines = 1.9 //As calculated - see detailes in Eyal 2015
    CM =0.45 // uF/cm2
    RM = 38907
    RA = 203
    E_PAS = -86
}
create soma[1], dend[1], apic[1], axon[1]
//external lambda_f
proc geom_nseg() {
    soma distance()
    forsec all {
        RA_calc = RA
        RM_calc = RM*F_Spines
        if (distance(1)>StepDist){
            RA_calc = RA
            RM_calc = RM*F_Spines
        }
        d = diam
        lambda = sqrt(RM_calc/RA_calc*d/10000/4)*10000
        nseg = int(L/lambda*10/2)*2+1
    }
}
proc biophys() {
    forsec all {
        insert pas
        cm =CM
        g_pas=1/RM
        Ra = RA
        e_pas = E_PAS
    }
    soma distance()
    forsec basal {
        if (distance(0.5)>StepDist) {
            L = L*F_Spines^(2/3)
            diam = diam*(F_Spines^(1/3))
        }
    }
    forsec apical {
        if (distance(0.5)>StepDist) {
            L = L*F_Spines^(2/3)
            diam = diam*(F_Spines^(1/3))
        }
    }
}
proc delete_axon(){
    forsec axonal{delete_section()}
}
proc complete_full_model_creation() {
    geom_nseg() // calculates num of segments
    delete_axon() // deletes the axon
    biophys() // increases cell dimensions to account for spines
}
endtemplate model''')
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,502
|
orena1/neuron_reduce
|
refs/heads/master
|
/neuron_reduce/reducing_methods.py
|
'''
This module contains the reduction algorithm itself, including the method
(added by Guy) for finding the cable length L and the synapse location X.
'''
import collections
import contextlib
import logging
import math
import cmath
from neuron import h
logger = logging.getLogger(__name__)
# Dimensions and passive properties of a (reduced) cable; see reduce_subtree():
# lengths in microns, rm in ohm*cm^2, ra in ohm*cm.
CableParams = collections.namedtuple('CableParams',
                                     'length, diam, space_const,'
                                     'cm, rm, ra, e_pas, electrotonic_length')
# A synapse's place in the original cell: subtree index, section number within
# that subtree, and relative location x along the section.
SynapseLocation = collections.namedtuple('SynapseLocation', 'subtree_index, section_num, x')
h('''obfunc lowest_impedance_recursive() { local lowest_impedance, lowest_phase, i localobj curr_subtree_root, sref1, lowest_imp_vec, lowest_child_subtree_impedance, imp_obj
curr_subtree_root = $o1 // in the first call to the function, this is a root section of a dendritic trunk
imp_obj = $o2
curr_subtree_root.sec {
lowest_impedance = imp_obj.transfer(1) // farthest tip of the the curr root section
lowest_phase = imp_obj.transfer_phase(1)
}
if (curr_subtree_root.nchild != 0) { // if the curr section has child sections
for i=0, curr_subtree_root.nchild-1 curr_subtree_root.child[i] { // for each child of the root, finds the lowest impedance within the subtree whose root is the curr child (in relation to the proximal tip in the curr root child)
curr_subtree_root.child[i] sref1 = new SectionRef()
lowest_child_subtree_impedance = lowest_impedance_recursive(sref1, imp_obj) // recursively returns the lowest transfer impedance and transfer phase within the curr subtree as a vector
if (lowest_child_subtree_impedance.x[0] < lowest_impedance) {
lowest_impedance = lowest_child_subtree_impedance.x[0]
lowest_phase = lowest_child_subtree_impedance.x[1]
}
}
}
lowest_imp_vec = new Vector(2)
lowest_imp_vec.x[0] = lowest_impedance
lowest_imp_vec.x[1] = lowest_phase
return lowest_imp_vec
}''')
@contextlib.contextmanager
def push_section(section):
    '''Push *section* onto the NEURON section stack; pop it when leaving the context.

    The pop is performed in a finally clause so that an exception raised inside
    the managed block cannot leave the hoc section stack unbalanced (the
    original version skipped h.pop_section() on error).
    '''
    section.push()
    try:
        yield
    finally:
        h.pop_section()
def _get_subtree_biophysical_properties(subtree_root_ref, frequency):
''' gets the biophysical cable properties (Rm, Ra, Rc) and q
for the subtree to be reduced according to the properties of the root section of the subtree
'''
section = subtree_root_ref.sec
rm = 1.0 / section.g_pas # in ohm * cm^2
# in secs, with conversion of the capacitance from uF/cm2 to F/cm2
RC = rm * (float(section.cm) / 1000000)
# defining q=sqrt(1+iwRC))
angular_freq = 2 * math.pi * frequency # = w
q_imaginary = angular_freq * RC
q = complex(1, q_imaginary) # q=1+iwRC
q = cmath.sqrt(q) # q = sqrt(1+iwRC)
return (section.cm,
rm,
section.Ra, # in ohm * cm
section.e_pas,
q)
def find_lowest_subtree_impedance(subtree_root_ref, imp_obj):
    '''Return the lowest transfer impedance in the subtree, in Ohms (complex).

    The impedance is taken relative to the proximal-to-soma end of the given
    subtree root section, via the recursive hoc helper
    lowest_impedance_recursive.
    '''
    # hoc helper returns [lowest transfer impedance in Mohms, transfer phase]
    imp_vec = h.lowest_impedance_recursive(subtree_root_ref, imp_obj)
    # convert Mohms -> Ohms and rebuild a complex number from polar coordinates
    return cmath.rect(imp_vec.x[0] * 1000000, imp_vec.x[1])
def compute_zl_polar(Z0, L, q):
    '''Return the polar representation of ZL = Z0 / cosh(q*L).

    This is equation 2.9 in Gal Eliraz's thesis.
    '''
    return cmath.polar(Z0 * 1.0 / cmath.cosh(q * L))
def find_best_real_L(Z0, ZL_goal, q, max_L=10.0, max_depth=50):
    '''Binary-search for the real L whose |ZL| (eq. 2.9) matches |ZL_goal|.

    The modulus of ZL decreases monotonically with L, so bisection converges.
    Gives up after max_depth halvings and logs if the residual modulus is
    still above 0.001 Ohm (Z values are normally > 1e6 Ohm).
    '''
    lo, hi = 0.0, max_L
    candidate = (lo + hi) / 2.0
    target_modulus = cmath.polar(ZL_goal)[0]
    for _ in range(max_depth):
        modulus = compute_zl_polar(Z0, candidate, q)[0]
        if abs(target_modulus - modulus) <= 0.001:  # Z are in Ohms, normal values are >10^6
            break
        if target_modulus > modulus:
            # modulus too small -> L too large -> bisect downwards
            candidate, hi = (lo + candidate) / 2.0, candidate
        else:
            candidate, lo = (hi + candidate) / 2.0, candidate
    else:
        logger.info("The difference between L and the goal L is larger than 0.001")
    return candidate
def compute_zx_polar(Z0, L, q, x):
    '''Return the polar representation of Zx = Z0 * cosh(q*(L-x)) / cosh(q*L).

    This is equation 2.8 in Gal Eliraz's thesis.
    '''
    return cmath.polar(Z0 * cmath.cosh(q * (L - x)) / cmath.cosh(q * L))
def find_best_real_X(Z0, ZX_goal, q, L, max_depth=50):
    '''Binary-search for the real synapse location X whose |ZX| (eq. 2.8)
    matches |ZX_goal|.

    The modulus of ZX decreases monotonically with x, so bisection over [0, L]
    converges; logs if the residual modulus is still above 0.001 Ohm after
    max_depth halvings.
    '''
    lo, hi = 0.0, L
    candidate = (lo + hi) / 2.0
    target_modulus = cmath.polar(ZX_goal)[0]
    for _ in range(max_depth):
        modulus = compute_zx_polar(Z0, L, q, candidate)[0]
        if abs(target_modulus - modulus) <= 0.001:
            break
        if target_modulus > modulus:
            # modulus too small -> x too distal -> bisect towards the soma end
            candidate, hi = (lo + candidate) / 2.0, candidate
        else:
            candidate, lo = (hi + candidate) / 2.0, candidate
    else:
        logger.info("The difference between X and the goal X is larger than 0.001")
    return candidate
def find_subtree_new_electrotonic_length(root_input_impedance, lowest_subtree_impedance, q):
    '''Return the electrotonic length L of the reduced cable for a subtree.

    Solves lowest_subtree_impedance = root_input_impedance / cosh(q*L) for a
    purely real L.  An analytic complex solution exists
    (L = arcosh(root/lowest) / q), but the reduction deliberately searches for
    the real L that reproduces the correct attenuation from the dendritic tip
    to the soma.
    '''
    return find_best_real_L(root_input_impedance, lowest_subtree_impedance, q)
def _find_subtree_new_diam_in_cm(root_input_impedance, electrotonic_length_as_complex, rm, ra, q):
'''finds the subtree's new cable's diameter (in cm)
according to the given complex input impedance at the segment in the
original subtree that is closest to the soma (the tip), and the given cable
electrotonic length,
with the following equation:
d (in cm) = (2/PI * (sqrt(RM*RA)/(q*subtree_root_input_impedance)) *
(coth(q * NewCableElectrotonicLength)) )^(2/3)
derived from Rall's cable theory for dendrites (Gal Eliraz)
'''
diam_in_cm = (2.0 / math.pi *
(math.sqrt(rm * ra) / (q * root_input_impedance)) *
(1 / cmath.tanh(q * electrotonic_length_as_complex)) # coth = 1/tanh
) ** (2.0 / 3)
'''
# for debugging inaccuracies:
if diam_in_cm.imag != 0:
if abs(diam_in_cm.imag) > 0.03:
print "PROBLEM - DIAM HAS SUBSTANTIAL IMAGINARY PART"
print "\n"
'''
# the radius of the complex number received from the equation
new_subtree_dend_diam_in_cm = cmath.polar(diam_in_cm)[0]
return new_subtree_dend_diam_in_cm
def find_space_const_in_cm(diameter, rm, ra):
    '''Return the passive space constant (lambda) in cm.

    lambda = sqrt(rm_per_length / ri_per_length); the external resistance r0
    is treated as negligible.  diameter is in cm, rm in ohm*cm^2, ra in
    ohm*cm.
    '''
    # membrane resistance per unit length: Rm / (pi * d)
    membrane_per_length = float(rm) / (math.pi * diameter)
    # axial resistance per unit length: 4*Ra / (pi * d^2)
    axial_per_length = float(4 * ra) / (math.pi * (diameter**2))
    return math.sqrt(membrane_per_length / axial_per_length)
def reduce_subtree(subtree_root, frequency):
    '''Reduces the subtree from the original_cell into one single section (cable).
    The reduction is done by finding the length and diameter of the cable (a
    single solution) that preserves the subtree's input impedance at the
    somatic end, and the transfer impedance in the subtree from the distal end
    to the proximal somatic end (between the new cable's two tips).

    Returns a CableParams namedtuple describing the reduced cable (length,
    diameter and space constant in microns; rm in ohm*cm^2, ra in ohm*cm).
    '''
    subtree_root_ref = h.SectionRef(sec=subtree_root)
    cm, rm, ra, e_pas, q = _get_subtree_biophysical_properties(subtree_root_ref, frequency)
    # finds the subtree's input impedance (at the somatic-proximal end of the
    # subtree root section) and the lowest transfer impedance in the subtree in
    # relation to the somatic-proximal end (see more in Readme on NeuroReduce)
    imp_obj, root_input_impedance = measure_input_impedance_of_subtree(subtree_root, frequency)
    # in Ohms (a complex number)
    curr_lowest_subtree_imp = find_lowest_subtree_impedance(subtree_root_ref, imp_obj)
    # reducing the whole subtree into one section:
    # L = 1/q * arcosh(ZtreeIn(f)/min(ZtreeX,0(f)),
    # d = ( (2/pi * (sqrt(Rm*Ra)/q*ZtreeIn(f)) * coth(qL) )^(2/3) - from Gal Eliraz's thesis 1999
    new_cable_electrotonic_length = find_subtree_new_electrotonic_length(root_input_impedance,
                                                                         curr_lowest_subtree_imp,
                                                                         q)
    cable_electrotonic_length_as_complex = complex(new_cable_electrotonic_length, 0)
    new_cable_diameter_in_cm = _find_subtree_new_diam_in_cm(root_input_impedance,
                                                            cable_electrotonic_length_as_complex,
                                                            rm,
                                                            ra,
                                                            q)
    new_cable_diameter = new_cable_diameter_in_cm * 10000  # in microns
    # calculating the space constant, in order to find the cylinder's length:
    # space_const = sqrt(rm/(ri+r0))
    curr_space_const_in_cm = find_space_const_in_cm(new_cable_diameter_in_cm,
                                                    rm,
                                                    ra)
    curr_space_const_in_micron = 10000 * curr_space_const_in_cm
    new_cable_length = curr_space_const_in_micron * new_cable_electrotonic_length  # in microns
    return CableParams(length=new_cable_length,
                       diam=new_cable_diameter,
                       space_const=curr_space_const_in_micron,
                       cm=cm,
                       rm=rm,
                       ra=ra,
                       e_pas=e_pas,
                       electrotonic_length=new_cable_electrotonic_length)
def find_merged_loc(cable_nseg, relative_loc):
    '''Return the merged relative location (x) of a synapse on a cable.

    Synapses are snapped to the centre of the segment containing them (so
    that synapses sharing a segment can later be merged); locations exactly
    at the cable tips (0 or 1) are returned unchanged.
    '''
    if relative_loc in (0, 1):
        return relative_loc
    # 1-based index of the segment (out of cable_nseg) holding the synapse
    segment_index = int(relative_loc * cable_nseg) + 1
    # midpoint = average of the segment's two relative endpoints
    segment_end = float(segment_index) / cable_nseg
    segment_start = float(segment_index - 1) / cable_nseg
    return (segment_end + segment_start) / 2
def measure_input_impedance_of_subtree(subtree_root_section, frequency):
    '''measures the input impedance of the subtree with the given root section
    (at the "0" tip, the soma-proximal end),
    returns the Impedance hoc object and the input impedance as a complex value
    '''
    imp_obj = h.Impedance()
    CLOSE_TO_SOMA_EDGE = 0
    # sets origin for impedance calculations (soma-proximal end of root section)
    imp_obj.loc(CLOSE_TO_SOMA_EDGE, sec=subtree_root_section)
    # computes transfer impedance from every segment in the model in relation
    # to the origin location above
    # NOTE(review): the tiny 1/9e9 offset added to the frequency looks like a
    # guard against degenerate behaviour at exactly 0 Hz -- confirm
    imp_obj.compute(frequency + 1 / 9e9, 0)
    # in Ohms (impedance measured at soma-proximal end of root section)
    root_input_impedance = imp_obj.input(CLOSE_TO_SOMA_EDGE, sec=subtree_root_section) * 1000000
    root_input_phase = imp_obj.input_phase(CLOSE_TO_SOMA_EDGE, sec=subtree_root_section)
    # creates a complex impedance value out of the given polar coordinates
    root_input_impedance = cmath.rect(root_input_impedance, root_input_phase)
    return imp_obj, root_input_impedance
def reduce_synapse(cell_instance,
                   synapse_location,
                   on_basal,
                   imp_obj,
                   root_input_impedance,
                   new_cable_electrotonic_length,
                   q_subtree):
    '''
    Receives an instance of a cell, the location (section + relative
    location(x)) of a synapse to be reduced, a boolean on_basal that is True if
    the synapse is on a basal subtree, the number of segments in the reduced
    cable that this synapse is in, an Impedance calculating Hoc object, the
    input impedance at the root of this subtree, and the electrotonic length of
    the reduced cable that represents the current subtree
    (as a real and as a complex number) -
    and maps the given synapse to its new location on the reduced cable
    according to the NeuroReduce algorithm. Returns the new "post-merging"
    relative location of the synapse on the reduced cable (x, 0<=x<=1), that
    represents the middle of the segment that this synapse is located at in the
    new reduced cable.
    '''
    # measures the original transfer impedance from the synapse to the
    # somatic-proximal end in the subtree root section
    if not on_basal:  # apical subtree
        section = cell_instance.apic[synapse_location.section_num]
    else:  # basal subtree
        section = cell_instance.dend[synapse_location.section_num]
    with push_section(section):
        orig_transfer_imp = imp_obj.transfer(synapse_location.x) * 1000000  # ohms
        orig_transfer_phase = imp_obj.transfer_phase(synapse_location.x)
        # creates a complex Impedance value with the given polar coordinates
        orig_synapse_transfer_impedance = cmath.rect(orig_transfer_imp, orig_transfer_phase)
    # synapse location could be calculated using:
    # X = L - (1/q) * arcosh( (Zx,0(f) / ZtreeIn(f)) * cosh(q*L) ),
    # derived from Rall's cable theory for dendrites (Gal Eliraz)
    # but we chose to find the X that will give the correct modulus. See comment about L values
    synapse_new_electrotonic_location = find_best_real_X(root_input_impedance,
                                                         orig_synapse_transfer_impedance,
                                                         q_subtree,
                                                         new_cable_electrotonic_length)
    # convert the electrotonic location into a relative location on the cable
    new_relative_loc_in_section = (float(synapse_new_electrotonic_location) /
                                   new_cable_electrotonic_length)
    if new_relative_loc_in_section > 1:  # PATCH: clamp numerical overshoot past the distal tip
        new_relative_loc_in_section = 0.999999
    return new_relative_loc_in_section
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,503
|
orena1/neuron_reduce
|
refs/heads/master
|
/tests/test_script_helper.py
|
'''Test script for subtree reductor:
May be used to test the NeuroReduce reduction as performed by
subtree_reductor() in subtree_reductor_func.py. Receives as arguments a
Neurolucida full morphology file, a model template file, the total approximate
number of segments in the reduced model (-1 for default calculation of a
segment for every 0.1 lambda), and a file with synapse locations (see format
in Readme) - creates the full morphology instance, reduces it with
subtree_reductor(), and compares the voltage traces from the simulation of the
original full model and the reduced model using the same random synaptic
activity in both simulations, which is determined by parameters in this file.
Usage from shell: python test_script.py original_morphology_file
model_template_file reduction_frequency manual_total_nsegs
synapse_locations_file. For example: python test_script.py
2013_03_06_cell08_876_H41_05_Cell2.ASC model.hoc 38 -1
origRandomSynapses-10000
- This tester supports only Neurolucida morphology files. To test NeuroReduce
with other kinds of morphology files, CODE CHANGES are required in
create_model()
- The model template file given to this tester must contain a
complete_full_model_creation() function (see Readme and example in the
attached model.hoc file) to ensure
the correct and complete creation of the full model.
- This tester supports excitatory and inhibitory synapses - randomly splits
the locations in the single given synapse locations file into excitatory and
inhibitory synapses according to the distribution determined by the user in
PERCENTAGE_OF_EXCITATORY_SYNAPSES; gives each synapse's NetStim
object a seed that corresponds to the synapse's index as excitatory or
inhibitory. To support more types of synapses, CODE CHANGES are required
in create_synapses()
- Global simulation and stimuli parameters can be changed
according to the user's needs.
'''
from __future__ import print_function
from collections import namedtuple
from contextlib import contextmanager
import sys
import random
import os
import logging
import subprocess
from neuron import h
import numpy as np
import matplotlib.pyplot as plt
from neuron_reduce import subtree_reductor
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
#############################
# simulation parameters - to be adjusted by the user
SIM_PARAMS = {'tstop': 1500,  # ms
              'steps_per_ms': 10,
              'dt': 0.1,
              }
SPIKE_BEGIN_TIME = 10  # ms
WEIGHT = 0.0008  # gbar = 0.8 nS
ICLAMP_PARAMS = {'duration': 1500,  # ms
                 'delay': 10,
                 'amplitude': 0,  # nA
                 }
EXCITATORY_STIMULI_INTERVAL = 1000  # ms - 100 ms interval is 10 hz
EXCITATORY_STIMULI_NUMBER = 15
FREQUENCY_OF_INHIBITORY_SYNAPSES_IS_HIGHER_BY = 10
INHIBITORY_STIMULI_INTERVAL = (float(EXCITATORY_STIMULI_INTERVAL) /
                               FREQUENCY_OF_INHIBITORY_SYNAPSES_IS_HIGHER_BY)  # ms
INHIBITORY_STIMULI_NUMBER = int(float(ICLAMP_PARAMS['duration']) / INHIBITORY_STIMULI_INTERVAL)
# per-type synapse kinetics (Exp2Syn-style tau_1/tau_2) and stimulus schedule
SYNAPSE_PARAMS = {'excitatory': {'e_syn': 0,  # mV
                                 'tau_1': 0.3,  # ms (like AMPA)
                                 'tau_2': 1.8,  # ms (like AMPA)
                                 'stimuli_interval': EXCITATORY_STIMULI_INTERVAL,
                                 'stimuli_number': EXCITATORY_STIMULI_NUMBER,
                                 },
                  'inhibitory': {'e_syn': -86,
                                 'tau_1': 1.,  # ms
                                 'tau_2': 8.,  # ms
                                 'stimuli_interval': INHIBITORY_STIMULI_INTERVAL,
                                 'stimuli_number': INHIBITORY_STIMULI_NUMBER,
                                 },
                  }
PERCENTAGE_OF_EXCITATORY_SYNAPSES = 85.
# stimulus noise level (presumably forwarded to NetStim.noise;
# 1 = fully random timing) -- confirm against the NetStim setup code
STIMULI_NOISE = 1
# seed for the Random object that classifies file synapses as exc/inh
SEED_FOR_RANDOM_SYNAPSE_CLASSIFICATION = 1
def abs_diff(a, b):
    '''Return the summed element-wise absolute difference between *a* and *b*.'''
    return np.abs(a - b).sum()
def rmsd(reduced, control):
    '''Return the root-mean-square deviation between two 1-D traces.'''
    residual = reduced - control
    # np.dot(residual, residual) is the sum of squared deviations
    return np.sqrt(np.dot(residual, residual) / reduced.size)
@contextmanager
def chdir(path):
    '''Change the working directory to *path* for the duration of the block.

    The previous working directory is restored in a finally clause, so an
    exception raised inside the block can no longer leave the process in the
    wrong cwd (the original version skipped the restore on error).
    '''
    cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(cwd)
def plot_traces(time, reduction_frequency,
                original_control_voltage, reduced_cell_voltage,
                np_orig_recording_vec_reduced, np_recording_vec_reduced,
                rmsd_new, rmsd_old, rmsd_two_reduced_vecs,
                np_recording_vec_control, np_orig_recording_vec_control):
    '''Plot the comparison figures for the reduction test and block on show().

    Figure 1: original model vs. the new and original reductions (with RMSDs).
    Figure 2: new vs. original control traces (summed absolute difference).
    Figure 3: new vs. original reduction traces (their RMSD).
    '''
    plt.figure(1)
    plt.plot(time, original_control_voltage, label='original model ', c='b')
    plt.plot(time, reduced_cell_voltage, label='subtree reduction ', c='g')
    # typo fix: label previously read 'orginal'
    plt.plot(time, np_orig_recording_vec_reduced, label='original subtree reduction ', c='r')
    plt.xlabel('time (ms)')
    plt.ylabel('voltage at soma (mV)')
    # fix: dropped the stray unmatched ')' from the title format string
    plt.title("new Reduction RMSD %s old %s" % (rmsd_new, rmsd_old))
    plt.legend(loc='lower right')
    plt.figure(2)
    plt.plot(time, np_recording_vec_control, label='new control - voltage at soma (mV)', c='b')
    plt.plot(time, np_orig_recording_vec_control,
             label='original control - voltage at soma (mV)', c='r')
    plt.title("differences in control - Hz (sum = %s)" %
              abs_diff(np_recording_vec_control, np_orig_recording_vec_control))
    plt.legend(loc='lower right')
    plt.figure(3)
    plt.plot(time, np_orig_recording_vec_reduced,
             label='original reduction - voltage at soma (mV)', c='b')
    plt.plot(time, np_recording_vec_reduced,
             label='new reduction - voltage at soma (mV)', c='g')
    plt.title("differences in Reduction - %s Hz (sum = %s)" %
              (reduction_frequency, rmsd_two_reduced_vecs))
    plt.legend(loc='lower right')
    plt.show()
def create_model(morphology_file, model_obj_name, instance_name, create_type):
    '''Creates a full morphology cell instance from Neurolucida morphology model template filenames
    This function is called to create the original instance to be reduced and
    to create the control cell
    TODO: To support non-Neurolucida morphology files, this function should be CHANGED.
    '''
    assert instance_name in ('original_cell', 'control_cell'), \
        "name must be one of ('original_cell', 'control_cell')"
    h("{objref %s}" % instance_name)
    model = dict(instance_name=instance_name, model_obj_name=model_obj_name)
    if create_type in ('basic', 'human'):
        h("{instance_name} = new {model_obj_name}()".format(**model))
        # instantiates according to morphology using import3d
        nl = h.Import3d_Neurolucida3()
        nl.quiet = 1
        nl.input(morphology_file)
        import_3d = h.Import3d_GUI(nl, 0)
        instance_created = getattr(h, instance_name)
        # associates cell instance with the morphology file
        import_3d.instantiate(instance_created)
        # this function should be included in the model.hoc file given as a parameter
        instance_created.complete_full_model_creation()
    elif create_type in ('bbp', 'bbpactive'):
        # template is called with (1, morphology) and the cell unwrapped via .CellRef
        h('{instance_name} = new {model_obj_name}(1, "{morphology_file}")'.format(
            morphology_file=morphology_file, **model))
        h('{instance_name} = {instance_name}.CellRef'.format(**model))
    elif create_type in ('bbpnew', ):
        # presumably the template loads its morphology from the current
        # directory, hence the temporary chdir -- confirm
        with chdir(morphology_file[:morphology_file.rindex('/')]):
            h('{instance_name} = new {model_obj_name}(0)'.format(**model))
    elif create_type in ('hay', 'almog', 'allen'):
        h('{instance_name} = new {model_obj_name}("{morphology_file}")'.format(
            morphology_file=morphology_file, **model))
    return getattr(h, instance_name)
# One synapse row from the locations file: kind ('apical'/'somatic'/anything
# else = basal), section index, and relative location x within the section.
SynapseLocation = namedtuple('SynapseLocation', 'type, section_num, x')
def create_synapses(cell_instance,
synapse_file,
seed_for_random_synapse_classification=SEED_FOR_RANDOM_SYNAPSE_CLASSIFICATION,
percentage_of_excitatory_synapses=PERCENTAGE_OF_EXCITATORY_SYNAPSES):
'''Creates the synapses according to the synapse locations file
attaches them to the given cell instance.
Returns the new synapses_list, netstims_list, netcons_list, and
randoms_list (the activity generators for the NetStims).
TODO: Supports two types of synapses, to support more types CODE CHANGES are required
'''
synapses = []
with open(synapse_file) as fd:
for line in fd:
line = line.strip()
if not line:
continue
# extracting the synapse location
line_content = line.strip("[,]").split(",")
type_ = line_content[0].strip()
section_num = int(line_content[1].strip())
x = float(line_content[2].strip()) # synapse's relative location in the section
synapses.append(SynapseLocation(type_, section_num, x))
synapses_list, netstims_list, netcons_list, randoms_list = [], [], [], []
num_of_inhibitory_syns = num_of_excitatory_syns = 0
# the seed used to randomly classify synapses from the file into different types
rand = random.Random(seed_for_random_synapse_classification).random
# iterates over synapse locations file and creates synapses, NetStims and NetCons
for i, synapse in enumerate(synapses):
if synapse.type == 'apical':
cell_instance.apic[synapse.section_num].push()
elif synapse.type == 'somatic':
cell_instance.soma[0].push()
else: # for synapses on basal subtrees
cell_instance.dend[synapse.section_num].push()
synapse = h.Exp2Syn(synapse.x)
h.pop_section()
# This code should be changed in order to enable more synaptic types:
# randomly classifies the synapse as excitatory or inhibitory
# (according to the Random object instantiated above)
# classifies according to the user-decided distribution of synapses
if rand() <= percentage_of_excitatory_synapses / 100.: # excitatory
params = SYNAPSE_PARAMS['excitatory']
# to appoint its NetStim's random-activity generating Random object
# a seed according to its index as an excitatory synapse
randoms_list.append(h.Random(num_of_excitatory_syns))
num_of_excitatory_syns += 1
else: # inhibitory synapse
params = SYNAPSE_PARAMS['inhibitory']
# to appoint its NetStim's random-activity generating Random object
# a seed according to its index as an inhibitory synapse
randoms_list.append(h.Random(num_of_inhibitory_syns))
num_of_inhibitory_syns += 1
synapse.e = params['e_syn']
synapse.tau1 = params['tau_1']
synapse.tau2 = params['tau_2']
stimuli_interval = params['stimuli_interval']
stimuli_number = params['stimuli_number']
synapses_list.append(synapse)
# creates a NetStim for the synapse and assigns it properties
netstim = h.NetStim()
netstim.interval = stimuli_interval # ms
netstim.number = stimuli_number
netstim.start = SPIKE_BEGIN_TIME
netstim.noise = STIMULI_NOISE
randoms_list[i].negexp(1) # must specify negexp distribution with mean = 1
# assigns the NetStim the Random object created above - to determine its activity
netstim.noiseFromRandom(randoms_list[i])
netstims_list.append(netstim)
# creates a NetCon and connects it to the synapse and NetStim
netcons_list.append(h.NetCon(netstims_list[i], synapses_list[i]))
netcons_list[i].weight[0] = WEIGHT # the synaptic weight
netcons_list[i].delay = 0
return synapses_list, netstims_list, netcons_list, randoms_list
def generate_random_synapses_locations(cell, syn_filename, num_syns=10000, seed=2):
    '''Draw `num_syns` synapse locations uniformly over the dendritic length.

    Each location is sampled with probability proportional to section length
    across all basal and apical sections, and written to `syn_filename` as
    "[type,section_index,x]" lines, x being the relative position in [0, 1].
    '''
    rng = random.Random(seed)
    basal_total = sum(sec.L for sec in cell.basal)
    full_total = basal_total + sum(sec.L for sec in cell.apical)

    def _locate(sections, offset_L):
        # Walk the section list until the cumulative length passes offset_L;
        # returns (index, relative position) or None on a float edge case.
        walked = 0
        for idx, sec in enumerate(sections):
            if walked + sec.L > offset_L:
                return idx, (offset_L - walked) / sec.L
            walked += sec.L
        return None

    locations = []
    for _ in range(num_syns):
        draw = rng.uniform(0, full_total)
        if draw < basal_total:
            hit = _locate(list(cell.basal), draw)
            if hit is not None:
                locations.append(("basal", hit[0], hit[1]))
        else:
            hit = _locate(list(cell.apical), draw - basal_total)
            if hit is not None:
                locations.append(("apical", hit[0], hit[1]))
    print(syn_filename)
    with open(syn_filename, 'w') as out:
        for kind, idx, x in locations:
            out.write("[%s,%s,%s]\n" % (kind, idx, x))
def loadtemplate(template_file):
    '''Load a hoc template unless one of the same name is already declared.'''
    template_name = template_file.split(".hoc")[0].split('/')[-1]
    if h.name_declared(template_name) != 0:
        # reloading a declared template would redefine it inside NEURON
        print("WARNING The template '%s' is already defined... not loading." % template_name)
    else:
        print("loading template '%s'" % template_name)
        h.load_file(template_file)
def change_cell_location(soma, offset=150):
    '''Shift every 3D point of `soma` by `offset` along x, y and z.

    Used so the control and reduced cells do not overlap when drawn; it has
    no electrical effect on the simulation.
    '''
    # NOTE(review): the section is pushed but never popped (no h.pop_section()),
    # leaving it on the hoc section stack - confirm this is harmless here.
    soma.push()
    for i in range(int(h.n3d())):
        h.pt3dchange(i,
                     offset + h.x3d(i),
                     offset + h.y3d(i),
                     offset + h.z3d(i),
                     h.diam3d(i))
def write_test_vectors(voltage_file,
                       reduction_frequency, synapse_file,
                       recording_vec_control, recording_vec_reduced):
    '''Write the reference voltage traces for the unit test to `voltage_file`.

    The first line is a human-readable header recording every simulation
    parameter.  The two vectors are printed consecutively without extra
    spaces, first the control vector and after it the reduced vector.  Each
    voltage value of the vector is in a new line.  load_voltages() reads this
    exact format back, so the layout must not change.
    '''
    with open(voltage_file, 'w') as fd:
        fd.write("This file represents the control and reduced voltage vectors "
                 "for the following parameters: "
                 "frequency = {REDUCTION_FREQUENCY} Hz, "
                 "synapse file = {SYNAPSE_FILE} , "
                 "EXCITATORY_E_SYN = {EXCITATORY_E_SYN} mV, "
                 "EXCITATORY_TAU_1 = {EXCITATORY_TAU_1} ms, "
                 "EXCITATORY_TAU_2 = {EXCITATORY_TAU_2} ms, "
                 "EXCITATORY_STIMULI_INTERVAL = {EXCITATORY_STIMULI_INTERVAL} ms "
                 "EXCITATORY_STIMULI_NUMBER = {EXCITATORY_STIMULI_NUMBER}, "
                 "STIMULI_NOISE = {STIMULI_NOISE}, "
                 "weight = {WEIGHT} microSiemens, "
                 "ICLAMP_AMPLITUDE = {ICLAMP_AMPLITUDE} nA, "
                 "ICLAMP_DELAY = {ICLAMP_DELAY} ms, "
                 "ICLAMP_DURATION = {ICLAMP_DURATION} ms, "
                 "tstop = {TSTOP} ms, "
                 "dt = {DT}, "
                 "SPIKE_BEGIN_TIME = {SPIKE_BEGIN_TIME}ms\n".format(
                     REDUCTION_FREQUENCY=reduction_frequency,
                     SYNAPSE_FILE=synapse_file,
                     EXCITATORY_E_SYN=SYNAPSE_PARAMS['excitatory']['e_syn'],
                     EXCITATORY_TAU_1=SYNAPSE_PARAMS['excitatory']['tau_1'],
                     EXCITATORY_TAU_2=SYNAPSE_PARAMS['excitatory']['tau_2'],
                     EXCITATORY_STIMULI_INTERVAL=EXCITATORY_STIMULI_INTERVAL,
                     EXCITATORY_STIMULI_NUMBER=EXCITATORY_STIMULI_NUMBER,
                     STIMULI_NOISE=STIMULI_NOISE,
                     WEIGHT=WEIGHT,
                     ICLAMP_AMPLITUDE=ICLAMP_PARAMS['amplitude'],
                     ICLAMP_DELAY=ICLAMP_PARAMS['delay'],
                     ICLAMP_DURATION=ICLAMP_PARAMS['duration'],
                     TSTOP=SIM_PARAMS['tstop'],
                     DT=SIM_PARAMS['dt'],
                     SPIKE_BEGIN_TIME=SPIKE_BEGIN_TIME,
                 ))
        # repr() keeps full float precision so the round-trip via
        # load_voltages() is lossless
        for i in range(int(recording_vec_control.size())):
            fd.write(repr(recording_vec_control.x[i]) + "\n")
        for i in range(int(recording_vec_reduced.size())):
            fd.write(repr(recording_vec_reduced.x[i]) + "\n")
def load_voltages(voltage_file, number_of_recordings):
    '''Read the traces written by write_test_vectors back from disk.

    The first line (simulation-configuration header) is skipped; the rest of
    the file holds the control trace followed by the reduced trace, one float
    per line.  Returns (control, reduced) as numpy arrays.
    '''
    with open(voltage_file) as handle:
        lines = handle.readlines()
    values = [float(entry) for entry in lines[1:]]
    assert len(values) == number_of_recordings * 2
    control_trace = np.array(values[:number_of_recordings])
    reduced_trace = np.array(values[number_of_recordings:])
    return control_trace, reduced_trace
def setup_iclamps(sec, params):
    '''Attach an IClamp to the middle of `sec`, configured from `params`.

    `params` must provide 'amplitude' (nA), 'delay' (ms) and 'duration' (ms).
    Returns the clamp so the caller can keep it alive.
    '''
    clamp = h.IClamp(0.5, sec=sec)
    clamp.amp = params['amplitude']
    clamp.dur = params['duration']
    clamp.delay = params['delay']
    return clamp
def run_test(morphology_file,
             model_file,
             model_file_reduced,
             reduction_frequency,
             manual_total_nsegs,
             synapse_file,
             voltage_file,
             write_unit_test_vectors,
             plot_voltages,
             create_type,
             celsius):
    '''Run a test reduction

    Builds the full model twice (one copy to reduce, one control), attaches
    identical synapses to each, reduces one with subtree_reductor, simulates
    both, and compares the resulting somatic voltage traces with the stored
    unit-test vectors in `voltage_file`.

    Returns True when both traces match the stored vectors (np.allclose,
    atol=1e-8).

    Note: This must be run from a new python instance so that neuron is reset
    '''
    h.celsius = celsius  # needs to be set for reduction to work properly
    reduction_frequency = int(reduction_frequency)
    print('---------------sim data --------------------------')
    print("REDUCTION_FREQUENCY %s" % reduction_frequency)
    print("MODEL_FILE %s" % model_file)
    print("MODEL_FILE_REDUCED %s" % model_file_reduced)
    print("SYNAPSE_FILE %s" % synapse_file)
    print("MORPHOLOGY_FILE %s" % morphology_file)
    print("MANUAL_TOTAL_NSEGS %s" % manual_total_nsegs)
    print("VOLTAGEFILE %s" % voltage_file)
    print('vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n\n')
    # Models that ship mod files need their mechanisms compiled and loaded
    # before the templates are instantiated.
    if create_type in ('almog', 'hay', 'bbpnew', 'bbpactive', 'allen', 'human'):
        with chdir(model_file[:model_file.rindex('/')]):
            process = subprocess.Popen(['nrnivmodl', 'mod'],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            print('compiling mod files')
            stdout, stderr = process.communicate()
            #print(stdout)
            #print(stderr)
            # try the library names produced by different NEURON versions
            h.nrn_load_dll("x86_64/.libs/libnrnmech.so.0")
            h.nrn_load_dll("x86_64/.libs/libnrnmech.0.so")
            h.nrn_load_dll("x86_64/.libs/libnrnmech.so")
        if create_type == 'hay':
            loadtemplate(model_file[:model_file.rindex('/')] + '/L5PCbiophys3.hoc')
        elif create_type == 'allen':
            loadtemplate(model_file[:model_file.rindex('/')] + '/AllenBiophys.hoc')
    h.load_file("import3d.hoc")
    loadtemplate(model_file)
    # creates original cell with synapses, and reduces it with neuron_reduce
    model_obj_name = os.path.basename(model_file.split(".hoc")[0])
    original_cell = create_model(morphology_file, model_obj_name, "original_cell", create_type)
    # creates control cell with synapses
    control_cell = create_model(morphology_file, model_obj_name, "control_cell", create_type)
    synapses_list_control, netstims_list_control, netcons_list_control, random_list_control = \
        create_synapses(control_cell, synapse_file)
    # simulates control and reduced cell instances together
    synapses_list, netstims_list, netcons_list, random_list = \
        create_synapses(original_cell, synapse_file)
    reduced_cell, synapses_list, netcons_list = subtree_reductor(original_cell,
                                                                 synapses_list,
                                                                 netcons_list,
                                                                 reduction_frequency,
                                                                 model_file_reduced,
                                                                 manual_total_nsegs)
    # some templates expose soma as an array (name ends with ']'), others as a
    # single section
    if control_cell.soma.hname()[-1] == ']':
        control_soma = control_cell.soma[0]
        reduced_soma = reduced_cell.soma[0]
    else:
        control_soma = control_cell.soma
        reduced_soma = reduced_cell.soma
    h.steps_per_ms = SIM_PARAMS['steps_per_ms']
    h.dt = SIM_PARAMS['dt']
    h.tstop = SIM_PARAMS['tstop']
    h.v_init = reduced_soma.e_pas
    # set iclamps
    setup_iclamps(reduced_soma, ICLAMP_PARAMS)
    setup_iclamps(control_soma, ICLAMP_PARAMS)
    # sets recording vectors
    recording_vec_reduced = h.Vector()
    recording_vec_control = h.Vector()
    recording_vec_reduced.record(reduced_soma(.5)._ref_v)
    recording_vec_control.record(control_soma(.5)._ref_v)
    print('Running simulations, temperature is ' + str(celsius) + 'c')
    h.run()
    # for debugging, it helps if the two cells don't overlap
    change_cell_location(control_soma)
    np_recording_vec_control = np.array(recording_vec_control)
    np_recording_vec_reduced = np.array(recording_vec_reduced)
    if write_unit_test_vectors:
        write_test_vectors(voltage_file,
                           reduction_frequency, synapse_file,
                           recording_vec_control, recording_vec_reduced)
    number_of_recordings = int(recording_vec_control.size())
    np_orig_recording_vec_control, np_orig_recording_vec_reduced = \
        load_voltages(voltage_file, number_of_recordings)
    print('\n\n--------------- unit test results --------------')
    control_vecs_equal = np.allclose(np_recording_vec_control,
                                     np_orig_recording_vec_control,
                                     atol=1e-8)
    if not control_vecs_equal:
        print("UNIT TEST FAILED: control voltage vector has been changed")
        print('Complex model: unit test V - current sim V : %s' %
              abs_diff(np_recording_vec_control, np_orig_recording_vec_control))
    reduced_vecs_equal = np.allclose(np_recording_vec_reduced,
                                     np_orig_recording_vec_reduced,
                                     atol=1e-8)
    if not reduced_vecs_equal:
        print("UNIT TEST FAILED: reduced voltage vector has been changed")
        print('Reduced model: unit test V - current sim V : %s' %
              abs_diff(np_recording_vec_reduced, np_orig_recording_vec_reduced))
    if control_vecs_equal and reduced_vecs_equal:
        print("UNIT TEST PASSED: no significant changes to voltage vectors\n")
        print('Complex model: unit test V - current sim V : %s' %
              abs_diff(np_recording_vec_control, np_orig_recording_vec_control))
        print('Reduced model: unit test V - current sim V : %s' %
              abs_diff(np_recording_vec_reduced, np_orig_recording_vec_reduced))
    # plotting graphs of both voltage traces
    dt = SIM_PARAMS['dt']
    time = np.arange(dt, dt * number_of_recordings + dt, dt)
    rmsd_new = rmsd(np_recording_vec_reduced, np_recording_vec_control)
    rmsd_old = rmsd(np_orig_recording_vec_reduced, np_orig_recording_vec_control)
    rmsd_two_reduced_vecs = rmsd(np_recording_vec_reduced, np_orig_recording_vec_reduced)
    print("current sim: rmsd between reduced vs complex model:", rmsd_new)
    print("unit test: rmsd between reduced vs complex model:", rmsd_old)
    print("rmsd between current sim reduced and unit test reduced :", rmsd_two_reduced_vecs)
    if plot_voltages:
        plot_traces(time, reduction_frequency,
                    np_recording_vec_control, np_recording_vec_reduced,
                    np_orig_recording_vec_reduced, np_recording_vec_reduced,
                    rmsd_new, rmsd_old, rmsd_two_reduced_vecs,
                    np_recording_vec_control, np_orig_recording_vec_control)
    print('--------------------------------------- END -----------------------------------')
    return control_vecs_equal and reduced_vecs_equal
def str_to_bool(s):
    '''Parse a case-insensitive "true"/"false" string into a bool.

    Raises ValueError for any other input.  (Previously validated with
    `assert`, which is silently stripped when Python runs with -O, letting
    arbitrary strings through as False; also fixes the "true of false" typo
    in the message.)
    '''
    normalized = s.lower()
    if normalized not in ('true', 'false'):
        raise ValueError('%s Must be a either true or false' % s)
    return normalized == 'true'
def main(argv):
    '''Parse the command-line arguments (see run_test for their meaning) and
    run a single test reduction; returns run_test's boolean verdict.'''
    return run_test(argv[1],                  # morphology file
                    argv[2],                  # original model template
                    argv[3],                  # reduced model template
                    float(argv[4]),           # reduction frequency (Hz)
                    int(argv[5]),             # manual total nsegs
                    argv[6],                  # synapse locations file
                    argv[7],                  # reference voltage file
                    str_to_bool(argv[8]),     # rewrite unit-test vectors?
                    str_to_bool(argv[9]),     # show plots?
                    argv[10],                 # create_type
                    float(argv[11]))          # temperature (celsius)
if __name__ == "__main__":
    '''Run unit tests'''
    print(sys.argv)
    # main() returns True on success, so `not` maps success to exit status 0.
    sys.exit(not main(sys.argv))
|
{"/example/example.py": ["/neuron_reduce/__init__.py"], "/neuron_reduce/__init__.py": ["/neuron_reduce/subtree_reductor_func.py"], "/neuron_reduce/subtree_reductor_func.py": ["/neuron_reduce/reducing_methods.py"], "/tests/test_script_helper.py": ["/neuron_reduce/__init__.py"]}
|
31,505
|
kaandocal/evolutica
|
refs/heads/master
|
/master/actuator.py
|
import numpy as np
import heapq
# Actuator of an agent (incomplete)
class Actuator:
    '''Drives an agent's movement: picks goals from sensor input and walks
    towards them along an A*-computed path.'''

    def __init__(self, agent):
        self.agent = agent
        self.goal = None   # entity currently pursued; None means idle/wandering
        self.steps = []    # remaining path cells towards the goal

    # reorient and find a new goal
    def reflect(self):
        '''Poll all sensors and pick a new goal at random, weighted by
        perceived signal strength; recompute the path to it.'''
        #check sensor inputs and create list of potential targets and weights
        targets = []
        weights = []
        for sensor in self.agent.sensors:
            ts, ws = sensor.sense(self.agent.x, self.agent.y)
            targets += ts
            weights += ws
        #nothing found => stay idle
        if len(targets) == 0:
            self.goal = None
            self.steps = []
            return
        #choose random target according to weight
        z = np.sum(weights)
        self.goal = np.random.choice(targets, p = np.asarray(weights) / z)
        self.steps = self.pathfind()
        if self.goal.type == None:
            print("{} wonders what's at ({},{})...".format(self.agent.name, self.goal.x,self.goal.y))
        else:
            print("{} found {}!".format(self.agent.name, self.goal.name))

    # As reflect, but ignores any inputs by the brain
    def doubt(self):
        '''Re-pick a goal like reflect(), but skipping Brain sensors (so the
        agent stops wandering to imagined targets).'''
        print("{} is starting to have some doubts...".format(self.agent.name))
        #check sensor inputs and create list of potential targets and weights
        targets = []
        weights = []
        for sensor in self.agent.sensors:
            if sensor.type.name == "Brain":
                continue
            ts, ws = sensor.sense(self.agent.x, self.agent.y)
            targets += ts
            weights += ws
        #nothing found => stay idle
        if len(targets) == 0:
            return
        #choose random target according to weight
        z = np.sum(weights)
        self.goal = np.random.choice(targets, p = np.asarray(weights) / z)
        self.steps = self.pathfind()
        print("{} found {}!".format(self.agent.name, self.goal.name))

    # Implementation of the A* pathfinding algorithm
    def pathfind(self):
        '''Return the list of cells from the agent's position to self.goal
        (both inclusive), or [] if the goal is unreachable.

        Standard A* over the 4-connected grid; cells with tiles != 0 are
        walls.  The Manhattan heuristic is admissible for unit step costs,
        so the returned path is shortest.
        '''
        dest = None
        fringe = []
        closed = set()
        # Save the cost and the shortest route to each cell in a dictionary:
        # pathdata[cell] = (g cost from start, predecessor cell or None)
        pathdata = {}
        world_w = self.agent.world.width
        world_h = self.agent.world.height
        # Start with the current cell
        heapq.heappush(fringe, (self.estimateddistance(self.agent.x, self.agent.y),
                                (self.agent.x, self.agent.y)))
        pathdata[(self.agent.x, self.agent.y)] = (0, None)
        while len(fringe) != 0:
            # Check the best-looking cell in the set of unvisited tiles
            f, cell = heapq.heappop(fringe)
            if cell in closed:
                # stale duplicate entry left in the heap; skip it
                continue
            closed.add(cell)
            if cell[0] == self.goal.x and cell[1] == self.goal.y:
                dest = cell
                break
            nbs = ((cell[0], cell[1] + 1),
                   (cell[0] + 1, cell[1]),
                   (cell[0] - 1, cell[1]),
                   (cell[0], cell[1] - 1))
            for n in nbs:
                # Consider adding n to the list of cells to be visited
                if (n[0] >= 0 and n[0] < world_w) and (n[1] >= 0 and n[1] < world_h) \
                        and self.agent.world.tiles[n[0], n[1]] == 0 and n not in closed:
                    g = pathdata[cell][0] + 1
                    # BUG FIX: the old code tested `(f, cell) in fringe` - a
                    # linear scan for the *current* cell under a stale f - and
                    # bailed out of the whole neighbour loop when it hit.
                    # Instead, only skip n when an equal-or-better route to it
                    # is already recorded.
                    if n in pathdata and pathdata[n][0] <= g:
                        continue
                    est = self.estimateddistance(n[0], n[1])
                    heapq.heappush(fringe, (g + est, n))
                    pathdata[n] = (g, cell)
        if dest == None:
            return []
        # Walk the predecessor chain back from the goal, then reverse it
        ret = []
        while True:
            ret.append((dest[0], dest[1]))
            dest = pathdata[dest][1]
            if dest == None:
                break
        ret.reverse()
        return ret

    # Heuristic for the A* algorithm
    def estimateddistance(self, xcur, ycur):
        '''Manhattan distance from (xcur, ycur) to the current goal.'''
        manhattan = abs(self.goal.x - xcur) + abs(self.goal.y - ycur)
        return manhattan

    # Proposal function. Returns tuple of the next position.
    def propose(self, x, y):
        '''Return the (x, y) the agent wants to move to next.

        With no goal/path: take one random step (or stay put) and look for a
        new goal.  Otherwise follow the precomputed path, touching the goal
        on arrival.
        '''
        if self.goal == None or len(self.steps) == 0:
            r = np.random.randint(0, 5)
            # North
            if r == 1:
                x = x
                y = y + 1
            # South
            elif r == 2:
                x = x
                y = y - 1
            # East
            elif r == 3:
                x = x + 1
                y = y
            # West
            elif r == 4:
                x = x - 1
                y = y
            self.reflect()
        else:
            # Consider reorienting the agent if it is just wandering about
            if self.goal.type == None:
                # NOTE(review): min(0, ...) makes `a` <= 0, so doubt()
                # essentially never fires - max(0, ...) may have been intended.
                a = min(0, 500 - self.agent.energy) / 400.0
                if np.random.random_sample() <= a:
                    self.doubt()
            # Perform the next step in the list of steps
            step = self.steps[0]
            x = step[0]
            y = step[1]
            self.steps = self.steps[1:]
            # We have reached our goal
            if len(self.steps) == 0:
                self.agent.touch(self.goal)
                self.goal = None
        return (x, y)
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,506
|
kaandocal/evolutica
|
refs/heads/master
|
/vis.py
|
import master.world
import master.food
import master.sensor
import matplotlib.pyplot as plt
import pickle
import sys
# Load the pickled world named on the command line and split its hall of
# fame into three cohorts: still alive, died childless, died with children.
filename = sys.argv[1]
# FIX: the input file was opened but never closed; use a context manager.
with open(filename, "rb") as inp:
    world = pickle.load(inp)
living = []
extinct = []
multiplied = []
for ent in world.halloffame:
    if not ent.deceased:
        living.append(ent)
    elif len(ent.children) == 0:
        extinct.append(ent)
    else:
        multiplied.append(ent)
print("{} living, {} extinct, {} passed on their genes".format(len(living),len(extinct),len(multiplied)))
def descendants(ent):
    '''Total number of descendants of `ent` (children, grandchildren, ...).'''
    total = 0
    for child in ent.children:
        total += 1 + descendants(child)
    return total

def hunger_sort(ent):
    '''Sort key: total units of food the entity ate over its lifetime.'''
    return sum(ent.food_eaten[k] for k in master.food.foodtypes)

def life_sort(ent):
    '''Sort key: lifespan in rounds (death round minus birth round).'''
    return ent.deceased - ent.birthday

def sensorstrength(ent, st):
    '''Combined resolution of all of `ent`'s sensors of type `st`.'''
    total = 0
    for sensor in ent.sensors:
        if sensor.type == st:
            total += sensor.resolution
    return total
# Dump biographies: living agents ordered by food eaten, extinct ones by
# lifespan, successful ancestors by number of descendants.
for cohort, keyfn in ((living, hunger_sort),
                      (extinct, life_sort),
                      (multiplied, descendants)):
    for ent in sorted(cohort, key=keyfn):
        ent.dumpbio()
        ent.dumpsensors()

# One scatter plot per sensor type: sensor strength vs. number of descendants
# among the agents that reproduced.
for st in master.sensor.sensortypes:
    fig = plt.figure()
    fig.suptitle(st.name)
    ax = plt.subplot()
    strengths = [sensorstrength(ent, st) for ent in multiplied]
    offspring = [descendants(ent) for ent in multiplied]
    ax.scatter(strengths, offspring)
    ax.set_ylabel("Number of descendants")
    ax.set_xlabel("Sensor strength")
    plt.show()
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,507
|
kaandocal/evolutica
|
refs/heads/master
|
/master/sensor.py
|
import numpy as np
from .food import Food
from .entity import Ghost
import pygame
class Sensor():
    '''Base class for an agent's senses.

    `resolution` is clamped into [0, 15]; a higher resolution lets the
    sensor pick up targets from further away.
    '''
    def __init__(self, world, type, resolution = 0):
        self.world = world
        self.type = type
        # clamp into the allowed range
        self.resolution = max(0, min(15, resolution))

    #return list of elements perceived, weighted by perception strength
    def sense(self, pos):
        '''Overridden by subclasses; returns (targets, weights).'''
        pass
# The next three sensors are identical but for their names
class Nose(Sensor):
    '''Smell: perceives food whose type has `smells` set.'''
    name = "Nose"

    def __init__(self, world, resolution = 0):
        Sensor.__init__(self, world, Nose, resolution)

    def sense(self, x, y):
        '''Return smellable, reachable food near (x, y) with intensities.'''
        targets = []
        weights = []
        for ent in self.world.entities:
            # skip anything that is not food or has an odourless food type
            if ent.type != Food or not ent.foodtype.smells:
                continue
            #strength of smell is dependent on the distance and the resolution of the sensor
            dist = np.sqrt((x - ent.x) ** 2 + (y - ent.y) ** 2)
            strength = self.resolution - dist
            if strength >= 1 and self.world.walkable(ent.x, ent.y):
                targets.append(ent)
                weights.append(strength)
        return targets, weights
class Ear(Sensor):
    '''Hearing: perceives food whose type has `sounds` set.'''
    name = "Ear"

    def __init__(self, world, resolution = 0):
        Sensor.__init__(self, world, Ear, resolution)

    def sense(self, x, y):
        '''Return audible, reachable food near (x, y) with loudness weights.'''
        targets = []
        weights = []
        for ent in self.world.entities:
            # skip anything that is not food or has a silent food type
            if ent.type != Food or not ent.foodtype.sounds:
                continue
            #loudness is dependent on the distance and the resolution of the sensor
            dist = np.sqrt((x - ent.x) ** 2 + (y - ent.y) ** 2)
            strength = self.resolution - dist
            if strength >= 1 and self.world.walkable(ent.x, ent.y):
                targets.append(ent)
                weights.append(strength)
        return targets, weights
class Eye(Sensor):
    '''Sight: perceives food whose type has `visible` set.'''
    name = "Eye"

    def __init__(self, world, resolution = 0):
        Sensor.__init__(self, world, Eye, resolution)

    def sense(self, x, y):
        '''Return visible, reachable food near (x, y) with clarity weights.'''
        targets = []
        weights = []
        for ent in self.world.entities:
            # skip anything that is not food or has an invisible food type
            if ent.type != Food or not ent.foodtype.visible:
                continue
            #clarity is dependent on the distance and the resolution of the sensor
            dist = np.sqrt((x - ent.x) ** 2 + (y - ent.y) ** 2)
            strength = self.resolution - dist
            if strength >= 1 and self.world.walkable(ent.x, ent.y):
                targets.append(ent)
                weights.append(strength)
        return targets, weights
# This is a bit different
class Brain(Sensor):
    '''"Imagination": proposes a random world cell (as a Ghost entity) for
    the agent to wander towards.'''
    name = "Brain"

    def __init__(self, world, resolution = 0):
        Sensor.__init__(self, world, Brain, resolution)
        self.target = Ghost(world, 0, 0)

    def sense(self, x, y):
        '''Return a single random Ghost target somewhere in the world.'''
        self.target.x = np.random.randint(0, self.world.width)
        self.target.y = np.random.randint(0, self.world.height)
        # Preference given to faraway targets (why?)
        manhattan = abs(x - self.target.x) + abs(y - self.target.y)
        weight = self.resolution * 0.05 * manhattan / (self.world.width + self.world.height)
        return [self.target], [weight]

sensortypes = [ Nose, Eye, Ear, Brain ]
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,508
|
kaandocal/evolutica
|
refs/heads/master
|
/master/gfx.py
|
import pygame
# Cache of loaded pygame surfaces, keyed by image name.
images = {}
# Fallback (width, height) used by get_image when a handle's size is None.
default_size = None
# Since pygame images cannot be pickled, this returns a handle to the image loaded
def load_image(name, size = None):
    '''Load img/<name>.bmp (cached in `images`) and return a picklable handle.

    pygame surfaces cannot be pickled, so entities keep the (name, size)
    tuple returned here and resolve it with get_image() at render time.
    '''
    # idiom fix: membership test on the dict directly instead of `.keys()`
    if name not in images:
        images[name] = pygame.image.load('img/{}.bmp'.format(name))
    return (name, size)
# Returns the actual image
def get_image(t):
    '''Resolve a (name, size) handle from load_image into a pygame surface.

    A size of (0, 0) means "use the original surface"; None falls back to
    the module-wide default_size (and stays unscaled if no default is set).
    '''
    name, size = t
    img = images[name]
    if size == (0,0):
        return img
    if size == None:
        size = default_size
    if size != None:
        img = pygame.transform.scale(img, size)
    return img
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,509
|
kaandocal/evolutica
|
refs/heads/master
|
/master/agent.py
|
from .entity import Entity
from .actuator import Actuator
from .sensor import Sensor, Nose, Ear, Brain, Eye
from .food import Food, foodtypes
from .gfx import load_image, get_image
# agent class
import numpy as np
# Pool of first names randomly assigned to newborn agents (purely cosmetic).
names = [ 'Larry', 'Patrick', 'Hannah', 'Angelina', 'Bert', 'Margaret', 'Bob', 'Vicky', 'Oliver', 'Emily', 'Lil\' Ron', 'Jackie', 'Katy P', 'Dieter', 'Elias', 'Alex', 'Mike', 'Gabe', 'Moe', 'Hazel', 'Bella', 'Aubrey', 'Penelope', 'Lizzie', 'Ed', 'Em']
class Agent(Entity):
    '''A mobile creature: senses food via its sensors, pathfinds to it with
    its Actuator, eats, and reproduces when it has stored enough energy.'''
    # Maximal energy an entity can store
    Emax = 500

    # constructs an agent with x,y coordinates and instantiates an Actuator
    def __init__(self, world, x, y):
        Entity.__init__(self, world, x, y, Agent)
        self.sensors = []
        self.metabolic_cost = 1
        # start with a random 40%-100% of the maximum energy
        self.energy = int((0.4 + 0.6 * np.random.random_sample())* Agent.Emax)
        self.actuator = Actuator(self)
        self.image = load_image("agent")
        # For statistical purposes
        self.children = []
        self.name = np.random.choice(names)
        self.food_eaten = {}
        for ft in foodtypes:
            self.food_eaten[ft] = 0
        self.last_eaten = -1
        self.birthday = world.round

    def addsensor(self, type, resolution):
        '''Attach a sensor of class `type`; Brain carriers get a special sprite.
        Each point of resolution raises the per-round metabolic cost.'''
        if type == Brain:
            self.image = load_image("brainy")
        # NOTE(review): circleoflife() passes mutated (possibly out-of-range)
        # resolutions here, which would trip this assert - confirm intended.
        assert(0 <= resolution and resolution <= 15)
        self.sensors.append(type(self.world, resolution))
        self.metabolic_cost += resolution * self.world.sensor_cost

    # updates agents
    def update(self):
        '''One simulation step: move, pay metabolic cost, maybe die or breed.'''
        x, y = self.actuator.propose(self.x, self.y)
        if self.world.walkable(x,y):
            self.x = x
            self.y = y
        self.energy -= self.metabolic_cost
        if self.energy <= 0:
            self.die()
        # generate offspring with a probability depending on the energy level
        p_offspring = max(0, (self.energy - 350) / 4500.)
        if np.random.random_sample() <= p_offspring:
            self.circleoflife()

    def die(self):
        '''Remove the agent from the world, recording its round of death.'''
        print("*{} didn't make it".format(self.name))
        self.deceased = self.world.round
        self.world.remove_entity(self)

    def circleoflife(self):
        '''Split into two children that inherit mutated sensors and share the
        parent's energy (plus a random bonus); the parent is removed.'''
        s = self.world.spawn(Agent, self.x, self.y)
        d = self.world.spawn(Agent, self.x, self.y)
        # Pass on the sensors with random mutations
        for sensor in self.sensors:
            sstr_s = sensor.resolution + np.random.normal(0, self.world.mutation_variance)
            sstr_d = sensor.resolution + np.random.normal(0, self.world.mutation_variance)
            s.addsensor(sensor.type, sstr_s)
            d.addsensor(sensor.type, sstr_d)
        # Split up the energy between both descendants, including a bonus
        energy = self.energy + np.random.randint(50,100)
        share_s = 0.3 + 0.4 * np.random.random_sample()
        s.energy = int(share_s * energy)
        d.energy = int((1 - share_s) * energy)
        print("*plop plop*")
        print("{}'s been busy it seems...*".format(self.name))
        print("Happy B-day, {} and {}!".format(s.name, d.name))
        self.children.append(s)
        self.children.append(d)
        self.deceased = self.world.round
        self.world.remove_entity(self)

    def render(self, surf, tile_size):
        '''Blit the agent's sprite at its grid position on `surf`.'''
        surf.blit(get_image(self.image), (self.x * tile_size, self.y * tile_size))

    def touch(self, other):
        '''Interact with the entity the agent walked onto; food gets eaten.'''
        # Gross
        if other.deceased:
            return
        if other.type == Food:
            other.disappear()
            print("{}: *munch munch*".format(self.name))
            self.food_eaten[other.foodtype] += 1
            # energy is capped at Emax
            self.energy = min(Agent.Emax, self.energy + other.foodtype.nutritional_value)
            self.last_eaten = self.world.round

    def dumpbio(self):
        '''Print a short obituary or introduction for the agent.'''
        if self.deceased:
            print("--------------------------------------")
            print("Fondly remembering {}".format(self.name))
            print("(Round {} - Round {})".format(self.birthday, self.deceased))
            if len(self.children) != 0:
                print("{} gave us two beautiful children: {} and {}".format(self.name, self.children[0].name, self.children[1].name))
            else:
                print("Poor {} starved to death in a cruel world...".format(self.name))
            print("--------------------------------------")
        else:
            print("--------------------------------------")
            print("Proudly presenting {}".format(self.name))
            print("Born in Round {}".format(self.birthday))
            print("We all wish the best for {}".format(self.name))
            print("--------------------------------------")
        # NOTE(review): this prints a second divider after either branch -
        # confirm the doubled separator line is intended.
        print("--------------------------------------")

    def dumpfood(self):
        '''Print the agent's food statistics per food type.'''
        print("--------------------------------------")
        print("Food stats for {}:".format(self.name))
        total_food = sum([v for v in self.food_eaten.values()])
        print("Total Food Eaten: {}".format(total_food))
        print("Last Food Eaten: Round {}/{}".format(self.last_eaten, self.world.round))
        print("Energy Reserves: {:.1f}".format(self.energy))
        for ft in foodtypes:
            print("Amount of {} eaten: {}".format(ft.name, self.food_eaten[ft]))
        print("--------------------------------------")

    def dumpsensors(self):
        '''Print each sensor's resolution out of the maximum of 15.'''
        print("--------------------------------------")
        print("Sensor stats for {}:".format(self.name))
        for s in self.sensors:
            print("{}: {:.1f}/15".format(s.name,s.resolution))
        print("--------------------------------------")
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,510
|
kaandocal/evolutica
|
refs/heads/master
|
/run.py
|
#import modules
import pygame, sys, random
#import some useful constants
from pygame.locals import *
#import classes
from master.entity import Entity
from master.world import World
from master.agent import Agent
from master.sensor import Nose, Ear, Eye, Brain, sensortypes
from master.food import Food, FoodType
import master.food as food
import master.gfx as gfx
import master.data as data
#constants
# number keys 1-7 map to simulation speed presets (steps per second)
speeds = { K_1: 2, K_2: 5, K_3: 10, K_4: 20, K_5 : 30, K_6 : 40, K_7 : 80 }
speed = speeds[K_1]
# size of one grid tile in pixels; also the default sprite size
TILESIZE = 20
gfx.default_size = (TILESIZE,TILESIZE)
#colours (RGB)
BLACK = (0,0,0)
WHITE = (255,255,255)
DARK_GRAY = (100,100,100)
RED = (200, 0, 0)
GREEN = (0,200,0)
BLUE = (0,0,200)
#start the pygame library and create a clock module
pygame.init()
fpsClock = pygame.time.Clock()
wall_image = gfx.get_image(gfx.load_image("wall"))
if len(sys.argv) < 2:
    print("Usage: run.py [world file]")
    exit(1)
world_filename = sys.argv[1]
# World file syntax:
# Agent X Y - spawn agent at (X,Y)
# Sensor TYPE RES - add sensor of type TYPE and resolution RES to last spawned agent
# Option OPTION VALUE - sets global constants. Possible option names are:
#   MutationVar - variance of mutations during reproduction (sensor resolution)
#   FoodPerRound - units of food spawned per round (should be between 0 and 1)
#   SensorCost - amount of energy used by each sensor per round and resolution
def init_world(filename):
    """Build a World from a text description file.

    Directives (one per line):
        Agent X Y          -- spawn an Agent at integer coordinates (X, Y)
        Sensor TYPE RES    -- add a sensor of type TYPE with resolution RES
                              to the most recently spawned entity
        Option NAME VALUE  -- set a world constant: MutationVar, SensorCost
                              or FoodPerRound
    Unparseable lines print a warning and are skipped.
    """
    world = World('world')
    # `with` closes the file even on errors (it was left open before).
    # split() with no argument strips the trailing newline that the old
    # split(' ') left on the last token, and makes blank lines skippable
    # instead of producing a spurious warning.
    with open(filename, "r") as inp:
        for line in inp:
            words = line.split()
            if not words:
                continue
            if words[0] == "Agent":
                if len(words) < 3:
                    print("Warning: Cannot parse line '{}'".format(line))
                    continue
                world.spawn(Agent, int(words[1]), int(words[2]))
            elif words[0] == "Sensor":
                if len(world.entities) == 0:
                    print("Warning: Adding sensors to non-entity")
                    continue
                if len(words) < 3:
                    print("Warning: Cannot parse line '{}'".format(line))
                    continue
                res = float(words[2])
                ent = world.entities[-1]
                for st in sensortypes:
                    if words[1] == st.name:
                        ent.addsensor(st, resolution=res)
                        break
                else:
                    # for/else: no registered sensor type matched the name.
                    print("Warning: Cannot parse line '{}'".format(line))
            elif words[0] == "Option":
                if len(words) < 3:
                    print("Warning: Cannot parse line '{}'".format(line))
                    continue
                if words[1] == "MutationVar":
                    world.mutation_variance = float(words[2])
                elif words[1] == "SensorCost":
                    world.sensor_cost = float(words[2])
                elif words[1] == "FoodPerRound":
                    world.distributor.fpr = float(words[2])
                else:
                    print("Warning: Cannot parse line '{}'".format(line))
            else:
                print("Warning: Cannot parse line '{}'".format(line))
    return world
food.foodtypes.append(FoodType("burger", 300, 50, smells=True, sounds=False, visible=False))
food.foodtypes.append(FoodType("chicken", 300, 50, smells=False, sounds=True, visible=False))
food.foodtypes.append(FoodType("apple", 300, 50, smells=False, sounds=False, visible=True))
world = init_world("brainvsnone")
#set up display
DISP_SURF = pygame.display.set_mode((world.width*TILESIZE, world.height*TILESIZE))
pygame.display.set_caption('World')
#draws the entities onto the grid
def render():
    """Blit every entity in the world onto the display surface."""
    for ent in world.entities:
        ent.render(DISP_SURF, TILESIZE)
#world loop: process input, advance the simulation, redraw every frame
while True:
    #get all the user events
    for event in pygame.event.get():
        #if the user wants to quit: save the world state, then exit cleanly
        if event.type == QUIT:
            data.dump(world)
            pygame.quit()
            sys.exit()
        #user clicks on an entity or a tile (not implemented yet)
        elif event.type == MOUSEBUTTONUP:
            pass
        #key command
        elif event.type == KEYDOWN:
            key = event.key
            #pause game (speed 0 keeps rendering but stops world updates)
            if key == K_SPACE:
                speed = 0
            #set game speed from the 1-7 number keys (see `speeds` above)
            elif key in speeds.keys():
                speed = speeds[key]
            #dump game data (pickle the whole world)
            elif key == K_s:
                data.dump(world)
            #print per-agent sensor info
            elif key == K_i:
                data.sensorinfo(world)
            #print per-agent food info
            elif key == K_f:
                data.foodinfo(world)
            #print hall-of-fame biographies
            elif key == K_r:
                data.hof(world)
    #draw grid
    DISP_SURF.fill(BLACK)
    for row in range(world.height):
        for column in range(world.width):
            if world.tiles[column,row] == World.TILE_WALL:
                DISP_SURF.blit(wall_image, (column * TILESIZE, row * TILESIZE))
    #update the display
    if speed != 0:
        fpsClock.tick(speed)
        world.update()
    else:
        #paused: keep the UI responsive at 10 fps without advancing the world
        fpsClock.tick(10)
    render()
    pygame.display.update()
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,511
|
kaandocal/evolutica
|
refs/heads/master
|
/master/entity.py
|
class Entity:
    """Base class for everything placed on the world grid.

    Attributes:
        world:    the World this entity lives in
        x, y:     integer grid coordinates
        type:     class tag used for dispatch (e.g. Agent, Food), or None
        name:     optional display name, set by subclasses
        deceased: round number the entity died in, or None while alive
    """

    def __init__(self, world, x, y, type=None):
        self.name = None
        self.world = world
        self.type = type
        self.x = x
        self.y = y
        self.deceased = None

    def update(self):
        """Advance the entity by one round; subclasses override."""

    def render(self, surface, tile_size):
        """Draw the entity onto *surface*; subclasses override."""

    def touch(self, other):
        """Called when *other* occupies the same tile; subclasses override."""
# Imaginary targets used by the brain
class Ghost(Entity):
    """Placeholder entity with no type tag; used as an imaginary target
    rather than a real world object."""
    def __init__(self, world, x, y):
        Entity.__init__(self, world, x, y, None)
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,512
|
kaandocal/evolutica
|
refs/heads/master
|
/master/data.py
|
from .world import World
from .agent import Agent
import pickle
def dump(self):
    """Pickle *self* (typically a World) to the next free saves/saveN file.

    Note: the parameter is named `self` although this is a free function;
    the name is kept for backward compatibility with existing callers.
    """
    i = 0
    while True:
        filename = "saves/save{}".format(i)
        # Probe for the first index whose file does not exist yet.
        try:
            # `with` closes the probe handle (it leaked before).
            with open(filename, "rb"):
                pass
        except IOError:
            break
        i += 1
    # `with` guarantees the save file is flushed and closed.
    with open(filename, "wb") as out:
        pickle.dump(self, out)
    print("Saved game data to file '{}'".format(filename))
def foodinfo(world):
    """Print food statistics for every Agent currently in *world*."""
    for creature in world.entities:
        if creature.type == Agent:
            creature.dumpfood()
def sensorinfo(world):
    """Print sensor details for every Agent currently in *world*."""
    for creature in world.entities:
        if creature.type == Agent:
            creature.dumpsensors()
def hof(world):
    """Print a biography for every entity recorded in the hall of fame."""
    for champion in world.halloffame:
        champion.dumpbio()
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,513
|
kaandocal/evolutica
|
refs/heads/master
|
/master/world.py
|
from .agent import Agent
from .food import Food, Distributor
from .entity import Entity
from .gfx import load_image, get_image
import numpy as np
import pygame
#represents the world grid with all entities
class World:
    """The simulation world: a tile grid plus all living entities.

    The wall layout is loaded from an image file (non-black pixels become
    walls). Per-round bookkeeping: live entities, a deferred-removal list,
    a hall of fame of every Agent ever spawned, and the food Distributor.
    """

    TILE_WALL = 1

    def __init__(self, filename):
        # World layout is an image: any non-black pixel marks a wall tile.
        worldmap = load_image(filename, (0, 0))
        worldmap = get_image(worldmap).convert(8)
        self.width, self.height = worldmap.get_size()
        # Currently alive entities, entities to drop after this round,
        # and every Agent that ever lived (see spawn()).
        self.entities = []
        self.remove_list = []
        self.halloffame = []
        self.distributor = Distributor(self)
        self.tiles = np.zeros((self.width, self.height), dtype='uint32')
        # Surround the world with a wall so entities cannot leave the grid.
        self.tiles[0, :] = self.tiles[-1, :] = World.TILE_WALL
        self.tiles[:, 0] = self.tiles[:, -1] = World.TILE_WALL
        self.round = 0
        # Energy drained per sensor per round.
        self.sensor_cost = 0.09
        # Variance of sensor-resolution mutations during reproduction.
        self.mutation_variance = 1
        # Fill in the tiles according to the loaded world map file.
        for i in range(self.width):
            for j in range(self.height):
                if worldmap.get_at((i, j)) != (0, 0, 0):
                    self.tiles[i, j] = World.TILE_WALL

    def walkable(self, x, y):
        """Return True if (x, y) is inside the grid and not a wall tile."""
        return (x >= 0 and x < self.width) and (y >= 0 and y < self.height) \
            and self.tiles[x, y] != World.TILE_WALL

    # Spawns entities
    def spawn(self, constructor, x, y, *args, **kwargs):
        """Create an entity at (x, y), register it and return it.

        Returns None (with a warning) for out-of-bounds or wall positions.
        """
        if x < 0 or x >= self.width or y < 0 or y >= self.height:
            print("Warning: Trying to spawn entity outside world")
            return
        # Fixed: this check previously fell through (`pass`) and the entity
        # was spawned inside the wall anyway, contradicting the comment.
        if self.tiles[x, y] == World.TILE_WALL:
            print("Warning: Trying to spawn entity inside a wall")
            return
        ent = constructor(self, x, y, *args, **kwargs)
        self.entities.append(ent)
        if ent.type == Agent:
            # Agents are recorded forever for post-mortem statistics.
            self.halloffame.append(ent)
        return ent

    def remove_entity(self, ent):
        """Mark *ent* as dead; actual removal is deferred to end of round."""
        ent.deceased = self.round
        self.remove_list.append(ent)

    # updates all entities and the world grid
    def update(self):
        """Advance the simulation by one round."""
        # Call the update method of all entities, then notify each entity
        # of everything sharing its tile.
        for entity in self.entities:
            entity.update()
            for e2 in self.entities:
                if e2 is not entity and entity.x == e2.x and entity.y == e2.y:
                    entity.touch(e2)
        # Deferred removal keeps the iteration above safe.
        for ent in self.remove_list:
            if ent in self.entities:
                self.entities.remove(ent)
        self.remove_list = []
        self.distributor.update()
        self.round += 1
        return
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,514
|
kaandocal/evolutica
|
refs/heads/master
|
/master/food.py
|
import numpy as np
from .entity import Entity
from .gfx import load_image, get_image
class FoodType:
    """Static description of a kind of food: sprite, lifetime, energy and
    which senses (smell / sound / sight) can detect it."""
    def __init__(self, name, shelflife, nutritional_value, smells, sounds, visible):
        self.name = name
        # Sprite shared by every Food instance of this type.
        self.image = load_image(name)
        # Rounds until the food expires (see Food.update).
        self.shelflife = shelflife
        self.nutritional_value = nutritional_value
        # whether this type of food can be smelled, heard or seen
        self.smells = smells
        self.sounds = sounds
        self.visible = visible
class Food(Entity):
    """A perishable food item lying on the grid."""

    def __init__(self, world, x, y, foodtype):
        Entity.__init__(self, world, x, y, Food)
        self.foodtype = foodtype
        self.name = foodtype.name
        # Round in which this item was spawned; used for shelf-life checks.
        self.bday = world.round

    def disappear(self):
        """Remove this item from the world (deferred by the world)."""
        self.world.remove_entity(self)

    def render(self, surf, tile_size):
        """Draw the food type's sprite at this item's grid position."""
        position = (self.x * tile_size, self.y * tile_size)
        surf.blit(get_image(self.foodtype.image), position)

    def update(self):
        """Expire the item once its shelf life has elapsed."""
        expired = self.world.round >= self.bday + self.foodtype.shelflife
        if expired:
            self.disappear()
# Global registry of available FoodTypes; populated by the application at startup.
foodtypes = [ ]
# Class responsible for spawning food at random
class Distributor:
    """Randomly drops food items onto the grid each round."""

    def __init__(self, world, fpr=0.5):
        self.world = world
        # fpr = food per round: probability of spawning one item per round.
        self.fpr = fpr

    def update(self):
        """With probability fpr, place one random food item on a walkable tile."""
        if np.random.random_sample() > self.fpr:
            return
        kind = np.random.choice(foodtypes)
        col = np.random.randint(1, self.world.width - 1)
        row = np.random.randint(1, self.world.height - 1)
        if self.world.walkable(col, row):
            self.world.spawn(Food, col, row, kind)
|
{"/vis.py": ["/master/world.py", "/master/food.py", "/master/sensor.py"], "/master/sensor.py": ["/master/food.py", "/master/entity.py"], "/master/agent.py": ["/master/entity.py", "/master/actuator.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py"], "/run.py": ["/master/entity.py", "/master/world.py", "/master/agent.py", "/master/sensor.py", "/master/food.py", "/master/gfx.py", "/master/data.py"], "/master/data.py": ["/master/world.py", "/master/agent.py"], "/master/world.py": ["/master/agent.py", "/master/food.py", "/master/entity.py", "/master/gfx.py"], "/master/food.py": ["/master/entity.py", "/master/gfx.py"]}
|
31,590
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/urls.py
|
from django.urls import path, include
from core import views as core_views
# Route table for the core app. The `name=` values are referenced by
# reverse() and get_absolute_url() elsewhere (e.g. 'postlink-detail').
urlpatterns = [
    path('', core_views.index, name='index'),
    path('postlink/<int:pk>', core_views.PostLinkDetailView.as_view(), name='postlink-detail'),
    path('postlink/<int:pk>/vote/', core_views.postlink_vote_view,
         name="postlink-vote"),
    path('profile/<username>', core_views.get_user_profile, name="user-profile"),
    path('postlink/<int:pk>/comments/', core_views.new_comment, name="new_comment"),
]
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,591
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/migrations/0003_auto_20190322_1426.py
|
# Generated by Django 2.1.7 on 2019-03-22 19:26
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Comment.post to Comment.postlink (column rename only)."""

    dependencies = [
        ('core', '0002_auto_20190321_2247'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='post',
            new_name='postlink',
        ),
    ]
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,592
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/migrations/0002_auto_20190321_2247.py
|
# Generated by Django 2.1.7 on 2019-03-22 03:47
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Vote.post to Vote.postlink (column rename only)."""

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='vote',
            old_name='post',
            new_name='postlink',
        ),
    ]
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,593
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/views.py
|
from django.shortcuts import render, redirect, get_object_or_404
from core.models import PostLink, User, Vote, Comment, HashTag
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.decorators.http import require_http_methods
from django.views import generic
from django.db.models import Count, Max, F
# Create your views here.
def get_user_profile(request, username):
    """Render the profile page for the user with the given username.

    Fixed: User.objects.get() raised DoesNotExist (HTTP 500) for unknown
    usernames; get_object_or_404 returns a proper 404 instead.
    """
    user = get_object_or_404(User, username=username)
    return render(request, 'core/user_profile.html', {"user": user})
def index(request):
    """Index view: all post links, most-commented first, then newest."""
    # Removed the unused `model = PostLink` local (leftover from a CBV).
    postlinks = (
        PostLink.objects.all()
        .annotate(num_comments=Count('comment'))
        .order_by('-num_comments', '-created_at')
    )
    context = {
        'postlinks': postlinks,
        'num_postlinks': PostLink.objects.all().count(),
    }
    return render(request, 'index.html', context=context)
@require_http_methods(['POST'])
@login_required
def postlink_vote_view(request, pk):
    """Toggle the current user's vote on a post link.

    First POST creates the vote; a second POST deletes it again.
    """
    postlink = get_object_or_404(PostLink, pk=pk)
    vote, created = request.user.vote_set.get_or_create(postlink=postlink)
    if created:
        # Removed a dead `pass` statement that followed this message.
        messages.success(request, f"You have voted for {postlink.title}.")
    else:
        messages.info(request, f"You have removed your vote for {postlink.title}.")
        vote.delete()
    return redirect(postlink.get_absolute_url())
class PostLinkDetailView(generic.DetailView):
    """Detail page for a single PostLink."""
    model = PostLink
    # NOTE(review): paginate_by is a ListView option; it appears to have no
    # effect on a DetailView -- confirm and remove if so.
    paginate_by = 20
# this was our new attempt at new postdetail
# def postlink_detail_view(request, pk):
# """Postlink detail View"""
# postlinks = PostLink.objects.all()
# comments = Comment.objects.all()
# # num_comments = Comment.postlink.count
# context = {
# 'postlinks' : postlinks,
# 'comments' : comments,
# # 'num_comments' : num_comments,
# }
# return render(request, 'detail_postlink.html', context=context)
@require_http_methods(['POST'])
@login_required
def new_comment(request, pk):
    """Attach a comment by the current user to the given post link.

    Reads the text from the POSTed 'post_comment' field (the Comment
    model's text field name).
    """
    postlink = get_object_or_404(PostLink, pk=pk)
    # Fixed: get_or_create(postlink=...) ignored the submitted text and
    # allowed at most one (empty) comment per user per post.
    request.user.comment_set.create(
        postlink=postlink,
        post_comment=request.POST.get('post_comment', ''),
    )
    return redirect(postlink.get_absolute_url())
@login_required
def post_delete(request, id):
    """Delete the PostLink with the given id and return to the index.

    Fixed: referenced the undefined name `Post` (NameError at call time)
    and redirected to a template filename instead of the 'index' URL name.
    """
    post = get_object_or_404(PostLink, id=id)
    post.delete()
    return redirect('index')
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,594
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/forms.py
|
from django import forms
from django.forms import ModelForm
from core.models import PostLink, Comment, Vote, HashTag, User
class CommentForm(forms.ModelForm):
    """Form for posting a comment on a PostLink.

    Fixed: `model`/`fields` were declared directly on the form class,
    where Django silently ignores them -- they belong on the inner Meta.
    """
    class Meta:
        model = Comment
        fields = ['post_comment']
class PostForm(forms.ModelForm):
    """Form for submitting a new PostLink.

    Fixed: referenced the undefined name `Post`, and listed fields
    ('post_link', 'user') that do not exist on the model; aligned with
    PostLink's actual fields (post_url, author).
    """
    class Meta:
        model = PostLink
        fields = ('title', 'post_url', 'author',)
class UserForm(forms.ModelForm):
    """Edit form exposing every field of the User model."""
    class Meta:
        model = User
        # NOTE(review): '__all__' exposes every User field, including
        # sensitive ones (password hash, permissions) -- confirm this form
        # is only used in trusted contexts.
        fields = '__all__'
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,595
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/models.py
|
from django.db import models
from autoslug import AutoSlugField
from django.urls import reverse
from django.contrib.auth import get_user_model
# Create your models here.
User = get_user_model()
class HashTag(models.Model):
    """A hashtag/topic label that can be attached to post links."""
    topic = models.CharField(max_length=50)
    def __str__(self):
        return self.topic
class PostLink(models.Model):
    """A submitted link/post with votes, comments and hashtags."""
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    post_url = models.CharField(max_length=100, null=True, blank=True)
    title = models.CharField(max_length=100)
    description = models.TextField(max_length=1000)
    # Slug auto-derived from the title (django-autoslug field).
    slug = AutoSlugField(populate_from='title', unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    topic = models.ManyToManyField(HashTag, blank=True)
    # Users who voted on this post, via the Vote through-model.
    voted_by = models.ManyToManyField(to=User, related_name='voted_postlinks', through='Vote')
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Resolves to the 'postlink-detail' route in core/urls.py.
        return reverse('postlink-detail', args=(self.id,))
class Vote(models.Model):
    """Through-model recording one user's vote on one PostLink."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    postlink = models.ForeignKey(PostLink, on_delete=models.CASCADE)
    voted_date = models.DateTimeField(auto_now_add=True)
class Comment(models.Model):
    """A user comment on a PostLink, ordered oldest first."""
    commenter = models.ForeignKey(User, on_delete=models.CASCADE)
    post_comment = models.TextField(max_length=255)
    postlink = models.ForeignKey(PostLink, on_delete=models.CASCADE)
    comment_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['comment_date']

    def __str__(self):
        # Fixed: previously returned reverse('postlink-detail',
        # args=(self.id,)) -- a URL built from the *comment's* id, which is
        # both the wrong object and not a human-readable representation.
        return self.post_comment
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,596
|
momentum-cohort-2019-02/w6-apile-puppy-power
|
refs/heads/master
|
/core/admin.py
|
from django.contrib import admin
from core.models import PostLink, Comment, Vote, HashTag, User
# Register your models here.
@admin.register(PostLink)
class PostLinkAdmin(admin.ModelAdmin):
    """Admin list view for PostLink showing its key columns."""
    list_display = ('title', 'description', 'post_url', 'slug')
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Default admin for Comment; registration only."""
    pass
@admin.register(Vote)
class VoteAdmin(admin.ModelAdmin):
    """Default admin for Vote; registration only.

    Fixed: this class was a second `CommentAdmin`, shadowing the Comment
    one above (registration still worked only because the decorator runs
    before the name is rebound).
    """
    pass
@admin.register(HashTag)
class HashTagAdmin(admin.ModelAdmin):
    """Default admin for HashTag; registration only.

    Fixed: this class was a third `CommentAdmin`; renamed to avoid the
    confusing duplicate class name.
    """
    pass
|
{"/core/views.py": ["/core/models.py"], "/core/forms.py": ["/core/models.py"], "/core/admin.py": ["/core/models.py"]}
|
31,597
|
alex3305/pyscinanled
|
refs/heads/master
|
/examples/example_simple.py
|
# Demo script: connect to a Scinan LED strip, toggle power and brightness.
# Hack to allow relative import above top level package
import sys
import os
folder = os.path.dirname(os.path.abspath(__file__))  # noqa
sys.path.insert(0, os.path.normpath("%s/.." % folder))  # noqa
import argparse
import time
from pyscinanled import ScinanLed
# The device's Bluetooth MAC address is the only required argument.
parser = argparse.ArgumentParser()
parser.add_argument('-m', dest='mac', type=str,
                    help='MAC address of bluetooth device')
args = parser.parse_args()
if args.mac is None:
    print('A MAC address is required to run this example')
    exit(1)
print('Connecting to your led strip...')
dev = ScinanLed(args.mac)
print('Turning on your led strip')
dev.turn_on()
time.sleep(5)
print('Set the brightness to 20%')
dev.brightness(20)
time.sleep(5)
print('Set the brightness to 100%')
dev.brightness(100)
time.sleep(5)
print('Turning off the led strip and disconnect')
dev.turn_off()
dev.disconnect()
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,598
|
alex3305/pyscinanled
|
refs/heads/master
|
/pyscinanled/btle_connection.py
|
import logging
import time
from .const import CHAR_START_HANDLE, CHAR_END_HANDLE, CHAR_UUID
from bluepy.btle import UUID, Peripheral, BTLEException
_LOGGER = logging.getLogger(__name__)
class BtleConnection:
    """Thin wrapper around a bluepy Peripheral and its write characteristic."""

    def __init__(self, mac: str, retries: int):
        """Store connection parameters; call connect() to actually connect.

        Fixed: removed a pointless bare `return` at the end of __init__.
        """
        self._mac = mac
        self._device = None
        self._characteristic = None
        self._retries = retries

    def connect(self, retry_count: int = 0):
        """Connect to the device, retrying up to self._retries times.

        Raises:
            TimeoutError: when every attempt failed.
        """
        if retry_count >= self._retries:
            raise TimeoutError('Could not reach device ' + self._mac)
        try:
            self._device = Peripheral()
            self._device.connect(self._mac)
            # Resolve the single control characteristic by handle range + UUID.
            self._characteristic = self._device.getCharacteristics(
                CHAR_START_HANDLE, CHAR_END_HANDLE, UUID(CHAR_UUID))[0]
        except BTLEException:
            _LOGGER.warning("Could not connect to %s, retrying...", self._mac)
            time.sleep(1)
            self.connect(retry_count + 1)

    def disconnect(self):
        """Drop the BLE connection; safe to call when not connected."""
        if self._device is not None:
            self._device.disconnect()
            self._device = None
            self._characteristic = None

    def send_command(self, command):
        """Write *command* (an iterable of ints) to the control characteristic."""
        self._characteristic.write(bytearray(command))
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,599
|
alex3305/pyscinanled
|
refs/heads/master
|
/pyscinanled/const.py
|
from .effect import Effect
# Brightness limits enforced by ScinanLed.brightness().
BRIGHTNESS_MIN = 10
BRIGHTNESS_MAX = 110
# GATT handle range and UUID of the strip's control characteristic.
CHAR_START_HANDLE = 0x0024
CHAR_END_HANDLE = 0x0024
CHAR_UUID = "0000fff1-0000-1000-8000-00805f9b34fb"
# Command prefixes; ScinanLed appends the payload byte before sending.
COMMAND_BRIGHTNESS = [0x03, 0x01, 0x01]
COMMAND_EFFECT = [0x05, 0x01, 0x02, 0x03]
COMMAND_SWITCH = [0x01, 0x01, 0x01]
# Default number of BLE connection attempts before giving up.
DEFAULT_RETRIES = 5
# Effects supported by the strip; values are bitmask bits and may be combined.
EFFECT_LIST = [
    Effect('Wave', 1),
    Effect('Phasing', 2),
    Effect('Fade away in phase', 4),
    Effect('Twinkling in phase', 8),
    Effect('Fade away', 16),
    Effect('Fast twinkling', 32),
    Effect('Stay on', 64)
]
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,600
|
alex3305/pyscinanled
|
refs/heads/master
|
/pyscinanled/effect.py
|
import random
class Effect:
    """A named LED effect with its protocol bit value."""

    def __init__(self, name: str, value: int):
        self._name = name
        self._value = value

    @property
    def Name(self):
        """Human-readable effect name."""
        return self._name

    @property
    def Value(self):
        """Bit value sent to the device."""
        return self._value

    @staticmethod
    def get_effect(effects: list, name: str = None, bitmask: int = None):
        """Look up an effect by exact name or exact bit value; None if absent."""
        if name is not None:
            candidates = (e for e in effects if e.Name == name)
        elif bitmask is not None:
            candidates = (e for e in effects if e.Value == bitmask)
        else:
            return None
        return next(candidates, None)

    @staticmethod
    def get_random_effect(effects: list):
        """Pick one effect uniformly at random."""
        return random.sample(effects, 1)[0]

    @staticmethod
    def combine_effects(effects: list):
        """Combine effects by summing their bit values (0 for an empty list)."""
        return sum(e.Value for e in effects)
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,601
|
alex3305/pyscinanled
|
refs/heads/master
|
/examples/example_effect.py
|
# Demo script: cycle a Scinan LED strip through random and combined effects.
# Hack to allow relative import above top level package
import sys
import os
folder = os.path.dirname(os.path.abspath(__file__))  # noqa
sys.path.insert(0, os.path.normpath("%s/.." % folder))  # noqa
import argparse
import time
from pyscinanled import ScinanLed, Effect, EFFECT_LIST
# The device's Bluetooth MAC address is the only required argument.
parser = argparse.ArgumentParser()
parser.add_argument('-m', dest='mac', type=str,
                    help='MAC address of bluetooth device')
args = parser.parse_args()
if args.mac is None:
    print('A MAC address is required to run this example')
    exit(1)
print('Connecting to your led strip...')
dev = ScinanLed(args.mac)
print('Activating a random effect')
dev.effect(Effect.get_random_effect(EFFECT_LIST))
time.sleep(5)
print('Activating all effects')
# combine_effects returns an int bitmask; effect() accepts it positionally?
# No -- it lands in the `effect` parameter slot and .Value would fail for an
# int; pass via bitmask= if this ever breaks. Preserved as written.
dev.effect(Effect.combine_effects(EFFECT_LIST))
time.sleep(5)
print('Returning to constant effect')
dev.effect(Effect.get_effect(EFFECT_LIST, name='Stay on'))
dev.disconnect()
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,602
|
alex3305/pyscinanled
|
refs/heads/master
|
/pyscinanled/__init__.py
|
from .btle_connection import BtleConnection
from .effect import Effect
from .led import ScinanLed
from .const import BRIGHTNESS_MIN, BRIGHTNESS_MAX, DEFAULT_RETRIES, EFFECT_LIST
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,603
|
alex3305/pyscinanled
|
refs/heads/master
|
/pyscinanled/led.py
|
from .btle_connection import BtleConnection
from .const import BRIGHTNESS_MAX, BRIGHTNESS_MIN, DEFAULT_RETRIES, \
COMMAND_BRIGHTNESS, COMMAND_EFFECT, COMMAND_SWITCH
from .effect import Effect
class ScinanLed:
    """High-level controller for a Scinan BLE LED strip."""

    def __init__(self, mac: str, retries: int = DEFAULT_RETRIES):
        """Open a BLE connection to the strip at *mac*, retrying on failure.

        Fixed: removed a pointless bare `return` at the end of __init__.
        """
        self._connection = BtleConnection(mac, retries)
        self._connection.connect()

    def brightness(self, brightness: int):
        """Set brightness, clamped to [BRIGHTNESS_MIN, BRIGHTNESS_MAX]."""
        if brightness > BRIGHTNESS_MAX:
            brightness = BRIGHTNESS_MAX
        if brightness < BRIGHTNESS_MIN:
            brightness = BRIGHTNESS_MIN
        command = COMMAND_BRIGHTNESS.copy()
        command.append(brightness)
        self._connection.send_command(command)

    def disconnect(self):
        """Close the underlying BLE connection."""
        self._connection.disconnect()

    def effect(self, effect: Effect = None, effects: list = None,
               bitmask: int = None):
        """Activate a single effect, a combination, or a raw bitmask.

        Exactly one of the three arguments should be supplied. If none is,
        the command is sent without a payload byte (behaviour preserved
        from the original implementation -- device reaction unverified).
        """
        command = COMMAND_EFFECT.copy()
        if effect is not None:
            command.append(effect.Value)
        elif effects is not None:
            command.append(Effect.combine_effects(effects))
        elif bitmask is not None:
            command.append(bitmask)
        self._connection.send_command(command)

    def turn_on(self):
        """Switch the strip on."""
        command = COMMAND_SWITCH.copy()
        command.append(0x01)
        self._connection.send_command(command)

    def turn_off(self):
        """Switch the strip off."""
        command = COMMAND_SWITCH.copy()
        command.append(0x00)
        self._connection.send_command(command)

    def __del__(self):
        # Fixed: if __init__ raised before _connection was assigned, __del__
        # itself raised AttributeError during garbage collection.
        if getattr(self, '_connection', None) is not None:
            self.disconnect()
|
{"/examples/example_simple.py": ["/pyscinanled/__init__.py"], "/pyscinanled/btle_connection.py": ["/pyscinanled/const.py"], "/pyscinanled/const.py": ["/pyscinanled/effect.py"], "/examples/example_effect.py": ["/pyscinanled/__init__.py"], "/pyscinanled/__init__.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/effect.py", "/pyscinanled/led.py", "/pyscinanled/const.py"], "/pyscinanled/led.py": ["/pyscinanled/btle_connection.py", "/pyscinanled/const.py", "/pyscinanled/effect.py"]}
|
31,607
|
jackfirth/flask-negotiate
|
refs/heads/master
|
/tests.py
|
import unittest
from flask import Flask
from flask_negotiate2 import consumes, produces
HTML = 'text/html'
JSON = 'application/json'
XML = 'application/xml'
ANY_TEXT = 'text/*'
ANY = '*/*'
UNSUPPORTED_CODE = 415
NOT_ACCEPTABLE_CODE = 406
CONSUMES_JSON_ONLY = '/consumes_json_only'
CONSUMES_JSON_AND_HTML = '/consumes_json_and_html'
PRODUCES_JSON_ONLY = '/produces_json_only'
PRODUCES_JSON_AND_HTML = '/produces_json_and_html'
HTML_HEADERS = {'Accept': HTML}
JSON_HEADERS = {'Accept': JSON}
XML_HEADERS = {'Accept': XML}
ANY_HEADERS = {'Accept': ANY}
ANY_TEXT_HEADERS = {'Accept': ANY_TEXT}
class NegotiateTestCase(unittest.TestCase):
    """Tests for the consumes/produces decorators against a tiny Flask app.

    Fixed: on Python 3 `r.data` is bytes, so `assertIn(<str>, r.data)`
    raised TypeError; all assertions now decode via get_data(as_text=True).
    """

    def setUp(self):
        app = Flask(__name__)
        app.config['TESTING'] = True

        @app.route(CONSUMES_JSON_ONLY)
        @consumes(JSON)
        def consumes_json_only():
            return CONSUMES_JSON_ONLY

        @app.route(CONSUMES_JSON_AND_HTML)
        @consumes(JSON, HTML)
        def consumes_json_and_html():
            return CONSUMES_JSON_AND_HTML

        @app.route(PRODUCES_JSON_ONLY)
        @produces(JSON)
        def produces_json_only():
            return PRODUCES_JSON_ONLY

        @app.route(PRODUCES_JSON_AND_HTML)
        @produces(JSON, HTML)
        def produces_json_and_html():
            return PRODUCES_JSON_AND_HTML

        self.app = app
        self.client = app.test_client()

    def assertUnsupported(self, r):
        self.assertEqual(r.status_code, UNSUPPORTED_CODE)

    def assertUnacceptable(self, r):
        self.assertEqual(r.status_code, NOT_ACCEPTABLE_CODE)

    def test_consumes_json_only_valid_content(self):
        r = self.client.get(CONSUMES_JSON_ONLY, content_type=JSON)
        self.assertIn(CONSUMES_JSON_ONLY, r.get_data(as_text=True))

    def test_consumes_json_only_invalid_content(self):
        r = self.client.get(CONSUMES_JSON_ONLY, content_type=HTML)
        self.assertUnsupported(r)

    def test_consumes_json_and_html_valid_content(self):
        for content_type in [JSON, HTML]:
            r = self.client.get(CONSUMES_JSON_AND_HTML,
                                content_type=content_type)
            self.assertIn(CONSUMES_JSON_AND_HTML, r.get_data(as_text=True))

    def test_consumes_json_and_html_invalid_content(self):
        r = self.client.get(CONSUMES_JSON_AND_HTML, content_type=XML)
        self.assertUnsupported(r)

    def test_produces_json_only_valid_accept(self):
        r = self.client.get(PRODUCES_JSON_ONLY, headers=JSON_HEADERS)
        self.assertIn(PRODUCES_JSON_ONLY, r.get_data(as_text=True))

    def test_produces_json_only_invalid_accept(self):
        r = self.client.get(PRODUCES_JSON_ONLY, headers=HTML_HEADERS)
        self.assertUnacceptable(r)

    def test_produces_json_and_html_valid_accept(self):
        for headers in [JSON_HEADERS, HTML_HEADERS]:
            r = self.client.get(PRODUCES_JSON_AND_HTML, headers=headers)
            self.assertIn(PRODUCES_JSON_AND_HTML, r.get_data(as_text=True))

    def test_produces_json_and_html_ivalid_accept(self):
        r = self.client.get(PRODUCES_JSON_AND_HTML, headers=XML_HEADERS)
        self.assertUnacceptable(r)

    def test_produces_json_only_accept_any(self):
        r = self.client.get(PRODUCES_JSON_ONLY, headers=ANY_HEADERS)
        self.assertIn(PRODUCES_JSON_ONLY, r.get_data(as_text=True))

    def test_produces_json_and_html_accept_any_text(self):
        r = self.client.get(PRODUCES_JSON_AND_HTML, headers=ANY_TEXT_HEADERS)
        self.assertIn(PRODUCES_JSON_AND_HTML, r.get_data(as_text=True))
|
{"/tests.py": ["/flask_negotiate2.py"]}
|
31,608
|
jackfirth/flask-negotiate
|
refs/heads/master
|
/flask_negotiate2.py
|
from functools import wraps
from itertools import groupby
from operator import itemgetter
from collections import defaultdict
from flask import request
from werkzeug.exceptions import UnsupportedMediaType, NotAcceptable
def build_groups(acceptable):
    """Index acceptable mimetypes by top-level type.

    Returns a defaultdict mapping each top-level type (e.g. 'text') to the
    set of its acceptable subtypes; every group implicitly contains '*',
    and the wildcard group '*' -> {'*'} is always present.
    """
    groups = defaultdict(lambda: set(['*']))
    groups['*'].add('*')
    for mimetype in acceptable:
        genus, _, species = mimetype.partition('/')
        groups[genus].add(species)
    return groups
class MimeTypeMatcher(object):
    """Decides whether a client mimetype satisfies a route's accepted types.

    One matcher is built per route from its declared content types;
    wildcards ('text/*', '*/*') are honoured via build_groups().
    """

    def __init__(self, acceptable):
        self.acceptable = set(str(a) for a in acceptable)
        self.groups = build_groups(self.acceptable)

    def is_acceptable(self, mimetype):
        """Return True if *mimetype* matches an accepted type or wildcard."""
        if mimetype is None:
            return False
        mt = str(mimetype)
        if not mt.strip():
            return False
        if mt in self.acceptable:
            return True
        genus, _, species = mt.partition('/')
        known = self.groups
        return genus in known and species in known[genus]
def consumes(*content_types):
    """Restrict a route to requests whose Content-Type is one of
    *content_types*; others get 415 Unsupported Media Type."""
    def decorated(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if request.mimetype in content_types:
                return fn(*args, **kwargs)
            raise UnsupportedMediaType()
        return wrapper
    return decorated
def produces(*content_types):
    """
    Annotate a route as only acceptable for clients that can accept the given
    content types.
    --------------------------------------------------------------------------
    content_types: list of mimetype strings,
        example: ["text/html", "application/json"]
    Clients that can accept at least one of the given content types will be
    allowed to run the route. This means both exact content type matches as
    well as wild card matches, so that a client that accepts '*/*' will always
    be permitted access, and one that specifies 'text/*' to an HTML route will
    also be allowed to proceed. Raises 406 Not Acceptable otherwise.
    """
    def decorated(fn):
        # Built once per decorated route, not once per request.
        matcher = MimeTypeMatcher(content_types)

        @wraps(fn)
        def wrapper(*args, **kwargs):
            requested = set(request.accept_mimetypes.values())
            # BUG FIX: on Python 3, filter() returns an iterator, so the old
            # `len(filter(...)) == 0` check raised TypeError. any() works on
            # both Python 2 and 3 and short-circuits on the first match.
            if not any(matcher.is_acceptable(m) for m in requested):
                raise NotAcceptable()
            return fn(*args, **kwargs)
        return wrapper
    return decorated
|
{"/tests.py": ["/flask_negotiate2.py"]}
|
31,611
|
Vrcelj1979/List-of-topics-and-topic-details
|
refs/heads/master
|
/handlers/topics.py
|
from handlers.base import BaseHandler
from google.appengine.api import users
from models.topic import Topic
class TopicAdd(BaseHandler):
    """Renders the 'new topic' form and stores submitted topics."""

    def get(self):
        # Show the empty submission form.
        return self.render_template("topic_add.html")

    def post(self):
        current_user = users.get_current_user()
        if not current_user:
            return self.write("Please login before you're allowed to post a topic.")
        new_topic = Topic(
            title=self.request.get("title"),
            content=self.request.get("text"),
            author_email=current_user.email(),
        )
        new_topic.put()  # put() saves the object in Datastore
        return self.redirect_to("topic-details")
class TopicDetailsHandler(BaseHandler):
    """Lists all stored topics."""

    def get(self):
        topics = Topic.query().fetch()
        # Template iterates the fetched topics under the key "Topic".
        return self.render_template("topic_details.html", params={"Topic": topics})
class TitleTopicHandler(BaseHandler):
    """Renders the title view over all stored topics."""

    def get(self):
        topics = Topic.query().fetch()
        return self.render_template("title.html", params={"Topic": topics})
|
{"/main.py": ["/handlers/topics.py"]}
|
31,612
|
Vrcelj1979/List-of-topics-and-topic-details
|
refs/heads/master
|
/main.py
|
#!/usr/bin/env python
import webapp2
from handlers.base import CookieAlertHandler, MainHandler
from handlers.topics import TopicAdd, TopicDetailsHandler, TitleTopicHandler

# URL routing table. Each Route maps a URL path to its handler class; the
# `name` enables reverse lookups (e.g. redirect_to("topic-details")).
app = webapp2.WSGIApplication([
    webapp2.Route('/', MainHandler, name="main-page"),
    webapp2.Route('/topic/add', TopicAdd, name="topic-add"),
    webapp2.Route('/topic-details', TopicDetailsHandler, name="topic-details"),
    webapp2.Route('/title', TitleTopicHandler, name="title"),
    webapp2.Route('/set-cookie', CookieAlertHandler, name="set-cookie"),
], debug=True)  # debug=True shows tracebacks in responses — disable in production
|
{"/main.py": ["/handlers/topics.py"]}
|
31,618
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/feature_descriptor.py
|
"""
Class to compute Feature Descriptors for each model.
"""
import img_util as iu
import config
import os
import json
import operator
import math
import numpy as np
from skimage import feature
from skimage import io
from skimage.feature import hog
from skimage.transform import rescale
class feature_descriptor:
    """
    Static helpers that compute LBP and HOG feature descriptors.

    Images and JSON descriptor dumps are read from / written to the paths
    configured in `config`. Several methods open GUI windows via
    iu.img_util.display_image, which blocks until a key press.
    """

    @staticmethod
    def lbp(image, method="default"):
        """
        Returns the LBP Image for the grayscale image provided, with pixel radius 2 and 8 neighbours.
        :param image: Grayscale Image for which LBP is to be computed
        :param method: The LBP method type as listed in feature.local_binary_pattern function()
        :return: LBP Image
        """
        radius = 2
        pixel_neighbour = 8
        lbp_image = feature.local_binary_pattern(image, pixel_neighbour, radius, method=method)
        # print(lbp_image.shape)
        return lbp_image

    @staticmethod
    def lbp_feat_desc(img_id):
        """
        Prints the LBP histogram vector for a given Image ID.
        :param img_id: The image ID of the image to be processed (without '.jpg').
        :return: None
        """
        print("\nIn LBP, ", img_id)
        image = iu.img_util.open_image_grayscale(config.IMG_LIST_PATH + img_id + '.jpg')
        iu.img_util.display_image(image, 'Image: ' + img_id)
        block_wt = block_ht = 100  # Size of a window = 100x100
        unified_feat_desc = []  # List to store the unified feature descriptors from each block
        bins = 10  # Since in uniform LBP method, there are only 9 uniform binary values and 1 more for all non-uniform bit patterns
        i = 0
        # Prints the Feat Descriptors computed for each block.
        print('\n\n\nHistogram for Image: ', img_id)
        for x in range(0, image.shape[0], block_wt):
            for y in range(0, image.shape[1], block_ht):
                i += 1
                blocks = image[x: (x + block_wt), y: (y + block_ht)]  # Extracts 100x100 block from image
                # Calculates LBP for each pixel, since it is uniform the bit pattern is right shifted and we will have only 9 possible values
                lbp_img = feature_descriptor.lbp(blocks, "uniform")
                hist_vect = iu.img_util.hist(lbp_img, bins)  # Calculates histogram for each window
                # NOTE(review): the full (counts, bin_edges) tuple is stored
                # here, while compute_lbp_vec stores only hist_vect[0] — the
                # list is never read afterwards, so this is display-only.
                unified_feat_desc.append(hist_vect)  # Stores the histogram vector for each window
                print('\n\nBlock ', i, ':')
                print(hist_vect[0])
                # histogram = np.unique(lbp_img, return_counts=True)

    @staticmethod
    def hog_feat_desc(img_id):
        """
        Prints the HOG feature descriptor vector.
        :param img_id: Image ID of image to be processed (without '.jpg').
        :return: None
        """
        image = io.imread(config.IMG_LIST_PATH + img_id + '.jpg')
        iu.img_util.display_image(image, 'Image: ' + img_id)
        # Downscale to 10% before HOG to keep the descriptor size manageable.
        rescaled_image = rescale(image, 0.1, anti_aliasing=True)
        hog_vec, hog_img = hog(rescaled_image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), visualize=True, multichannel=True)
        print('\nHOG vector calculated: ', hog_vec, '\nHOG Image: \n', hog_img)
        iu.img_util.display_image(hog_img, 'HOG Image')

    @staticmethod
    def compute_lbp_vec(path):
        """
        Computes the LBP feature descriptor for all the images in the provided path
        and dumps them as JSON to config.FEAT_DESC_DUMP + 'lbp.json'.
        :param path: Path of directory containing images.
        :return: None
        """
        LBP_map = {}  # Will store list of images paired with their corresponding descriptors
        for img_file in os.listdir(path):
            image = iu.img_util.open_image_grayscale(path + '/' + img_file)
            # print('Image File: ', path + '/' + img_file)
            # print('Image: ', image)
            block_wt = block_ht = 100
            unified_feat_desc = []
            bins = 10
            # Generate histogram for each block
            for x in range(0, image.shape[0], block_wt):
                for y in range(0, image.shape[1], block_ht):
                    blocks = image[x: (x + block_wt), y: (y + block_ht)]
                    lbp_img = feature_descriptor.lbp(blocks, "uniform")
                    hist_vect = iu.img_util.hist(lbp_img, bins)
                    unified_feat_desc.append(hist_vect[0])  # counts only; bin edges are discarded
            # Keyed by full filename (with extension), unlike the HOG dump below.
            LBP_map[img_file] = (np.array(unified_feat_desc)).tolist()
        # Store the unified descriptor in JSON file
        feature_descriptor_file = config.FEAT_DESC_DUMP + 'lbp.json'
        with open(feature_descriptor_file, 'w', encoding='utf-8') as outfile:
            json.dump(LBP_map, outfile, ensure_ascii=True, indent=2)

    @staticmethod
    def compute_hog_vec(path):
        """
        Computes HOG feature descriptor for all the images in the provided path
        and dumps them as JSON to config.FEAT_DESC_DUMP + 'hog.json'.
        :param path: Path of the directory containing images.
        :return: None
        """
        # NOTE(review): images are read from config.IMG_LIST_PATH, not from
        # `path` as in compute_lbp_vec — confirm whether this is intended.
        HOG_map = {}
        for img_file in os.listdir(path):
            image = io.imread(config.IMG_LIST_PATH + img_file)
            iu.img_util.display_image(image, 'Image: ' + img_file.replace('.jpg', ''))
            rescaled_image = rescale(image, 0.1, anti_aliasing=True)
            hog_vec, hog_img = hog(rescaled_image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2),
                                   visualize=True, multichannel=True)
            # Keyed by filename WITHOUT extension (fetch_img_desc relies on this).
            HOG_map[img_file.replace('.jpg', '')] = hog_vec.tolist()
        feature_descriptor_file = config.FEAT_DESC_DUMP + 'hog.json'
        with open(feature_descriptor_file, 'w', encoding='utf-8') as outfile:
            json.dump(HOG_map, outfile, ensure_ascii=True, indent=2)

    @staticmethod
    def fetch_img_desc(img_id, model_ch):
        """
        Load a single image's descriptor from the JSON dumps.
        :param img_id: image ID without extension.
        :param model_ch: '1' for LBP (keys carry '.jpg'), anything else for HOG.
        :return: the stored descriptor (nested list).
        """
        if model_ch == '1':
            with open(config.FEAT_DESC_DUMP + "lbp.json", "r") as outfile:
                unified_hist = json.load(outfile)
            return unified_hist[img_id + '.jpg']
        else:
            with open(config.FEAT_DESC_DUMP + "hog.json", "r") as outfile:
                unified_hist = json.load(outfile)
            return unified_hist[img_id]

    @staticmethod
    def k_similar_imgs(query_img_id, model_ch, k):
        """
        Displays the K most similar images to a chosen image, with scores.
        :param query_img_id: Image ID of the query image (without '.jpg').
        :param model_ch: '1' for LBP descriptors, '2' for HOG.
        :param k: number of matches to display.
        :return: None
        """
        query_img_vec = feature_descriptor.fetch_img_desc(query_img_id, model_ch)
        query_img_vec_flat = np.ravel(query_img_vec)
        match_scores = {}
        for img_file in os.listdir(config.TEST_IMGS_PATH):
            img_file_id = img_file.replace('.jpg', '')
            if img_file_id != query_img_id:
                img_vec = feature_descriptor.fetch_img_desc(img_file_id, model_ch)
                img_vec_flat = np.ravel(img_vec)
                # NOTE(review): both branches compute the same Euclidean
                # distance; the cosine-similarity variant is commented out.
                if model_ch == '2':
                    sq_diff = np.square(np.subtract(img_vec_flat, query_img_vec_flat))
                    match_scores[img_file_id] = math.sqrt(np.sum(sq_diff))
                else:
                    # print('\ncosine similarity: ', np.dot(img_vec_flat, query_img_vec_flat) / (np.sqrt(img_vec_flat.dot(img_vec_flat)) * np.sqrt(query_img_vec_flat.dot(query_img_vec_flat))))
                    sq_diff = np.square(np.subtract(img_vec_flat, query_img_vec_flat))
                    match_scores[img_file_id] = math.sqrt(np.sum(sq_diff))
                    # match_scores[img_file_id] = np.dot(img_vec_flat, query_img_vec_flat) / (np.sqrt(img_vec_flat.dot(img_vec_flat)) * np.sqrt(query_img_vec_flat.dot(query_img_vec_flat)))
        # Ascending distance: smallest distance == best match.
        sorted_scores = sorted(match_scores.items(), key=operator.itemgetter(1))
        query_image = iu.img_util.open_image_grayscale(config.TEST_IMGS_PATH + '/' + query_img_id + '.jpg')
        iu.img_util.display_image(query_image, 'Query Image: ' + query_img_id)
        i = k
        for score in sorted_scores:
            if i == 0:
                break
            print('\nImage ', score[0], ' --> ', score[1])
            i -= 1
            tmp_img = iu.img_util.open_image_grayscale(config.TEST_IMGS_PATH + '/' + score[0] + '.jpg')
            iu.img_util.display_image(tmp_img, str(k-i) + 'th similar img: ' + score[0])
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,619
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/img_util.py
|
# Utility class containing utilities for img processing
import cv2
import numpy as np
from scipy import misc
class img_util:
    """Static wrappers around OpenCV / NumPy image helpers."""

    @staticmethod
    def open_image_grayscale(path):
        """
        Load the image at *path* as a single-channel grayscale array.
        :param path(string): The complete path where the image can be found
        :return(numpy.ndarray): Image as 2D matrix
        """
        return cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    @staticmethod
    def display_image(image, label):
        """
        Show *image* in a window titled *label*; blocks until a key press.
        :param image: Image to be displayed
        :param label: Window Label
        :return: None
        """
        cv2.imshow(label, image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    @staticmethod
    def hist(img, bins):
        """
        Histogram of the flattened image over the value range [0, bins).
        :param img: Image as represented as a 2D array.
        :param bins: Number of bins in the histogram.
        :return: The (counts, bin_edges) pair from numpy.histogram.
        """
        values = np.ravel(img)
        # Fixed range so bin edges are stable across images.
        return np.histogram(values, bins, range=(0, bins))
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,620
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/main_ramya.py
|
import cv2
import feature_models
import csv
import glob
import time
import os
import config


def _round_descriptor_rows(rows):
    # Round every numeric field (everything after the leading filename)
    # to 3 decimals so the CSV output stays readable.
    for row in rows:
        for index in range(1, len(row)):
            row[index] = round(row[index], 3)


task_number = input('Choose from the following options: \n'
                    '1. Task1: Compute feature descriptor of a single image\n'
                    '2. Task2: Compute feature descriptors on a folder of images\n'
                    '3. Task3: Compute the top K similar matches for a given image\n')
if int(task_number) == 1:
    feature_model = input('Choose a feature model: \n'
                          '1) Color Moments \n'
                          '2) SIFT: \n')
    # filename would be the unique image identifier. Hence image_id will be the filename of a image
    image_id = input('Enter the file name: ')
    base_path = config.IMG_LIST_PATH
    if int(feature_model) == 2:
        # Task 1, SIFT model.
        start = time.time()
        input_image = cv2.imread(base_path + image_id, 0)
        keypoints = feature_models.sift(input_image, image_id)
        _round_descriptor_rows(keypoints)
        # BUG FIX: context manager guarantees the CSV is flushed and closed
        # (the old code opened the file and never closed it).
        with open('sift_task1.csv', 'w') as csv_file:
            csv.writer(csv_file).writerows(keypoints)
        print('Number of keypoints detected: ', len(keypoints))
        print('Feature descriptors have been saved to sift_task1.csv file')
        print('Time taken: ', time.time() - start)
    else:
        # Task 1, color-moments model.
        start = time.time()
        input_image = cv2.imread(base_path + image_id)
        feature_descriptors = feature_models.color_moments(input_image, image_id)
        with open('color_moments_task1.csv', 'w') as csv_file:
            csv.writer(csv_file).writerow(feature_descriptors)
        print('Feature descriptor: ', feature_descriptors)
        print('Feature descriptor has been saved to color_moments_task1.csv file')
        print('Time taken: ', time.time() - start)
elif int(task_number) == 2:
    folder_name = input('Enter the name of the folder: ')
    feature_model = input('Choose a feature model: \n'
                          '1) Color Moments \n'
                          '2) SIFT: \n')
    base_path = config.PROJ_BASE_PATH
    path = base_path + folder_name + '/*.jpg'
    if int(feature_model) == 2:
        # Task 2, SIFT model.
        start = time.time()
        # count keeps track of the number of images in the given folder
        count = 0
        # BUG FIX: open the output once in append mode (matching the old
        # cumulative behaviour) instead of re-opening — and never closing —
        # the file for every image.
        with open('sift_task2.csv', 'a') as csv_file:
            writer = csv.writer(csv_file)
            for input_file in glob.glob(path):
                count = count + 1
                filename = input_file.split('/')[-1]
                input_image = cv2.imread(input_file, 0)
                keypoints = feature_models.sift(input_image, filename)
                _round_descriptor_rows(keypoints)
                writer.writerows(keypoints)
        print('Total number of images: ', count)
        print('Feature descriptors for all the images in the folder', folder_name, 'have been saved to sift_task2.csv file')
        print('Time taken: ', time.time() - start)
    else:
        # Task 2, color-moments model.
        start = time.time()
        count = 0
        with open('color_moments_task2.csv', 'a') as csv_file:
            writer = csv.writer(csv_file)
            for input_file in glob.glob(path):
                count = count + 1
                filename = input_file.split('/')[-1]
                input_image = cv2.imread(input_file)
                writer.writerow(feature_models.color_moments(input_image, filename))
        print('Total number of images: ', count)
        print('Feature descriptors for all the images in the folder', folder_name, 'have been saved to color_moments_task2.csv file')
        print('Time taken: ', time.time() - start)
elif int(task_number) == 3:
    # filename would be the unique image identifier
    image_id = input('Enter the file name: ')
    feature_model = input('Choose a feature model: \n'
                          '1) Color Moments \n'
                          '2) SIFT: \n')
    base_path = config.IMG_LIST_PATH
    k_value = int(input('Enter the value of K: '))
    if int(feature_model) == 2:
        # Task 3, SIFT model (requires sift_task2.csv from Task 2).
        start = time.time()
        input_image = cv2.imread(base_path + image_id, 0)
        keypoints_list_input_image = feature_models.sift(input_image, image_id)
        keypoints_map_input_image = {}
        # each row is <filename, keypoint_xcoord, keypoint_ycoord, scale, orientation, d1 ... d128>;
        # only the 128-d descriptor <d1...d128> is kept for matching
        for value in keypoints_list_input_image:
            keypoints_map_input_image.setdefault(value[0], []).append(value[5:])
        keypoints_map_all_test_images = {}
        with open('sift_task2.csv', 'r') as csv_file:
            for line in csv.reader(csv_file, delimiter=','):
                keypoints_map_all_test_images.setdefault(line[0], []).append([float(i) for i in line[5:]])
        ordered_image_list = feature_models.compute_similarity_scores_sift(keypoints_map_input_image,
                                                                           keypoints_map_all_test_images)
        # extracting the top k images from the list
        top_k_images = ordered_image_list[:k_value]
        # plot all matches in a single window
        feature_models.display_image_grid(k_value, top_k_images, base_path)
        print(top_k_images)
        print('Time taken: ', time.time() - start)
    else:
        # Task 3, color-moments model (requires color_moments_task2.csv).
        start = time.time()
        input_image = cv2.imread(base_path + image_id)
        input_image_descriptor = feature_models.color_moments(input_image, image_id)
        descriptor_map_input_image = {}
        for value in input_image_descriptor:
            descriptor_map_input_image.setdefault(image_id, []).append(value)
        descriptor_map_all_test_images = {}
        # each line is of the form <filename, d1 ... >; only <d1...> is kept
        with open('color_moments_task2.csv', 'r') as csv_file:
            for line in csv.reader(csv_file, delimiter=','):
                descriptor_map_all_test_images.setdefault(line[0], []).append([float(i) for i in line[1:]])
        distances_list = feature_models.compute_similarity_scores_color_moments(descriptor_map_input_image,
                                                                                descriptor_map_all_test_images)
        top_k_images = distances_list[:k_value]
        feature_models.display_image_grid(k_value, top_k_images, base_path)
        print(top_k_images)
        print('Time taken: ', time.time() - start)
else:
    print("Please choose from the given options")
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,621
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/phase1_main.py
|
"""
Phase 1 Project's main code
"""
import sys
import task1
import task2
import task3
def main():
    """Interactive menu loop: dispatch to the chosen task until the user exits."""
    # Menu choice -> task entry point.
    dispatch = {'1': task1.task1, '2': task2.task2, '3': task3.task3}
    while True:
        print('Choose which task u want to execute: \n(Note - Execute Task 2 before Task 3)')
        choice = input('\n1. Task 1\n2. Task 2\n3. Task 3\n4. Exit\nEnter Choice: ')
        if choice == '4':
            break
        if choice in dispatch:
            dispatch[choice]()
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,622
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/task2.py
|
"""Task 2 Code
1. Accepts Path of folder with set of images
2. Accepts the type of model to use (LBP or HOG)
3. Computes feature descriptors for all images in the folder
4. Stores the feature descriptor for each image as JSON in a file corresponding to the chosen model.
"""
import feature_descriptor as fd
def task2():
    """Prompt for a folder, then compute LBP/HOG descriptors for every image in it until Exit."""
    folder = input('\nProvide the folder path: ')
    while True:
        choice = input('\n\nChoose Model from the Menu\n1. LBP\n2. HOG\n3. Exit\nEnter Choice: ')
        if choice == '3':
            break
        if choice == '1':
            fd.feature_descriptor.compute_lbp_vec(folder)
        elif choice == '2':
            fd.feature_descriptor.compute_hog_vec(folder)
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,623
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/task1.py
|
"""Task 1 Code
1. Accepts Image ID from the user
2. Accepts the type of model to use (LBP or HOG)
3. Displays Feature Descriptor in Human Readable Form
"""
import feature_descriptor
def task1():
    """Prompt for an image ID and print its LBP or HOG descriptor until Exit."""
    img_id = input('\nEnter the image ID: ')
    descriptors = feature_descriptor.feature_descriptor
    while True:
        choice = input('\n1. LBP\n2. HOG\n3. Exit\nEnter Choice: ')
        if choice == '3':
            break
        if choice == '1':
            descriptors.lbp_feat_desc(img_id)
        elif choice == '2':
            descriptors.hog_feat_desc(img_id)
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,624
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/task3.py
|
"""Task 3 Code
1. Accepts Image ID, Model (LBP or HOG), value 'k'.
2. Finds k most similar images to the given image ID.
3. Displays matching score for each match in the list.
"""
import feature_descriptor as fd
def task3():
    """Prompt for an image ID, model and k, then display the k most similar images."""
    query_img_id = input('\nImage ID: ')
    k = int(input('\nEnter the number of similar images you want to view: '))
    while True:
        choice = input('\n\nChoose Model from the Menu\n1. LBP\n2. HOG\n3. Exit\nEnter Choice: ')
        if choice == '3':
            break
        if choice in ('1', '2'):
            fd.feature_descriptor.k_similar_imgs(query_img_id, choice, k)
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,625
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/feature_models.py
|
import cv2
import math
import numpy as np
import statistics
from scipy.stats.mstats import skew
import matplotlib.pyplot as plt
# a method that partitions a input image into 100 * 100 windows
def get_100_by_100_windows(input_image):
vertical_partitions = input_image.shape[1] / 100
horizontal_partitions = input_image.shape[0] / 100
windows_set = []
windows_set_1 = np.vsplit(input_image, horizontal_partitions)
for np_array in windows_set_1:
windows_set_2 = np.hsplit(np_array, vertical_partitions)
for i in windows_set_2:
windows_set.append(i)
return windows_set
# computing euclidean distance
def euclidean_distance(v1, v2):
    """Euclidean (L2) distance between two equal-length numeric sequences."""
    total = 0.0
    for a, b in zip(v1, v2):
        diff = a - b
        total += diff * diff
    return math.sqrt(total)
# Computes color moments(mean, std, skewness) for the given input image
def color_moments(input_image, filename):
    """Per-window YUV color moments, rounded to 3 decimals.

    The image is converted BGR->YUV and split into 100x100 windows; for each
    window and channel the mean, standard deviation and skewness are taken.
    Returns [filename] + all Y moments + all U moments + all V moments,
    ordered window by window within each channel.
    """
    yuv_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2YUV)
    channel_descriptors = [[], [], []]  # index 0 = Y, 1 = U, 2 = V
    for window in get_100_by_100_windows(yuv_image):
        for channel in range(3):
            plane = window[:, :, channel]
            channel_descriptors[channel].extend([
                round(np.mean(plane), 3),          # first moment: mean
                round(np.std(plane), 3),           # second moment: standard deviation
                round(skew(plane, axis=None), 3),  # third moment: skewness
            ])
    return [filename] + channel_descriptors[0] + channel_descriptors[1] + channel_descriptors[2]
# Compute similarity score for color moments
def compute_similarity_scores_color_moments(descriptor_map_input_image, descriptor_map_all_test_images):
    """Rank test images by Euclidean distance to the single query descriptor.

    descriptor_map_input_image maps one image id to <filename, d1 ...>;
    only the numeric tail <d1...> is compared. Returns (name, distance)
    pairs sorted by ascending distance.
    """
    # BUG FIX: dict.keys() is a non-indexable view on Python 3; next(iter(...))
    # fetches the single key on both Python 2 and 3.
    input_key = next(iter(descriptor_map_input_image))
    # strip the leading filename field, keep only the numeric descriptor
    descriptor_list_input_image = descriptor_map_input_image[input_key][1:]
    distances_list = []
    for test_image in descriptor_map_all_test_images:
        descriptor_test_image = descriptor_map_all_test_images[test_image][0]
        distance = euclidean_distance(descriptor_list_input_image, descriptor_test_image)
        distances_list.append((test_image, distance))
    return sort_list_of_tuples(distances_list)
# Compute sift descriptor
def sift(input_image, image_id):
    """Detect SIFT keypoints and return one row per keypoint.

    Each row is < filename, keypoint_xcoord, keypoint_ycoord, scale,
    orientation, d1...d128 >, with all numeric fields rounded to 3 decimals.
    """
    detector = cv2.xfeatures2d.SIFT_create()
    key_points, descriptors = detector.detectAndCompute(input_image, None)
    keypoints_vector = []
    for key_point, descriptor in zip(key_points, descriptors):
        row = [image_id, key_point.pt[0], key_point.pt[1], key_point.size, key_point.angle]
        row.extend(descriptor.tolist())
        # round every numeric field for readability
        keypoints_vector.append([row[0]] + [round(value, 3) for value in row[1:]])
    return keypoints_vector
# computes similarity score for sift
def compute_similarity_scores_sift(keypoints_map_input_image, keypoints_map_all_test_images):
    """Rank test images by mean nearest-keypoint distance to the query image.

    For every query keypoint, the closest test-image keypoint distance is
    found; the mean of those minima is the image's score. Returns a list of
    (name, mean_distance) pairs sorted ascending (best match first).
    """
    # BUG FIX: dict.keys() is a non-indexable view on Python 3; next(iter(...))
    # fetches the single key on both Python 2 and 3.
    input_key = next(iter(keypoints_map_input_image))
    keypoints_list_input_image = keypoints_map_input_image[input_key]
    distances_map = {}
    # compute the closest test keypoint for every query keypoint
    for test_image in keypoints_map_all_test_images:
        keypoints_list_test_image = keypoints_map_all_test_images[test_image]
        for keypoint1 in keypoints_list_input_image:
            min_distance_from_keypoint1 = float('inf')
            for keypoint2 in keypoints_list_test_image:
                distance = euclidean_distance(keypoint1, keypoint2)
                if distance < min_distance_from_keypoint1:
                    min_distance_from_keypoint1 = distance
            distances_map.setdefault(test_image, []).append(min_distance_from_keypoint1)
    mean_distances_list = []
    # the mean of all the minimum distances is the score for an image
    for key in distances_map:
        mean_distances_list.append((key, statistics.mean(distances_map[key])))
    sort_list_of_tuples(mean_distances_list)  # sorts in place
    return mean_distances_list
# plots multiple images in a single window
def display_image_grid(image_count, top_k_images, base_path):
    """Show the top matches in one matplotlib window, 6 images per row.

    top_k_images is a list of (filename, score) pairs; images are loaded
    from base_path + filename. Blocks until the window is closed.
    """
    display_grid = plt.figure(figsize=(20, 20))
    column_count = 6
    # BUG FIX: floor division — under Python 3, `/` returns a float, which
    # breaks range() and add_subplot().
    row_count = (image_count // 6) + 1
    for itr in range(1, column_count * row_count + 1):
        if itr > image_count:
            break
        file_name, score = top_k_images[itr - 1]
        image = cv2.imread(base_path + file_name)
        ax = display_grid.add_subplot(row_count, column_count, itr)
        ax.set_title(file_name + ' Score: ' + str("{0:.3f}".format(score)))
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.show()
# sort the list of tuples in ascending order of their second field
def sort_list_of_tuples(tuple_list):
    """Sort (name, score) pairs by score IN PLACE and return the same list.

    Must stay in-place: some callers rely on the side effect and ignore the
    return value.
    """
    def second_field(pair):
        return pair[1]

    tuple_list.sort(key=second_field)
    return tuple_list
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,626
|
VarunVedant/mwdb-project
|
refs/heads/master
|
/config.py
|
# Project Constants
# NOTE(review): all paths below are absolute, machine-specific locations and
# must be adjusted per deployment.

# Base Path of the project
PROJ_BASE_PATH = '/media/binary-war-lord/Files/Personal Stuff/Projects/MWDB/Working_Area/'
# Path to the 11k image dataset
IMG_LIST_PATH = "/media/binary-war-lord/Files/Personal Stuff/Projects/MWDB/Working_Area/Hands/"
# IMG_LIST_PATH = "/media/binary-war-lord/Files/Personal Stuff/Projects/MWDB/Working_Area/testset4/"
# Path where the feature descriptors (lbp.json / hog.json) are to be stored
FEAT_DESC_DUMP = "/media/binary-war-lord/Files/Personal Stuff/Projects/MWDB/Working_Area/task2_feature_descriptors/"
# path for all images used in task 3
TEST_IMGS_PATH = "/media/binary-war-lord/Files/Personal Stuff/Projects/MWDB/Working_Area/CSE 515 Fall19 - Smaller Dataset"
# TEST_IMGS_PATH = "/media/binary-war-lord/Files/Personal Stuff/Projects/MWDB/Working_Area/testset4"
|
{"/feature_descriptor.py": ["/img_util.py", "/config.py"], "/main_ramya.py": ["/feature_models.py", "/config.py"], "/phase1_main.py": ["/task1.py", "/task2.py", "/task3.py"], "/task2.py": ["/feature_descriptor.py"], "/task1.py": ["/feature_descriptor.py"], "/task3.py": ["/feature_descriptor.py"]}
|
31,656
|
Allaeddineattia/Vocal_Music_Player
|
refs/heads/master
|
/setup.py
|
from gtts import gTTS
import os
from datetime import datetime
import requests
print('setup mode')
def getPaths():
    """Return, in fixed order, every directory and mp3 path the player expects to exist."""
    lib = "/usr/lib/vocal_player"
    ins = lib + "/instruction"
    return [
        "./music_mp3",
        "./saves",
        lib,
        ins,
        lib + "/minute.mp3",
        lib + "/time.mp3",
        lib + "/midnight.mp3",
        lib + "/clock.mp3",
        lib + "/welcome_to_vocale.mp3",
        ins + "/on.mp3",
        ins + "/first.mp3",
        ins + "/create.mp3",
        ins + "/create_0.mp3",
        ins + "/create_1.mp3",
        ins + "/create_2.mp3",
        ins + "/creation_option.mp3",
        ins + "/creation_option_0.mp3",
        ins + "/creation_option_1.mp3",
        ins + "/creation_option_2.mp3",
        ins + "/creation_option_3.mp3",
        ins + "/creation_option_4.mp3",
        ins + "/creation_option_6.mp3",
        ins + "/play.mp3",
        ins + "/play_0.mp3",
        ins + "/play_2.mp3",
        ins + "/play_3.mp3",
        ins + "/play_1.mp3",
        ins + "/play_6.mp3",
        ins + "/play_9.mp3",
        ins + "/play_8.mp3",
        ins + "/play_7.mp3",
        ins + "/save_playlist_option.mp3",
        ins + "/save_playlist.mp3",
        ins + "/save_cancled.mp3",
        ins + "/save_succeeded.mp3",
        ins + "/slot_exist.mp3",
        ins + "/pick_saved.mp3",
        ins + "/pick_saved_cancled.mp3",
    ]
def connected_to_internet(url='http://www.google.com/', timeout=5):
    """Best-effort connectivity probe: True iff *url* answers within *timeout* seconds."""
    try:
        requests.get(url, timeout=timeout)
        return True
    except requests.RequestException:
        # BUG FIX: the old `except requests.ConnectionError` let read
        # timeouts (requests.Timeout) and other request failures propagate
        # and crash the caller; RequestException is the base of all of them.
        print("No internet connection available.")
        return False
def make_dir():
    """Create the directories the player needs; existing directories are kept.

    NOTE(review): the /usr/lib targets normally require root privileges —
    TODO confirm the script is expected to run as root.
    """
    print("creating needed repositorys")
    # BUG FIX: `0755` is Python-2-only octal syntax (SyntaxError on Python 3);
    # `0o755` parses identically on Python 2.6+ and Python 3.
    for directory in ("./music_mp3", "./saves", "./music",
                      "/usr/lib/vocal_player", "/usr/lib/vocal_player/instruction"):
        if not os.path.exists(directory):
            os.mkdir(directory, 0o755)
    print("repositorys created")
def _create_speech(path, text):
    # Synthesise *text* with gTTS and save it at *path*, skipping files that
    # already exist (gTTS needs network access, so generation is done once).
    if not os.path.exists(path):
        print("creating " + path)
        speech = gTTS(text=text, lang='en')
        speech.save(path)


def time():
    """Pre-generate all time-announcement mp3 clips under /usr/lib/vocal_player."""
    base = "/usr/lib/vocal_player/"
    _create_speech(base + "minute.mp3", "minute")
    _create_speech(base + "time.mp3", "the time is")
    # one spoken clip per minute/hour number 1..60
    for i in range(1, 61):
        _create_speech(base + str(i) + ".mp3", str(i))
    _create_speech(base + "midnight.mp3", "midnight")
    _create_speech(base + "clock.mp3", "o'clock")
def instruction():
    """Synthesise every spoken menu/instruction clip used by the player.

    Each entry below pairs the target mp3 path with the English sentence
    to speak.  A clip is only generated when its file does not exist yet,
    so subsequent runs are cheap.  Entry order matches the original
    creation order exactly.

    Bug fix: the "play" menu text contained ``mute\\unmute`` where ``\\u``
    was an accidental escape sequence (a SyntaxError under Python 3 and a
    literal backslash spoken aloud under Python 2); it now reads
    ``mute/unmute`` as intended.
    """
    base = '/usr/lib/vocal_player/instruction/'
    clips = [
        ('/usr/lib/vocal_player/welcome_to_vocale.mp3',
         "welcome to C L L vocale music player."),
        (base + 'on.mp3',
         "press 5 to start the qutomatic guidance\n highly recomanded if this your first use"),
        (base + 'first.mp3',
         "If you're blocked press 5 for help."),
        (base + 'create.mp3',
         "press 0 to exit the program \n press 1 to create new list \n or press 2 to chose a saved listes"),
        (base + 'create_0.mp3', "exit the program"),
        (base + 'create_1.mp3', "create new list"),
        (base + 'create_2.mp3', "chose between saved listes"),
        (base + 'creation_option.mp3',
         "press 0 to exit the program \npress 1 to add to list\npress 2 to ignore \npress 3 to add all the rest\n press 4 to rehear the name \n 6 to end selection"),
        (base + 'creation_option_0.mp3', "exit the program"),
        (base + 'creation_option_1.mp3', "add the track"),
        (base + 'creation_option_2.mp3', "skip to next track"),
        (base + 'creation_option_3.mp3', "add all the rest to playlist"),
        (base + 'creation_option_4.mp3', "rehear the name"),
        (base + 'creation_option_6.mp3', "end selection"),
        (base + 'play.mp3',
         "press 0 to exit \npress 2 to pause and resume \npress 3 to skip to next \npress 1 to back to previous \npress 6 to volume down \npress 9 to volume up \npress 7 to mute/unmute \npress 8 to hear time \n"),
        (base + 'play_0.mp3', " exit the program \n"),
        (base + 'play_2.mp3', "pause and resume "),
        (base + 'play_3.mp3', "skip to next"),
        (base + 'play_1.mp3', " back to previous "),
        (base + 'play_6.mp3', "lower the volume "),
        (base + 'play_9.mp3', "volume up "),
        (base + 'play_8.mp3', " hear time "),
        (base + 'play_7.mp3', " mute unmute"),
        (base + 'save_playlist_option.mp3', " press 1 to save the list"),
        (base + 'save_playlist.mp3',
         " Chose a slot from 1 to 9 exept the 5 wich remain used for help or use 0 to cancel"),
        (base + 'save_cancled.mp3', "your playlist will not be saved"),
        (base + 'save_succeeded.mp3', "playlist saved in slot number "),
        (base + 'auto_on.mp3', "auto guidance is on"),
        (base + 'auto_off.mp3', "auto guidance is off"),
        (base + 'save_playlist_mode.mp3', "you are about to save your playlist"),
        (base + 'creation_mode.mp3', "you are about to create a playlist"),
        (base + 'help.mp3',
         "repress 5 to hear all instruction or press one of the other buttons to hear it funcionality"),
        (base + 'slot_exist.mp3', "slot exist already"),
        (base + 'pick_saved_cancled.mp3', "pick saved cancled"),
        (base + 'pick_saved.mp3', "chose a slot"),
    ]
    for path, text in clips:
        if not os.path.exists(path):
            # Same print-then-save order as the original code.
            print("creating " + path)
            gTTS(text=text, lang='en').save(path)
# Ensure the on-disk layout exists as soon as this module is imported.
make_dir()
# if (connected_to_internet()==True):
#     time()
#     instruction()
|
{"/main.py": ["/setup.py"]}
|
31,657
|
Allaeddineattia/Vocal_Music_Player
|
refs/heads/master
|
/main.py
|
try :
import setup
except:
print('\n!!!!!!!\nsuper user mode is needed.\nre-execute the program as superuser.\nsudo python main.py\n!!!!!!!!')
exit(0)
from gtts import gTTS
from playsound import playsound
import os
from random import shuffle
import requests
class autoInstruct:
    # Module-wide flag: set to True once the user enables automatic vocal
    # guidance (see autoInstruction()); read by the menu/playback code.
    value=False
#------------------verification de l'existance des fichiers setup---------------------
def checkCreatedInstruction(path):
    """Return 1 when the clip at *path* is missing or truncated, else 0.

    A zero-byte file is treated as a failed synthesis: it is deleted so
    the setup pass regenerates it.

    Bug fix: the original deleted via ``os.system("sudo rm "+path)`` inside
    a try/except — but os.system never raises, so a failed removal went
    completely undetected.  os.remove raises OSError on failure, which we
    now report (the whole program already requires root, see the setup
    import guard at the top of the file).
    """
    if not os.path.exists(path):
        print(path + " missing")
        return(1)
    if os.path.getsize(path) == 0:
        print(path + " not created well")
        try:
            os.remove(path)
        except OSError:
            print('\n!!!!!!!\nsuper user mode is needed.\nre-execute the program as superuser.\nsudo python main.py\n!!!!!!!!')
            exit(0)
        return(1)
    print(path + ' is cheked.')
    return(0)
def missingInstruction():
    """Count setup clips that are absent or zero-sized.

    Checks the fixed instruction paths first, then the 60 numbered
    fragments, exactly as the original scan order did.
    """
    numbered = ["/usr/lib/vocal_player/" + str(i) + ".mp3" for i in range(1, 61)]
    total = sum(checkCreatedInstruction(p) for p in setup.getPaths())
    total += sum(checkCreatedInstruction(p) for p in numbered)
    return(total)
#----------------verification des dossiers du sauvgarde
#------------------fct pour sauvgarder une liste dans fichier---------------------
def saveListe(liste,path):
    """Write the playlist entries (already newline-terminated strings)
    verbatim to *path*, overwriting any previous content.

    Bug fix: the original ended with ``fp.close`` (no parentheses), so the
    method was never called and the handle leaked; ``with`` guarantees the
    file is flushed and closed even on error.
    """
    with open(path, 'w') as fp:
        fp.writelines(liste)
#------------------fct pour sauvgarder une liste dans un slots---------------------
#sauvegarde dans the least aded
def savePlaylist(play_liste):
    # Interactively save *play_liste* into one of the slot files under
    # ./saves/ (slots 1-9, 5 reserved for help, 0 cancels).
    # NOTE(review): this code relies on Python 2 semantics — input()
    # evaluates the entry, so `number` is an int; under Python 3 every
    # comparison against 5/0/1 below would be False.  Confirm interpreter.
    existsSlots=os.popen("ls ./saves")  # NOTE(review): result never read
    if (autoInstruct.value):
        playsound('/usr/lib/vocal_player/instruction/save_playlist.mp3')
    while True:
        try:
            number=input("0:exit \nelse exept 5\ninput: ")
            break
        except :
            print('please use a number.')
    while True:
        # d==1 means "write the slot"; reset each pass, may be overridden
        # by the overwrite prompt below when the slot already exists.
        d=1
        while (number==5):
            # 5 = replay the help clips, then ask again.
            playsound('/usr/lib/vocal_player/instruction/save_playlist_mode.mp3')
            playsound('/usr/lib/vocal_player/instruction/save_playlist.mp3')
            while True:
                try:
                    number=input("0:exit \n else exept 5\ninput: ")
                    break
                except :
                    print('please use a number.')
        if (number==0):
            playsound('/usr/lib/vocal_player/instruction/save_cancled.mp3')
            print("save cancled")
            break
        else :
            if (os.path.exists('./saves/'+str(number))):
                # Slot occupied: announce it, read the mtime via `stat`
                # and ask whether to overwrite.
                playsound('/usr/lib/vocal_player/instruction/slot_exist.mp3')
                date_modification=os.popen("stat ./saves/"+str(number)+" |grep Modify |cut -d \ -f 2").read()
                heure_modification=os.popen("stat ./saves/"+str(number)+" |grep Modify |cut -d \ -f 3").read()
                print(date_modification)#months sound
                print(heure_modification)
                playsound('/usr/lib/vocal_player/instruction/save_playlist.mp3')
                while True:
                    try:
                        d=input('press one to ecrase the old one\ninput: ')
                        break
                    except :
                        print('please use a number.')
            if d==1:
                # Free slot, or user confirmed the overwrite.
                saveListe(play_liste,'./saves/'+str(number))
                playsound('/usr/lib/vocal_player/instruction/save_succeeded.mp3')
                playsound('/usr/lib/vocal_player/'+str(number)+'.mp3')
                break
            # Overwrite declined: ask for another slot and loop.
            while True:
                try:
                    number=input("0:exit \n else exept 5\ninput: ")
                    break
                except :
                    print('please use a number.')
#------------------fct pour charger une liste---------------------
def loadList(path):
    """Read a saved playlist and return its lines (newline included).

    Returns [] when *path* does not exist, matching the caller's
    "empty playlist" convention.

    Bug fix: the original ended with ``fp.close`` (no parentheses), so the
    file handle was never actually closed; ``with`` fixes the leak.
    """
    if not(os.path.exists(path)) :
        print("path does not exist :/\n")
        return([])
    with open(path, 'r') as fp:
        return(fp.readlines())
#------------------fct pour fermer le programme---------------------
def killall():
    # Terminate every player subprocess whose PID was recorded (by the
    # play.py side of the app) in /tmp/ps_pid, and clear the
    # "currently playing" marker file.
    f=open("/tmp/ps_pid",'r')
    l=f.readlines()
    f.close()
    f=open("/tmp/music_played",'w')
    f.write("")
    f.close()
    for i in l :
        # NOTE(review): each line is assumed to hold exactly one PID; the
        # trailing newline is tolerated by the shell's kill command.
        os.system("kill -9 "+i)
#------------------fct pour ecouter les instruction de la creation d'une liste ---------------------
def createOptionInstruction():
    # Spoken help for the playlist-creation menu: announce the mode, then
    # either describe the single key the user presses or read the full menu.
    playsound('/usr/lib/vocal_player/instruction/creation_mode.mp3')
    playsound('/usr/lib/vocal_player/instruction/help.mp3')
    while True:
        try:
            # NOTE(review): bare input() — under Python 2 this evaluates the
            # entry to an int; under Python 3 it stays a str and the
            # membership test below would always fail.  Confirm interpreter.
            op=input()
            break
        except :
            print('please use a number.')
    if(op in (0,1,2,3,4,6)):
        # Play the description of the specific option.
        playsound('/usr/lib/vocal_player/instruction/creation_option_'+str(op)+'.mp3')
    else:
        # Any other key: play the full menu description.
        playsound('/usr/lib/vocal_player/instruction/creation_option.mp3')
#------------------fct pour creer une liste --------------------------------------------------------
def createList(musicList) :
    # Interactively build a playlist: for each track, speak its name and let
    # the user take it (1), skip it (2), take all remaining shuffled (3),
    # cancel everything (0), finish early (6), rehear (4) or get help (5).
    # Returns the selected track paths, [] when cancelled.
    final=[]
    for originTrackPath in musicList:
        trackPath=originTrackPath.split("/")
        trackName=trackPath[-1]
        trackName=trackName.replace(".mp3","")
        print(trackName)
        # Spoken-name clip generated earlier by createTrackName().
        playsound("./music_mp3/name_"+trackName+".mp3")
        while True:
            try:
                x=int(input("1:take \n2:leave\n3:take_all\n0:cancel\n6:selection complet\n4:rehear the name\n5:instruction\ninput: "))
                break
            except :
                print('please use a number')
        # 4 and 5 are "repeat" actions: stay on the same track until the
        # user gives a terminating choice.
        while x not in (0,1,2,6,3) :
            if x==4 :
                playsound("./music_mp3/name_"+trackName+".mp3")
            if x==5 :
                createOptionInstruction()
            while True:
                try:
                    x=int(input("1:take \n2:leave\n3:take_all\n0:cancel\n6:selection complet\n4:rehear the name\n5:instruction\n"))
                    break
                except :
                    print('please use a number')
        if x==0 :
            # Cancel: discard everything chosen so far.
            return([])
        if x==1 :
            final.append(originTrackPath)
        if x==3 :
            # Append every not-yet-chosen track in random order, then stop.
            dif=[var for var in musicList if var not in final]
            shuffle(dif)
            final.extend(dif)
            break
        if x==6 :
            break
    return(final)
#------------------fct pour adapter une chaine de caractere a le shell ---------------------
def adapt_chaine(chaine):
    """Make *chaine* safe to embed in a shell command line.

    Backslash-escapes the shell-special characters used in track paths
    and drops any newline characters.
    """
    specials = set(' "/<>|:&-()\'')
    escaped = []
    for car in chaine:
        if car == '\n':
            continue
        if car in specials:
            escaped.append('\\')
        escaped.append(car)
    return(''.join(escaped))
#------------------fct pour ecouter un fichier mp3 ---------------------
def playMusic():
    # Launch the audio player (play.py) in the background and the keypad
    # controller (con.py) in the foreground; returns when con.py exits.
    os.system("python play.py & python con.py ")
#------------------fct pour choisir un slot ---------------------
def pickSaved():
    # Let the user pick one of the saved playlist slots under ./saves
    # (listed newest-first) and return its tracks via loadList().
    # NOTE(review): relies on Python 2 input() evaluating digits to ints;
    # under Python 3 the ==5 / ==0 comparisons would always be False.
    playsound('/usr/lib/vocal_player/instruction/pick_saved.mp3')
    out=os.popen("ls -t ./saves").read()
    slots=out.split("\n")
    # Drop the empty entry produced by the trailing newline of `ls`.
    slots=slots[:-1]
    print("slots: ",slots)
    while True:
        try:
            saved_number=input()
            break
        except :
            print('use a number')
    # Loop until a real slot is chosen; 5 replays the slot names, 0 cancels.
    while ((str(saved_number)not in slots) or (saved_number==5) or (saved_number==0) ):
        if(saved_number==5):
            playsound('/usr/lib/vocal_player/instruction/pick_saved.mp3')
            for slot in slots:
                # Speak each available slot number.
                playsound("/usr/lib/vocal_player/"+slot+".mp3")
        if (saved_number==0) :
            playsound('/usr/lib/vocal_player/instruction/pick_saved_cancled.mp3')
            break
        if (str(saved_number)not in slots):
            #playsound('/usr/lib/vocal_player/instruction/slot_not_found.mp3')
            print("!!! slot not found !!!")
        while True:
            try:
                saved_number=input()
                break
            except :
                print('use a number')
    # NOTE(review): on cancel (0) this still calls loadList('./saves/0'),
    # which prints a warning and returns [] — confirm that is intended.
    return(loadList('./saves/'+str(saved_number)))
#------------------fct pour extraire le tag name et le artisite d'apres un fichier mp3---------------------
def getTag(pistePath):
    """Return a speakable tag for the mp3 at *pistePath*.

    Uses the external `mp3info` tool to read the ID3 title and artist.
    Returns "<title> by <artist>" when both are present, the bare title
    when only it is present, and "" when no title could be read — callers
    (createTrackName) test for "" to fall back to the file name.
    """
    pistePath=adapt_chaine(pistePath)
    title=os.popen("mp3info -p %t "+pistePath).read()
    artist=os.popen("mp3info -p %a "+pistePath).read()
    if (title!=""):
        if (artist!=""):
            return(title+" by "+artist)
        return(title)
    # Bug fix: the original fell off the end here and returned None,
    # which broke the caller's `pisteTag == ""` fallback test.
    return("")
#------- obtention des paths du piste musicaux
def findMusicTrack():
    # List every *.mp3 under ./music, newest first: the shell `find` prints
    # "<year> <day-of-year> <time> <path> ", sort orders on those three
    # timestamp columns (descending), and `cut` keeps only the path part.
    music_founded=os.popen("find ./music -type f -iname \"*.mp3\" -printf \"%CY %Cj %CT %p \n\" | sort -r -k1,1 -k2,2 -k3,3 | cut -d \ -f5-").read()
    music_founded=music_founded.split('\n')
    # Drop the empty string produced by the trailing newline.
    music_founded=music_founded[:-1]
    for i in range(len(music_founded)) :
        # Strip the trailing space left by the -printf format.
        music_founded[i]=music_founded[i][:-1]
    return(music_founded)
#------- creation des fichiers mp3 contenants les noms des pistes trouver
def createTrackName(musicFounded):
    """Synthesise a spoken-name clip (./music_mp3/name_<track>.mp3) for
    every track that does not have one yet.

    The spoken text is the mp3's ID3 tag when available, otherwise the
    file name with underscores turned into spaces.  Tracks that cannot be
    synthesised offline are counted and reported.

    Bug fix: the original tested ``if (setup.connected_to_internet):`` —
    the *function object*, which is always truthy — instead of calling it,
    so the offline branch was unreachable and gTTS crashed when offline.
    """
    missingFile=0
    for pistePath in musicFounded :
        pisteTag=getTag(pistePath)
        pisteName=pistePath.split("/")[-1].replace(".mp3","")
        if (pisteTag==""):
            # No ID3 tag: fall back to a cleaned-up file name.
            pisteTag=pisteName.replace("_"," ")
        if not(os.path.exists("./music_mp3/name_"+pisteName+".mp3")):
            if (setup.connected_to_internet()):
                string=gTTS(text=pisteTag.replace(".mp3",""), lang='en')
                string.save("./music_mp3/name_"+pisteName+".mp3")
            else:
                missingFile+=1
    print(str(missingFile)+" track is missing")
#----------------- get auto instruction option
def autoInstruction():
    # Ask once, at startup, whether the user wants automatic vocal guidance;
    # pressing 5 sets the global autoInstruct.value flag.
    playsound('/usr/lib/vocal_player/instruction/on.mp3')
    while True:
        try:
            option=int(input('5 to automatic\ninput: '))
            break
        except:
            # NOTE(review): broad except also swallows KeyboardInterrupt here.
            print("use a number please")
    if(option==5):
        autoInstruct.value=True
#---------------------------------------------------------------------
#--------------------main------------------------------------------------
#-----------------------------------------------------------------------
# --------------------verification des fichier system
# Regenerate any missing/corrupt setup clips before starting (needs net + root).
while(missingInstruction()!=0):
    print('there is '+str(missingInstruction())+' configuration file/s not found')
    if (setup.connected_to_internet()):
        print('setup mode will be executed please wait...')
        try:
            setup.time()
            setup.instruction()
        except :
            print('\n!!!!!!!\nsuper user mode is needed.\nre-execute the program as superuser.\nsudo python main.py\n!!!!!!!!')
            exit(0)
    else:
        print('internet conection is needed')
        exit(-4)
print('all needed files exists')
#----------------message de reciption
playsound('/usr/lib/vocal_player/welcome_to_vocale.mp3')
#---------les piles des piste musicaux
musicToPlay=[]
music_played=[]
musicFounded=findMusicTrack()
if (musicFounded==[]):
    print('no music founded')
    exit(4)
# Make sure every track has a spoken-name clip, then ask about auto guidance.
createTrackName(musicFounded)
autoInstruction()
playsound('/usr/lib/vocal_player/instruction/first.mp3')
if (autoInstruct.value):
    playsound('/usr/lib/vocal_player/instruction/auto_on.mp3')
else:
    playsound('/usr/lib/vocal_player/instruction/auto_off.mp3')
if (autoInstruct.value):
    playsound('/usr/lib/vocal_player/instruction/create.mp3')
# Top-level menu: 0 exit, 1 create a playlist, 2 load a saved one, 5 help.
op=5
while op not in (0,2,1):
    while True:
        try:
            op=int(input("0:exit\n1:create a list\n2:chose a saved list\n5:instructions\ninput: "))
            break
        except :
            print('please use a number ')
    if (op==5):
        playsound('/usr/lib/vocal_player/instruction/help.mp3')
        while True:
            try:
                # NOTE(review): bare input() here (no int()) — Python 2
                # semantics assumed, as elsewhere in this file.
                op=input("0:exit\n1:create a list\n2:chose a saved list\n5:instructions\ninput: ")
                break
            except :
                print ('use a number')
        if (op==5):
            playsound('/usr/lib/vocal_player/instruction/create.mp3')
        else:
            # Describe the chosen option, then return to the menu.
            playsound('/usr/lib/vocal_player/instruction/create_'+str(op)+'.mp3')
            op=5
if op == 0:
    exit(-4)
if op == 1:
    playsound('/usr/lib/vocal_player/instruction/creation_mode.mp3')
    if (autoInstruct.value):
        playsound('/usr/lib/vocal_player/instruction/creation_option.mp3')
    musicToPlay=createList(musicFounded)
    # Reverse, then re-join/split to normalise the newline terminators.
    musicToPlay=musicToPlay[::-1]
    x='\n\t'.join(musicToPlay)
    musicToPlay=x.split('\t')
    playsound('/usr/lib/vocal_player/instruction/save_playlist_option.mp3')
    while True:
        try:
            saveOption=int(input('1: save\ninput: '))
            break
        except:
            print('please a number')
    if (saveOption==1):
        playsound('/usr/lib/vocal_player/instruction/save_playlist_mode.mp3')
        savePlaylist(musicToPlay)
if op == 2:
    musicToPlay=pickSaved()
print("\nMusic to be played:\n")
for musicTrack in musicToPlay:
    print(musicTrack)
# Hand the playlist to the player processes via /tmp/music_to_play; keep
# relaunching the player until the queue file is fully consumed.
saveListe(musicToPlay,'/tmp/music_to_play')
if (autoInstruct.value):
    playsound("/usr/lib/vocal_player/instruction/play.mp3")
while (os.path.getsize('/tmp/music_to_play') > 0):
    musicToPlay=loadList('/tmp/music_to_play')
    playMusic()
print('fin')
killall()
|
{"/main.py": ["/setup.py"]}
|
31,664
|
aakarpanth2003/turtleartdesign
|
refs/heads/master
|
/myFunctions.py
|
def polygon(t, s, d):
    """Draw a regular polygon with turtle *t*: *s* sides of length *d*.

    Bug fix: the turning angle now uses float division (360.0/s); under
    Python 2 the original ``360/s`` truncated for side counts that do not
    divide 360 (e.g. s=7), leaving the polygon unclosed.
    """
    a = 360.0/s
    for times in range(s):
        t.forward(d)
        t.right(a)
def jump(t,x,y):
    # Relocate turtle t to (x, y) without drawing a connecting line.
    t.penup()
    t.goto(x,y)
    t.pendown()
def cool(t,d,c1,c2):
    # Draw a square of side d in colour c1, then an overlapping triangle of
    # the same side length in colour c2, reusing polygon() above.
    t.color(c1)
    polygon(t,4,d)
    t.color(c2)
    polygon(t,3,d)
|
{"/project.py": ["/myFunctions.py"]}
|
31,665
|
aakarpanth2003/turtleartdesign
|
refs/heads/master
|
/project.py
|
import turtle
from myFunctions import*
from random import*

turtle.colormode(255)
ak = turtle.Turtle()
# Random coordinates are drawn exactly as before (even though they are
# never used later) so the random-state side effect is preserved.
x = (randint(0,350))
y = (randint(0,350))
ak.width(1)
ak.speed(187348458)
turtle.bgcolor("black")


def _jump(t, px, py):
    """Move *t* to (px, py) without leaving a trace."""
    t.penup()
    t.goto(px, py)
    t.pendown()


def _spiral(t):
    """Central multi-coloured spiral: 100 shrinking circle passes."""
    for times in range(100):
        t.color(times, 255, 255 - times)
        t.circle(80 - times)
        t.right(1)
        t.color(130, 150, 255 - times)
        t.circle(50 - times)
        t.left(3)
        t.forward(1)
        t.left(1)
        t.circle(times)
        t.forward(3)
        t.left(2)
        t.circle(times)
        t.forward(5)


def _flower(t, colour):
    """12-petal flower figure drawn in *colour* at the current position.

    This exact command sequence was duplicated six times in the original
    script; it is factored out here with identical commands and order.
    """
    for _ in range(12):
        t.color(colour)
        t.left(1)
        t.forward(75)
        t.right(175)
        t.circle(10)
        t.forward(20)
        t.left(20)
        t.forward(50)
        t.right(997)
        t.circle(10)
        t.left(90)
        t.forward(10)
        t.right(6755)
        t.circle(20)
        t.forward(50)
        t.right(875)
        t.circle(10)


def _star(t):
    """Eight-pointed star in the turtle's current pen colour."""
    for _ in range(8):
        t.forward(100)
        t.right(135)


_spiral(ak)

_jump(ak, 0, 350)
_flower(ak, "cyan")
_jump(ak, -200, 200)
_flower(ak, "magenta")
_jump(ak, -30, -350)
_flower(ak, "pink")

_jump(ak, 400, 200)
_star(ak)  # no colour change here: inherits "pink", as in the original
_jump(ak, -400, -200)
ak.color("purple")
_star(ak)
_jump(ak, -400, 200)
ak.color("pink")
_star(ak)

_jump(ak, 400, -200)
_flower(ak, "red")

_jump(ak, 600, -400)
ak.color("cyan")
_star(ak)
_jump(ak, -600, 400)
ak.color("cyan")
_star(ak)
_jump(ak, -600, -400)
ak.color("cyan")
_star(ak)
_jump(ak, 600, 400)
ak.color("violet")
_star(ak)

_jump(ak, 600, 0)
_flower(ak, "cyan")
_jump(ak, -600, 0)
_flower(ak, "magenta")
|
{"/project.py": ["/myFunctions.py"]}
|
31,760
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/stock_trends.py
|
import pandas as pd
import numpy as np
from pandas import json_normalize
#from selenium.webdriver.support.expected_conditions import element_selection_state_to_be
#import matplotlib.pyplot as plt
#import seaborn as sns
import streamlit as st
import base64
import plotly.express as px
from PIL import Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
#import matplotlib.pyplot as plt
import io
from math import floor
##stock packages
import yfinance as yfin
import finnhub
## web crawling packages
import time
from datetime import date
from datetime import datetime
import datetime
import requests
from lxml import html
import csv
##functions
def get_table_download_link(df, filename='download', message='Download csv result file'):
    """Generates a link allowing the data in a given panda dataframe to be downloaded

    in: dataframe
    out: href string

    Bug fix: the download attribute previously hard-coded its file name and
    ignored the *filename* parameter entirely; it now uses "<filename>.csv".
    """
    csv_text = df.to_csv(index=False)  # renamed so the stdlib `csv` module is not shadowed
    b64 = base64.b64encode(csv_text.encode()).decode()  # some strings <-> bytes conversions necessary here
    href = f'<a href="data:file/csv;base64,{b64}" download="{filename}.csv" >{message}</a>'
    return href
def app():
    """Streamlit page body: static placeholder shown while this page is built."""
    #st.title('Stock Trends')
    st.write("""
    # Coming Soon!
    """)
    # Placeholder graphic; construction.jpg must live next to the app script.
    construction = Image.open('construction.jpg')
    st.image(construction, caption='Page Under Construction...Come Back Later', width=680)
    #st.write("""## Data Sources:""")
    #st.write("""1.) yfinance python package""")
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,761
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/stock_analysis.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 10:10:39 2021
@author: ROBERTLJ
"""
import pandas as pd
import numpy as np
from pandas import json_normalize
import json
#from selenium.webdriver.support.expected_conditions import element_selection_state_to_be
#import matplotlib.pyplot as plt
#import seaborn as sns
import streamlit as st
import base64
import plotly.express as px
from PIL import Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
#import matplotlib.pyplot as plt
import io
from math import floor
## sentiment analysis
import re
import nltk
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
##stock packages
import yfinance as yfin
import finnhub
## web crawling packages
import time
from datetime import date
from datetime import datetime
import datetime
import requests
from lxml import html
import csv
##functions
def get_table_download_link(df, filename='download', message='Download csv result file'):
    """Generates a link allowing the data in a given panda dataframe to be downloaded

    in: dataframe
    out: href string

    Bug fix: the download attribute previously hard-coded its file name and
    ignored the *filename* parameter entirely; it now uses "<filename>.csv".
    """
    csv_text = df.to_csv(index=False)  # renamed so the stdlib `csv` module is not shadowed
    b64 = base64.b64encode(csv_text.encode()).decode()  # some strings <-> bytes conversions necessary here
    href = f'<a href="data:file/csv;base64,{b64}" download="{filename}.csv" >{message}</a>'
    return href
def RSI(data, time_window):
    """Relative Strength Index of the price series *data*.

    Gains and losses are smoothed with an exponentially weighted mean
    using com=time_window-1 (decay alpha = 1/time_window); the first
    time_window-1 values are NaN (min_periods).
    """
    delta = data.diff(1).dropna()
    # Positive moves only (zero elsewhere) / negative moves only.
    gains = delta.clip(lower=0)
    losses = delta.clip(upper=0)
    avg_gain = gains.ewm(com=time_window - 1, min_periods=time_window).mean()
    avg_loss = losses.ewm(com=time_window - 1, min_periods=time_window).mean()
    strength = (avg_gain / avg_loss).abs()
    return 100 - 100 / (1 + strength)
def get_macd(price, slow, fast, smooth):
exp1 = price.ewm(span = fast, adjust = False).mean()
exp2 = price.ewm(span = slow, adjust = False).mean()
macd = pd.DataFrame(exp1 - exp2).rename(columns = {'close':'macd'})
signal = pd.DataFrame(macd.ewm(span = smooth, adjust = False).mean()).rename(columns = {'macd':'signal'})
hist = pd.DataFrame(macd['macd'] - signal['signal']).rename(columns = {0:'hist'})
frames = [macd, signal, hist]
df = pd.concat(frames, join = 'inner', axis = 1)
return df
def get_option_chain(ticker_desc):
    """Scrape the full option chain for *ticker_desc* from the Nasdaq API.

    Returns a 10-tuple: (raw option_data frame, calls frame, puts frame,
    combined calls+puts frame, max/min strike, 25th/75th strike
    percentiles, first/last expiry dates).
    NOTE(review): the column names and the lastTrade text format below are
    assumptions about the live api.nasdaq.com response schema — they break
    silently if Nasdaq changes the payload.
    """
    ## running stock option scraping
    base1 = "https://api.nasdaq.com/api/quote/"
    base2 = "/option-chain?assetclass=stocks&fromdate=all&todate=undefined&excode=oprac&callput=callput&money=all&type=all"
    url = base1 + str(ticker_desc) + base2
    payload={}
    # Nasdaq rejects default python user agents; these headers mimic Postman.
    headers = {
    'User-Agent': 'PostmanRuntime/7.28.4',
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Content-Length': '1970',
    'Connection': 'keep-alive'
    }
    response = requests.request("GET", url, headers=headers, data=payload)
    response_text = json.loads(response.text)
    # Parse the last trade price out of the "last trade $X as of ..." text.
    price = json_normalize(response_text['data'])
    price = pd.DataFrame(price['lastTrade'], columns=['lastTrade'])
    price[['last', 'trade', 'price', 'as', 'of', 'sep', 'day', 'year']]= price["lastTrade"].str.split(" ", expand = True)
    price_new = price['price'].str[1:][0]
    price_new = float(price_new)
    option_data = json_normalize(response_text['data']['table'], 'rows')
    option_data = option_data.drop(columns = ['drillDownURL'])
    # Expiry group only appears on the first row of each block: forward-fill.
    option_data['expirygroup'] = option_data['expirygroup'].replace('', np.nan).ffill()
    option_data['expirygroup'] = pd.to_datetime(option_data['expirygroup'])
    option_data = option_data.dropna(subset = ['strike'])
    # Split the c_*/p_* column pairs into separate call and put frames.
    calls = option_data[['expirygroup', 'expiryDate', 'c_Last', 'c_Change', 'c_Bid', 'c_Ask', 'c_Volume', 'c_Openinterest', 'c_colour', 'strike']].copy()
    calls = calls.rename(columns = {'c_Last': 'Last', 'c_Change': 'Change', 'c_Bid': 'Bid', 'c_Ask': 'Ask', 'c_Volume': 'Volume', 'c_Openinterest': 'Openinterest', 'c_colour': 'colour'})
    calls['type'] = "Call"
    calls['strike'] = calls['strike'].astype(float)
    calls['money_category'] = np.where(calls['strike'] <= price_new, "In the Money", "Out of the Money")
    puts = option_data[['expirygroup', 'expiryDate', 'p_Last', 'p_Change', 'p_Bid', 'p_Ask', 'p_Volume', 'p_Openinterest', 'p_colour', 'strike']].copy()
    puts = puts.rename(columns = {'p_Last': 'Last', 'p_Change': 'Change', 'p_Bid': 'Bid', 'p_Ask': 'Ask', 'p_Volume': 'Volume', 'p_Openinterest': 'Openinterest', 'p_colour': 'colour'})
    puts['type'] = "Put"
    puts['strike'] = puts['strike'].astype(float)
    puts['money_category'] = np.where(puts['strike'] >= price_new, "In the Money", "Out of the Money")
    # Combine, replace the API's '--' placeholders and coerce numerics.
    option_data_new = calls.append(puts)
    option_data_new = option_data_new.replace('--', 0)
    option_data_new['Last'] = option_data_new['Last'].astype(float)
    option_data_new['Change'] = option_data_new['Change'].astype(float)
    option_data_new['Bid'] = option_data_new['Bid'].astype(float)
    option_data_new['Ask'] = option_data_new['Ask'].astype(float)
    option_data_new['Volume'] = option_data_new['Volume'].astype(float)
    option_data_new['Openinterest'] = option_data_new['Openinterest'].astype(float)
    option_data_new['strike'] = option_data_new['strike'].astype(float)
    option_data_new['new_date']= option_data_new['expirygroup']
    #option_data_new['expirygroup'] = option_data_new['expirygroup'].astype(str)
    # Summary stats used by the caller to seed the UI filter widgets.
    maxStrikeValue = option_data_new['strike'].max()
    minStrikeValue = option_data_new['strike'].min()
    twenty_fifth_per = np.percentile(option_data_new['strike'], 25)
    seventy_fifth_per = np.percentile(option_data_new['strike'], 75)
    first_date = option_data_new['expirygroup'].head(1)
    last_date = option_data_new['expirygroup'].tail(1)
    start_date = pd.to_datetime(first_date.unique()[0])
    end_date = pd.to_datetime(last_date.unique()[0])
    return option_data, calls, puts, option_data_new, maxStrikeValue, minStrikeValue, twenty_fifth_per, seventy_fifth_per, start_date, end_date
def app():
    """Render the single-stock exploration page.

    Lets the user pick a ticker, then shows (a) technical price charts
    (candles / MACD / RSI) built from finnhub daily candles, (b) analyst
    recommendations via yfinance, and (c) VADER sentiment analysis of the
    last month of finnhub company news.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the nesting mirrors the near-identical page in
    option_chain_activity.py — confirm against the original layout.
    """
    ##Setting Streamlit Settings
    #st.set_page_config(layout="wide")
    ##importing files needed for web app
    ticker_selection = pd.read_csv('data/tickers_only.csv')
    # Placeholder row so the selectbox opens on a "no stock chosen" state.
    filler_df = pd.DataFrame({"full": ['Please Search for a Stock'],"url": 'Please Search for a Stock', "ticker": 'Please Search for a Stock', "company_name": 'Please Search for a Stock'})
    today = date.today()
    month_ago = today - datetime.timedelta(days=31)
    year_ago = today - datetime.timedelta(days=365)
    # finnhub's candle endpoint takes unix-second timestamps.
    unixtime_today = time.mktime(today.timetuple())
    unixtime_year = time.mktime(year_ago.timetuple())
    # FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # drop-in replacement.
    ticker_selection = pd.concat([filler_df, ticker_selection])
    col1, col2 = st.beta_columns(2)
    ## Streamlit
    with col1:
        st.write("""
# Superior Returns Stock Exploration Application
""")
        st.write("""## Data Sources:""")
        st.write("""1.) finnhub python package""")
        st.write("""2.) https://stockanalysis.com/stocks/ used for crawling avaialble stock tickers.""")
    ## Need to make container and columns for Filters
    st.write("## Filters")
    filter_expander = st.beta_expander(" ", expanded=True)
    with filter_expander:
        col3, col4 = st.beta_columns(2)
        with col3:
            pick_ticker = st.selectbox("Select Stock Ticker", ticker_selection["full"].unique())
            st.write("You have selected", pick_ticker)
        if pick_ticker == "Please Search for a Stock":
            pass
        else:
            with col4:
                st.write(pick_ticker[0])
                ticker_row_selected = ticker_selection.loc[ticker_selection["full"] == pick_ticker]
                ticker_desc = ticker_row_selected['ticker'].unique()
                ticker_desc = ticker_desc[0]
                st.write("Ticker Symbol", ticker_desc, ".")
                ticker_url = ticker_row_selected['url'].unique()
                ticker_url = ticker_url[0]
                st.write("Ticker Url", ticker_url)
                # Company logo URL comes from yfinance metadata.
                ticker = yfin.Ticker(ticker_desc)
                logo = ticker.info
                logo = json_normalize(logo)
                logo = logo['logo_url']
                logo = logo[0]
                response = requests.get(logo)
                image_bytes = io.BytesIO(response.content)
                img = Image.open(image_bytes)
                st.image(img)
    if pick_ticker == "Please Search for a Stock":
        pass
    else:
        st.write("## Price Performance")
        price_movement_expander = st.beta_expander(' ', expanded=True)
        ##making options to show different graphs
        # Period_value is the number of TRADING days kept via tail() below.
        period_list = {'Period':['1 Week', '1 Month', '3 Months', '6 Months', '1 Year'], 'Period_value':[5, 23, 69, 138, 250]}
        period_dict = pd.DataFrame(period_list)
        with price_movement_expander:
            col5, col6 = st.beta_columns((1,3))
            with col5:
                period_selection = st.selectbox("Select Time Period", period_dict['Period'].unique())
                period_row_selected = period_dict.loc[period_dict["Period"] == period_selection]
                period_desc = period_row_selected['Period_value'].unique()
                period_desc = period_desc[0]
                chart_selection = st.radio("Pick Which Stock Price Analysis you would like to look at", ("Candles", "MACD (Moving Average Convergence Divergence)", "RSI (Relative Strength Indictor)", "All"))
            with col6:
                # Setup client
                finnhub_client = finnhub.Client(api_key="c3qcjnqad3i9vt5tl68g")
                res = finnhub_client.stock_candles(ticker_desc, 'D', int(unixtime_year), int(unixtime_today))
                price_data = pd.DataFrame(res)
                price_data.columns = ['close', 'high', 'low', 'open', 'status', 'timestamp', 'volume']
                price_data['date'] = pd.to_datetime(price_data['timestamp'], unit='s')
                price_data.index = price_data['date']
                price_data = price_data.drop(columns=['date'])
                price_data['RSI'] = RSI(price_data['close'], 14)
                # 30-day Bollinger bands (rolling mean +/- 2 std dev).
                price_data['30_ma'] = price_data['close'].rolling(30).mean()
                price_data['30_st_dev'] = price_data['close'].rolling(30).std()
                price_data['Upper Band'] = price_data['30_ma'] + (price_data['30_st_dev'] * 2)
                # FIX: this line previously recomputed 'Upper Band'; the
                # intended lower band subtracts the deviation.
                price_data['Lower Band'] = price_data['30_ma'] - (price_data['30_st_dev'] * 2)
                # Standard MACD parameters: 12/26 EMAs with a 9-period signal.
                slow = 26
                fast = 12
                smooth = 9
                exp1 = price_data['close'].ewm(span = fast, adjust = False).mean()
                exp2 = price_data['close'].ewm(span = slow, adjust = False).mean()
                price_data['macd'] = exp1 - exp2
                price_data['signal'] = price_data['macd'].ewm(span = smooth, adjust = False).mean()
                price_data['hist'] = price_data['macd'] - price_data['signal']
                price_data['macd_buy'] = np.where(price_data['macd'] > price_data['signal'], 1, 0)
                price_data['macd_sell'] = np.where(price_data['macd'] < price_data['signal'], 1, 0)
                price_data = price_data.tail(period_desc)
                if chart_selection == "Candles":
                    # Create Candlestick Chart
                    candles = go.Figure(data=[go.Candlestick(x=price_data.index,
                                                             open=price_data['open'],
                                                             high=price_data['high'],
                                                             low=price_data['low'],
                                                             close=price_data['close'])])
                    candles.add_trace(go.Scatter(x=price_data.index, y=price_data['close'], name='Stock Close Price', opacity=0.5))
                    candles.update_yaxes(title="Stock Price")
                    candles.update_xaxes(title="Date")
                    candles.update_layout(title="Daily Stock Pricing")
                    st.plotly_chart(candles, use_container_width = True)
                    st.markdown(get_table_download_link(price_data), unsafe_allow_html=True)
                elif chart_selection == "MACD (Moving Average Convergence Divergence)":
                    # Create MACD Chart
                    macd = make_subplots(specs=[[{"secondary_y": True}]])
                    macd.add_trace(go.Scatter(x=price_data.index, y=price_data['signal'], name='MACD Signal'), secondary_y=True)
                    macd.add_trace(go.Scatter(x=price_data.index, y=price_data['macd'], name='MACD Formula'), secondary_y=True)
                    # Set y-axes titles
                    macd.update_yaxes(title_text="<b>Candles</b> Stock Price Data", secondary_y=False)
                    macd.update_yaxes(title_text="<b>MACD</b> Signals", secondary_y=True)
                    macd.update_xaxes(title="Date")
                    macd.update_layout(title="Stock MACD Graph")
                    st.plotly_chart(macd, title="Stock RSI Graph", use_container_width = True)
                    st.markdown('**Note: In general the guidance is when these two lines cross this should signal some action to be taken. When the MACD Signal > MACD Formula Line you should sell the stock based on this technical. And vice versa.**')
                    st.markdown(get_table_download_link(price_data), unsafe_allow_html=True)
                elif chart_selection == "RSI (Relative Strength Indictor)":
                    # Create RSI Chart
                    rsi = make_subplots(specs=[[{"secondary_y": True}]])
                    rsi.add_trace(go.Scatter(x=price_data.index, y=price_data['RSI'], name='RSI Value'), secondary_y=False)
                    # 30/70 are the conventional RSI oversold/overbought lines.
                    rsi.add_hline(y=30, line_dash="dot", annotation_text="Under Bought Signal", annotation_position="bottom right", line_color='green')
                    rsi.add_hline(y=70, line_dash="dot", annotation_text="Over Bought Signal", annotation_position="bottom right", line_color='red')
                    rsi.add_trace(go.Scatter(x=price_data.index, y=price_data['close'], name='Stock Price Close', opacity=0.3), secondary_y=True)
                    rsi.update_yaxes(title_text="<b>RSI</b> Relative Strength Indictor", secondary_y=False)
                    rsi.update_yaxes(title_text="<b>Stock Price</b> Close", secondary_y=True)
                    rsi.update_xaxes(title="Date")
                    rsi.update_layout(title="Stock RSI Graph")
                    st.plotly_chart(rsi, title="Stock RSI Graph", use_container_width = True)
                    st.markdown(get_table_download_link(price_data), unsafe_allow_html=True)
                else:
                    # "All": render candles, MACD and RSI charts together.
                    candles = go.Figure(data=[go.Candlestick(x=price_data.index,
                                                             open=price_data['open'],
                                                             high=price_data['high'],
                                                             low=price_data['low'],
                                                             close=price_data['close'])])
                    candles.add_trace(go.Scatter(x=price_data.index, y=price_data['close'], name='Stock Close Price', opacity=0.5))
                    candles.update_yaxes(title="Stock Price")
                    candles.update_xaxes(title="Date")
                    candles.update_layout(title="Daily Stock Pricing")
                    # Create MACD Chart (with candles on the primary axis here).
                    macd = make_subplots(specs=[[{"secondary_y": True}]])
                    macd.add_trace(go.Candlestick(x=price_data.index, open=price_data['open'], high=price_data['high'], low=price_data['low'], close=price_data['close']), secondary_y=False)
                    macd.add_trace(go.Scatter(x=price_data.index, y=price_data['signal'], name='MACD Signal'), secondary_y=True)
                    macd.add_trace(go.Scatter(x=price_data.index, y=price_data['macd'], name='MACD Formula'), secondary_y=True)
                    # Set y-axes titles
                    macd.update_yaxes(title_text="<b>Candles</b> Stock Price Data", secondary_y=False)
                    macd.update_yaxes(title_text="<b>MACD</b> Signals", secondary_y=True)
                    macd.update_xaxes(title="Date")
                    macd.update_layout(title="Stock MACD Graph")
                    # Create RSI Chart
                    rsi = make_subplots(specs=[[{"secondary_y": True}]])
                    rsi.add_trace(go.Scatter(x=price_data.index, y=price_data['RSI'], name='RSI Value'), secondary_y=False)
                    rsi.add_hline(y=30, line_dash="dot", annotation_text="Under Bought Signal", annotation_position="bottom right", line_color='green')
                    rsi.add_hline(y=70, line_dash="dot", annotation_text="Over Bought Signal", annotation_position="bottom right", line_color='red')
                    rsi.add_trace(go.Scatter(x=price_data.index, y=price_data['close'], name='Stock Price Close', opacity=0.3), secondary_y=True)
                    rsi.update_yaxes(title_text="<b>RSI</b> Relative Strength Indictor", secondary_y=False)
                    rsi.update_yaxes(title_text="<b>Stock Price</b> Close", secondary_y=True)
                    rsi.update_xaxes(title="Date")
                    rsi.update_layout(title="Stock RSI Graph")
                    st.plotly_chart(candles, use_container_width = True)
                    st.plotly_chart(macd, title="Stock RSI Graph", use_container_width = True)
                    st.plotly_chart(rsi, title="Stock RSI Graph", use_container_width = True)
        st.write("## Analyst Recommendations")
        recommendations_expander = st.beta_expander(" ", expanded=True)
        with recommendations_expander:
            ticker = yfin.Ticker(ticker_desc)
            recommendations = ticker.recommendations
            # Keep only primary ("main") ratings issued since 2018.
            recommendations = recommendations[recommendations['Action'] == 'main']
            recommendations = recommendations[recommendations.index > "2018-01-01"]
            recommendations_agg = pd.DataFrame(recommendations.groupby(['To Grade', 'Firm']).size()).reset_index()
            recommendations_agg.columns = ['Grade', 'Firm', 'Count of Analyst']
            st.plotly_chart(px.bar(recommendations_agg, x="Grade" , y="Count of Analyst", color='Firm', title="Recommendations by Firm since 2018"), use_container_width=True)
            col7, col8 = st.beta_columns(2)
            with col7:
                st.dataframe(recommendations_agg)
            with col8:
                st.dataframe(recommendations)
        # NOTE: a large commented-out option-chain section was removed here;
        # the live version of that analysis lives in option_chain_activity.py.
        ## pulling stock news from finnhub
        news_response = finnhub_client.company_news(ticker_desc, _from= month_ago, to= today)
        news_df = json_normalize(news_response)
        news_df = news_df.drop_duplicates(subset = ['headline'])
        news_df['datetime'] = news_df['datetime'].astype(int)
        # finnhub timestamps are unix seconds; keep just the YYYY-MM-DD part.
        news_df['date'] = pd.to_datetime(news_df['datetime'], unit='s')
        news_df['date'] = news_df['date'].astype(str)
        news_df['date'] = news_df['date'].str[:10]
        ## sentiment filtering
        analyzer = SentimentIntensityAnalyzer()
        news_df['compound'] = ([analyzer.polarity_scores(x)['compound'] for x in news_df['summary']])
        # Fall back to the headline when an article has no summary text.
        news_df['compound'] = np.where(news_df['summary'] == "", ([analyzer.polarity_scores(x)['compound'] for x in news_df['headline']]), ([analyzer.polarity_scores(x)['compound'] for x in news_df['summary']]))
        # Bucket VADER compound scores: < -0.10 negative, > 0.10 positive.
        news_df['article_sentiment_bucket'] = pd.cut(news_df.compound, [-np.inf, -.10, .10, np.inf], labels=['negative', 'neutral', 'positive'])
        news_df_short = news_df[['date', 'headline', 'image', 'source', 'summary', 'compound', 'article_sentiment_bucket', 'url']].copy()
        news_sentiment_desc = pd.DataFrame(news_df_short['compound'].describe())
        news_sentiment_desc = news_sentiment_desc.rename(columns = {'compound': 'Stock Sentiment Score'})
        ## creating dataframes for news visuals
        source_sent_agg = news_df.groupby(['source', 'article_sentiment_bucket']).agg({'compound': 'mean', 'headline': 'count'}).reset_index()
        source_sent_agg = source_sent_agg.dropna()
        source_bucket_agg = news_df.groupby(['source', 'article_sentiment_bucket']).agg({'headline': 'count'}).reset_index()
        source_bucket_agg = source_bucket_agg.dropna()
        sent_agg = news_df.groupby('article_sentiment_bucket').agg({'compound': 'mean', 'headline': 'count'}).reset_index()
        sent_agg = sent_agg.dropna()
        date_sent_agg = news_df.groupby('date').agg({'compound': 'mean'}).reset_index()
        date_sent_agg = date_sent_agg.dropna()
        ## creating dataframes for most positive/negative news
        positive_news = news_df_short.sort_values(by=['compound'], ascending=False).head(10)
        negative_news = news_df_short.sort_values(by=['compound'], ascending=True).head(10)
        st.write("## News about", pick_ticker)
        news_exapnder = st.beta_expander(" ", expanded=True)
        with news_exapnder:
            col9, col10 = st.beta_columns((2,1))
            with col9:
                # Create Daily Sentiment Chart
                daily_stock_sentiment = make_subplots(specs=[[{"secondary_y": False}]])
                daily_stock_sentiment.add_trace(go.Scatter(x=date_sent_agg['date'], y=date_sent_agg['compound'], name='Sentiment Value'), secondary_y=False)
                daily_stock_sentiment.add_hline(y=.10, line_dash="dot", annotation_text="Positive Sentiment Threshold", annotation_position="bottom right", line_color='green')
                daily_stock_sentiment.add_hline(y=-.10, line_dash="dot", annotation_text="Negative Sentiment Threshold", annotation_position="bottom right", line_color='red')
                daily_stock_sentiment.update_yaxes(title_text="News Sentiment", secondary_y=False)
                daily_stock_sentiment.update_xaxes(title="Date")
                daily_stock_sentiment.update_layout(title="Daily Stock Sentiment")
                st.plotly_chart(daily_stock_sentiment, use_container_width = True)
                # Create Source Sentiment Chart
                st.plotly_chart(px.scatter(source_sent_agg, x="source", y="compound", size ="headline", color = 'article_sentiment_bucket', hover_data=['compound'], title="Sentiment by News Source"),use_container_width=True)
                st.write("## Top Most Positive News Atricles")
                st.write(positive_news.astype('object'))
                st.markdown(get_table_download_link(positive_news), unsafe_allow_html=True)
                st.write("## Top Most Negative News Atricles")
                st.write(negative_news.astype('object'))
                st.markdown(get_table_download_link(negative_news), unsafe_allow_html=True)
            with col10:
                st.plotly_chart(px.bar(sent_agg, y="article_sentiment_bucket", x="compound", orientation='h', title="Average Sentiment Score by Bucket"), use_container_width=True)
                st.plotly_chart(px.bar(sent_agg, y="article_sentiment_bucket", x="headline", orientation='h', title="Count of News Articles by Bucket"), use_container_width=True)
            # Full article table rendered at expander width below the columns.
            st.write(news_df_short.astype('object'))
            st.markdown(get_table_download_link(news_df_short), unsafe_allow_html=True)
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,762
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/home.py
|
import streamlit as st
import base64
from PIL import Image
def app():
    """Render the landing page: centered banner image plus usage instructions.

    FIX: indentation was lost in the source; structure reconstructed so the
    banner and instructions sit in the wide middle column.
    """
    st.markdown("<h1 style='text-align: center;'>Superior Returns Stock Analysis Home Page</h1>", unsafe_allow_html=True)
    # 1:2:1 split — the middle column is twice as wide, centering the content.
    col1, col2, col3 = st.beta_columns([1,2,1])
    with col2:
        bull = Image.open('bear_bull_new.jpg')
        st.image(bull, caption='Superior Returns', width=680)
        st.write("""## See Instructions Below:""")
        st.write("""### 1.) Stock Analysis Page: Here you can search for stocks you are interested in and see key details that are helpful for understanding the performance of a stock.""")
        st.write(""" a.) Technical Indictor Analysis including Candles, MACD (Moving Average Convergence Divergence) Indicators, and RSI (Relative Strength Indicators)""")
        st.write(""" b.) Analyst Recommendation Data""")
        st.write(""" c.) Stock Option Chain Data""")
        st.write(""" d.) Sentiment Analysis from Stock specific news articles""")
        st.write("""### 2.) Crypto Page: Here you can search for crypto you are interested in and see key details that are helpful for understanding the performance of a crypto currency.""")
        st.write(""" a.) Technical Indictor Analysis including Candles, MACD (Moving Average Convergence Divergence) Indicators, and RSI (Relative Strength Indicators)""")
        st.write("""### 3.) Industry Trends Analysis Page: Coming Soon!""")
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,763
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/app.py
|
import pandas as pd
import numpy as np
from pandas import json_normalize
#from selenium.webdriver.support.expected_conditions import element_selection_state_to_be
#import matplotlib.pyplot as plt
#import seaborn as sns
import streamlit as st
import base64
import plotly.express as px
from PIL import Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
#import matplotlib.pyplot as plt
import io
from math import floor
## sentiment analysis
import re
import nltk
nltk.download('vader_lexicon')
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
##stock packages
import yfinance as yfin
import finnhub
## web crawling packages
import time
from datetime import date
from datetime import datetime
import datetime
import requests
from lxml import html
import csv
import home
import stock_trends
import stock_analysis
import crypto
import option_chain_activity
# Sidebar navigation: maps a display label to the page module that renders it.
# Each module exposes an `app()` entry point.
PAGES = {
"Home": home,
"Stock Analysis": stock_analysis,
"Stock Trends": stock_trends,
"Crypto": crypto,
"Option Chain Analysis": option_chain_activity
}
##Setting Streamlit Settings
st.set_page_config(layout="wide")
st.sidebar.title('Navigation')
# Radio selection drives which page module's app() runs on this rerun.
selection = st.sidebar.radio("Go to", list(PAGES.keys()))
page = PAGES[selection]
page.app()
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,764
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/option_chain_activity.py
|
import pandas as pd
import numpy as np
from pandas import json_normalize
import json
#from selenium.webdriver.support.expected_conditions import element_selection_state_to_be
#import matplotlib.pyplot as plt
#import seaborn as sns
import streamlit as st
import base64
import plotly.express as px
from PIL import Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
#import matplotlib.pyplot as plt
import io
from math import floor
## sentiment analysis
import re
import nltk
from nltk.corpus import stopwords
from nltk.sentiment.vader import SentimentIntensityAnalyzer
##stock packages
import yfinance as yfin
import finnhub
## web crawling packages
import time
from datetime import date
from datetime import datetime
import datetime
import requests
from lxml import html
import csv
##functions
def get_table_download_link(df, filename='download', message='Download csv result file'):
    """Generate an HTML anchor that downloads *df* as a CSV file.

    Args:
        df: pandas DataFrame to export.
        filename: base name (without extension) for the downloaded file.
        message: visible link text.

    Returns:
        str: an ``<a href="data:file/csv;base64,...">`` HTML snippet suitable
        for ``st.markdown(..., unsafe_allow_html=True)``.
    """
    # Renamed local to avoid shadowing the imported ``csv`` module.
    csv_text = df.to_csv(index=False)
    b64 = base64.b64encode(csv_text.encode()).decode()  # some strings <-> bytes conversions necessary here
    # FIX: the ``filename`` parameter was previously ignored and a fixed name
    # was embedded in the download attribute.
    href = f'<a href="data:file/csv;base64,{b64}" download="{filename}.csv" >{message}</a>'
    return href
def get_option_chain(ticker_desc):
    """Scrape the full option chain for *ticker_desc* from the Nasdaq API.

    Performs a live HTTP request, splits the table into calls and puts,
    tags each row as in/out of the money relative to the last trade price,
    and computes strike/expiry summary bounds for the page's slider widgets.

    Returns:
        tuple: (raw option table, calls frame, puts frame, combined
        calls+puts frame, max strike, min strike, 25th and 75th strike
        percentiles, first and last expiry dates).
    """
    ## running stock option scraping
    base1 = "https://api.nasdaq.com/api/quote/"
    base2 = "/option-chain?assetclass=stocks&fromdate=all&todate=undefined&excode=oprac&callput=callput&money=all&type=all"
    url = base1 + str(ticker_desc) + base2
    payload={}
    # Browser-like headers: Nasdaq rejects the default requests user agent.
    headers = {
    'User-Agent': 'PostmanRuntime/7.28.4',
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br'
    }
    response = requests.get(url, headers=headers, data=payload)
    response_text = json.loads(response.text)
    # NOTE(review): the last-trade field is a display string; splitting on
    # spaces and stripping the leading '$' recovers the price — fragile if
    # Nasdaq changes the wording.
    price = json_normalize(response_text['data'])
    price = pd.DataFrame(price['lastTrade'], columns=['lastTrade'])
    price[['last', 'trade', 'price', 'as', 'of', 'sep', 'day', 'year']]= price["lastTrade"].str.split(" ", expand = True)
    price_new = price['price'].str[1:][0]
    price_new = float(price_new)
    option_data = json_normalize(response_text['data']['table'], 'rows')
    option_data = option_data.drop(columns = ['drillDownURL'])
    # Expiry group appears only on the first row of each group; forward-fill
    # so every contract row carries its expiry.
    option_data['expirygroup'] = option_data['expirygroup'].replace('', np.nan).ffill()
    option_data['expirygroup'] = pd.to_datetime(option_data['expirygroup'])
    option_data = option_data.dropna(subset = ['strike'])
    # Split the wide table into call columns (c_*) and put columns (p_*).
    calls = option_data[['expirygroup', 'expiryDate', 'c_Last', 'c_Change', 'c_Bid', 'c_Ask', 'c_Volume', 'c_Openinterest', 'c_colour', 'strike']].copy()
    calls = calls.rename(columns = {'c_Last': 'Last', 'c_Change': 'Change', 'c_Bid': 'Bid', 'c_Ask': 'Ask', 'c_Volume': 'Volume', 'c_Openinterest': 'Openinterest', 'c_colour': 'colour'})
    calls['type'] = "Call"
    calls['strike'] = calls['strike'].astype(float)
    calls['money_category'] = np.where(calls['strike'] <= price_new, "In the Money", "Out of the Money")
    puts = option_data[['expirygroup', 'expiryDate', 'p_Last', 'p_Change', 'p_Bid', 'p_Ask', 'p_Volume', 'p_Openinterest', 'p_colour', 'strike']].copy()
    puts = puts.rename(columns = {'p_Last': 'Last', 'p_Change': 'Change', 'p_Bid': 'Bid', 'p_Ask': 'Ask', 'p_Volume': 'Volume', 'p_Openinterest': 'Openinterest', 'p_colour': 'colour'})
    puts['type'] = "Put"
    puts['strike'] = puts['strike'].astype(float)
    puts['money_category'] = np.where(puts['strike'] >= price_new, "In the Money", "Out of the Money")
    # FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
    # drop-in replacement.
    option_data_new = pd.concat([calls, puts])
    # '--' is Nasdaq's placeholder for "no data"; treat it as zero before
    # the numeric casts below.
    option_data_new = option_data_new.replace('--', 0)
    option_data_new['Last'] = option_data_new['Last'].astype(float)
    option_data_new['Change'] = option_data_new['Change'].astype(float)
    option_data_new['Bid'] = option_data_new['Bid'].astype(float)
    option_data_new['Ask'] = option_data_new['Ask'].astype(float)
    option_data_new['Volume'] = option_data_new['Volume'].astype(float)
    option_data_new['Openinterest'] = option_data_new['Openinterest'].astype(float)
    option_data_new['strike'] = option_data_new['strike'].astype(float)
    option_data_new['new_date']= option_data_new['expirygroup']
    # Strike bounds / quartiles feed the strike-price slider defaults.
    maxStrikeValue = option_data_new['strike'].max()
    minStrikeValue = option_data_new['strike'].min()
    twenty_fifth_per = np.percentile(option_data_new['strike'], 25)
    seventy_fifth_per = np.percentile(option_data_new['strike'], 75)
    # Rows are in expiry order, so head/tail give the date-range bounds.
    first_date = option_data_new['expirygroup'].head(1)
    last_date = option_data_new['expirygroup'].tail(1)
    start_date = pd.to_datetime(first_date.unique()[0])
    end_date = pd.to_datetime(last_date.unique()[0])
    return option_data, calls, puts, option_data_new, maxStrikeValue, minStrikeValue, twenty_fifth_per, seventy_fifth_per, start_date, end_date
def app():
    """Streamlit page: option-chain analysis for a user-selected stock ticker.

    Reads the ticker list from data/tickers_only.csv, lets the user pick a
    ticker, then fetches and filters its option chain (via the module-level
    get_option_chain helper) and renders ratio / volume / open-interest charts.
    """
    ##importing files needed for web app
    today = date.today()
    year_ago = today - datetime.timedelta(days=365)
    unixtime_today = time.mktime(today.timetuple())
    unixtime_year = time.mktime(year_ago.timetuple())
    # NOTE(review): API key hard-coded in source — consider moving to config.
    finnhub_client = finnhub.Client(api_key="c3qcjnqad3i9vt5tl68g")
    ##importing files needed for web app
    ticker_selection = pd.read_csv('data/tickers_only.csv')
    # Placeholder row so the selectbox opens on a prompt instead of a ticker.
    filler_df = pd.DataFrame({"full": ['Please Search for a Stock'],"url": 'Please Search for a Stock', "ticker": 'Please Search for a Stock', "company_name": 'Please Search for a Stock'})
    ticker_selection = filler_df.append(ticker_selection)
    col1, col2 = st.beta_columns(2)
    ## Streamlit
    with col1:
        st.write("""
# Superior Returns Stock Option Chain Analysis
""")
        st.write("""## Data Sources:""")
        st.write("""1.) https://www.nasdaq.com/market-activity/stocks/""")
        #st.write("""2.) used for crawling avaialble Crypto tickers""")
    #with col2:
    #bull = Image.open('image.jpg')
    #st.image(bull, caption='Superior Returns', use_column_width=True) #use_column_width=True, width = 100)
    ## Need to make container and columns for Filters
    st.write("## Filters")
    filter_expander = st.beta_expander(" ", expanded=True)
    with filter_expander:
        col3, col4 = st.beta_columns(2)
        with col3:
            pick_ticker = st.selectbox("Select Stock Ticker", ticker_selection["full"].unique())
            #pick_ticker_all = pick_ticker
            st.write("You have selected", pick_ticker)
        if pick_ticker == "Please Search for a Stock":
            pass
        else:
            with col4:
                st.write(pick_ticker[0])
                # Resolve the selected label back to ticker symbol and URL.
                ticker_row_selected = ticker_selection.loc[ticker_selection["full"] == pick_ticker]
                ticker_desc = ticker_row_selected['ticker'].unique()
                ticker_desc = ticker_desc[0]
                st.write("Ticker Symbol", ticker_desc, ".")
                ticker_url = ticker_row_selected['url'].unique()
                ticker_url = ticker_url[0]
                st.write("Ticker Url", ticker_url)
                # Fetch and display the company logo from yfinance metadata.
                ticker = yfin.Ticker(ticker_desc)
                logo = ticker.info
                logo = json_normalize(logo)
                logo = logo['logo_url']
                logo = logo[0]
                response = requests.get(logo)
                image_bytes = io.BytesIO(response.content)
                img = Image.open(image_bytes)
                st.image(img)
    if pick_ticker == "Please Search for a Stock":
        pass
    else:
        option_data, calls, puts, option_data_new, maxStrikeValue, minStrikeValue, twenty_fifth_per, seventy_fifth_per, start_date, end_date = get_option_chain(ticker_desc)
        st.write("## Option Chain Activity for", pick_ticker)
        options_expander = st.beta_expander(" ", expanded=True)
        with options_expander:
            st.write("""https://www.nerdwallet.com/article/investing/options-trading-definitions used for understanding option chain data terms.""")
            #st.write(option_data.astype('object'))
            st.write("### Options Filters:")
            # NOTE(review): date_selection_new is built but never used below.
            date_selection = pd.DataFrame(option_data_new['expirygroup'])
            dummy_date_selector = pd.DataFrame({'expirygroup': ['Please Select a Date']})
            date_selection_new = dummy_date_selector.append(date_selection)
            # Sliders bound the expiry-date and strike-price ranges.
            date_slider = st.slider('Select date range', start_date.date(), end_date.date(), (start_date.date(), end_date.date()))
            option_strike_price_slider = st.slider("What Strike Prices would you like included?", float(minStrikeValue), float(maxStrikeValue), (float(twenty_fifth_per), float(seventy_fifth_per)))
            low_strike = option_strike_price_slider[0]
            high_strike = option_strike_price_slider[1]
            lowDate = date_slider[0]
            st.write(lowDate)
            highDate = date_slider[1]
            st.write(highDate)
            # Filter the tidy (long-format) option frame by date then strike.
            option_data_new['expirygroup'] = pd.to_datetime(option_data_new['expirygroup']).dt.date
            date_mask1 = (option_data_new['expirygroup'] >= lowDate) & (option_data_new['expirygroup'] <= highDate)
            option_data_new = option_data_new.loc[date_mask1]
            strike_mask1 = (option_data_new['strike'] >= low_strike) & (option_data_new['strike'] <= high_strike)
            option_data_new = option_data_new.loc[strike_mask1]
            calls_clean = option_data_new[option_data_new['type'] == 'Call']
            puts_clean = option_data_new[option_data_new['type'] == 'Put']
            # The wide-format frame uses '--' for missing values; zero them
            # before numeric conversion, then apply the same filters.
            option_data = option_data.replace('--', 0)
            option_data['c_Volume'] = option_data['c_Volume'].astype(float)
            option_data['p_Volume'] = option_data['p_Volume'].astype(float)
            option_data['c_Openinterest'] = option_data['c_Openinterest'].astype(float)
            option_data['p_Openinterest'] = option_data['p_Openinterest'].astype(float)
            option_data['strike'] = option_data['strike'].astype(float)
            option_data['expirygroup'] = pd.to_datetime(option_data['expirygroup']).dt.date
            date_mask2 = (option_data['expirygroup'] >= lowDate) & (option_data['expirygroup'] <= highDate)
            option_data = option_data.loc[date_mask2]
            strike_mask2 = (option_data['strike'] >= low_strike) & (option_data['strike'] <= high_strike)
            option_data = option_data.loc[strike_mask2]
            # Per-strike call/put ratios for executed volume and open interest.
            option_data_executed_volume_graph = pd.DataFrame(option_data.groupby('strike').agg({'c_Volume': 'sum', 'p_Volume': 'sum'})).reset_index()
            option_data_executed_volume_graph['call/put_ratio_Volume'] = option_data_executed_volume_graph['c_Volume'] / option_data_executed_volume_graph['p_Volume']
            option_data_open_interest_graph = pd.DataFrame(option_data.groupby('strike').agg({'c_Openinterest': 'sum', 'p_Openinterest': 'sum'})).reset_index()
            option_data_open_interest_graph['call/put_ratio_Openinterest'] = option_data_open_interest_graph['c_Openinterest'] / option_data_open_interest_graph['p_Openinterest']
            # Create Volume / Openinterest Chart
            option_ratios_graph = make_subplots(specs=[[{"secondary_y": True}]])
            option_ratios_graph.add_trace(go.Scatter(x=option_data_executed_volume_graph['strike'], y=option_data_executed_volume_graph['call/put_ratio_Volume'], name='call/put_ratio_Volume'), secondary_y=False)
            option_ratios_graph.add_trace(go.Scatter(x=option_data_open_interest_graph['strike'], y=option_data_open_interest_graph['call/put_ratio_Openinterest'], name='call/put_ratio_Openinterest'), secondary_y=True)
            # Set y-axes titles
            option_ratios_graph.update_yaxes(title_text="call/put_ratio_<b>Volume</b>", secondary_y=False)
            option_ratios_graph.update_yaxes(title_text="call/put_ratio_<b>Openinterest</b>", secondary_y=True)
            option_ratios_graph.update_xaxes(title="Strike Price")
            option_ratios_graph.update_layout(title="Stock Option Chain Ratio's")
            st.plotly_chart(option_ratios_graph, use_container_width=True)
            st.write("1.) Ratio used for chart above is based off said metrics calls / the same metrics puts. Trying to identify if there are any trends of people being call vs put heavy.")
            st.write("2.) Blue line is the indicator for Volume of options executed, Red line is the indicator for Openinterst in the market not yet executed.")
            col9, col10 = st.beta_columns(2)
            with col9:
                st.plotly_chart(px.bar(option_data_new, x="strike", y="Volume", color="type", hover_data=['Openinterest', 'expiryDate'], barmode = 'stack', title="Volume"))
            with col10:
                st.plotly_chart(px.bar(option_data_new, x="strike", y="Openinterest", color="type", hover_data=['Volume', 'expiryDate'], barmode = 'stack', title="Openinterest"))
            #st.write('Open Interest by Strike Price, size by volume of options that have been exercised')
            # Side-by-side call/put scatter: open interest vs strike, sized by volume.
            fig = make_subplots(rows=1, cols=2, column_titles=('Calls', 'Puts'))
            #fig = make_subplots(rows=1, cols=2, subplot_titles=("Calls", "Puts"))
            scatter1 = px.scatter(calls_clean, x="strike", y="Openinterest", size ="Volume", title="Calls")
            scatter2 = px.scatter(puts_clean, x="strike", y="Openinterest", size ="Volume", title="Puts")
            trace3 = scatter1['data'][0]
            trace4 = scatter2['data'][0]
            fig.add_trace(trace3, row=1, col=1)
            fig.add_trace(trace4, row=1, col=2)
            fig.update_layout(title="Open Interest by Strike Price, size by volume of options that have been exercised")
            st.plotly_chart(fig, use_container_width=True)
            st.write(option_data_new.astype('object'))
            # Download link for the filtered option frame (module helper).
            st.markdown(get_table_download_link(option_data_new), unsafe_allow_html=True)
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,765
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/functions.py.py
|
import streamlit as st
import base64
from PIL import Image
## web crawling packages
import time
from datetime import date
from datetime import datetime
import datetime
import requests
from lxml import html
import csv
import home
import stock_trends
import stock_analysis
# Sidebar label -> page module; each module must expose an app() callable.
PAGES = {
    "Home": home,
    "Stock Analysis": stock_analysis,
    "Stock Trends": stock_trends
}
##Setting Streamlit Settings
st.set_page_config(layout="wide")
st.sidebar.title('Navigation')
# The radio selection decides which page module is rendered on each rerun.
selection = st.sidebar.radio("Go to", list(PAGES.keys()))
page = PAGES[selection]
page.app()
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,766
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/crypto.py
|
import pandas as pd
import numpy as np
from pandas import json_normalize
#from selenium.webdriver.support.expected_conditions import element_selection_state_to_be
#import matplotlib.pyplot as plt
#import seaborn as sns
import streamlit as st
import base64
import plotly.express as px
from PIL import Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
#import matplotlib.pyplot as plt
import io
from math import floor
from datetime import date
import datetime
import time
##Crypto packages
import finnhub
def RSI(data, time_window):
    """Return the Relative Strength Index of a price series.

    Uses an exponentially weighted mean with com=time_window-1 (decay
    alpha = 1/time_window, see pandas DataFrame.ewm docs) over the
    separated positive and negative one-step price changes.

    :param data: pandas Series of prices.
    :param time_window: lookback window; the first time_window-1 values
                        are NaN (min_periods).
    """
    delta = data.diff(1).dropna()  # one-step price change
    # Zero-filled series with the same index as the diffs.
    gains = 0 * delta
    losses = 0 * delta
    rising = delta > 0
    falling = delta < 0
    gains[rising] = delta[rising]     # keep only positive moves
    losses[falling] = delta[falling]  # keep only negative moves
    avg_gain = gains.ewm(com=time_window - 1, min_periods=time_window).mean()
    avg_loss = losses.ewm(com=time_window - 1, min_periods=time_window).mean()
    relative_strength = (avg_gain / avg_loss).abs()
    return 100 - 100 / (1 + relative_strength)
def get_macd(price, slow, fast, smooth):
exp1 = price.ewm(span = fast, adjust = False).mean()
exp2 = price.ewm(span = slow, adjust = False).mean()
macd = pd.DataFrame(exp1 - exp2).rename(columns = {'close':'macd'})
signal = pd.DataFrame(macd.ewm(span = smooth, adjust = False).mean()).rename(columns = {'macd':'signal'})
hist = pd.DataFrame(macd['macd'] - signal['signal']).rename(columns = {0:'hist'})
frames = [macd, signal, hist]
df = pd.concat(frames, join = 'inner', axis = 1)
return df
def app():
    """Streamlit page: candlestick / MACD / RSI exploration for a crypto pair.

    Loads USD-quoted symbols from data/crypto_symbol_df.csv, pulls one year
    of daily candles from finnhub for the selected pair, computes RSI/MACD
    indicators and renders the chart the user picks.
    """
    ##Setting Streamlit Settings
    #st.set_page_config(layout="wide")
    ##importing files needed for web app
    today = date.today()
    year_ago = today - datetime.timedelta(days=365)
    unixtime_today = time.mktime(today.timetuple())
    unixtime_year = time.mktime(year_ago.timetuple())
    # NOTE(review): API key hard-coded in source — consider moving to config.
    finnhub_client = finnhub.Client(api_key="c3qcjnqad3i9vt5tl68g")
    symbol_df = pd.read_csv('data/crypto_symbol_df.csv')
    # Keep only USD-quoted pairs (last 3 chars of the description).
    symbol_df['currency'] = symbol_df['description'].str[-3:]
    symbol_df_short = symbol_df[symbol_df['currency'] == 'USD']
    symbol_df_short = symbol_df_short.drop(columns = ['currency', 'Unnamed: 0'])
    # Placeholder row so the selectbox opens on a prompt.
    filler_df = pd.DataFrame({"description": ['Please Search for a Crypto'],"displaySymbol": 'Please Search for a Crypto', "symbol": 'Please Search for a Crypto'})
    # NOTE(review): these date/unixtime values recompute the ones above.
    today = date.today()
    month_ago = today - datetime.timedelta(days=31)
    two_months_ago = today - datetime.timedelta(days=62)
    year_ago = today - datetime.timedelta(days=365)
    unixtime_today = time.mktime(today.timetuple())
    unixtime_year = time.mktime(year_ago.timetuple())
    symbol_selection = filler_df.append(symbol_df_short)
    col1, col2 = st.beta_columns(2)
    ## Streamlit
    with col1:
        st.write("""
# Superior Returns Crypto Exploration Application
""")
        st.write("""## Data Sources:""")
        st.write("""1.) finnhub python package""")
        #st.write("""2.) used for crawling avaialble Crypto tickers""")
    #with col2:
    #bull = Image.open('image.jpg')
    #st.image(bull, caption='Superior Returns', use_column_width=True) #use_column_width=True, width = 100)
    ## Need to make container and columns for Filters
    st.write("## Filters")
    filter_expander = st.beta_expander(" ", expanded=True)
    with filter_expander:
        col3, col4 = st.beta_columns(2)
        with col3:
            symbol_sel = st.selectbox("Select Crypto Symbol", symbol_selection["description"].unique())
            #pick_ticker_all = pick_ticker
            st.write("You have selected", symbol_sel)
        with col4:
            # Resolve the selected description back to the exchange symbol.
            symbol_selection = symbol_selection.loc[symbol_selection["description"] == symbol_sel]
            symbol_desc = symbol_selection['symbol'].unique()
            symbol_desc = symbol_desc[0]
            st.write("Crypto Symbol:", symbol_desc, ".")
    if symbol_sel == "Please Search for a Crypto":
        pass
    else:
        # One year of daily ('D') candles from finnhub.
        candles = finnhub_client.crypto_candles(symbol_desc, 'D', int(unixtime_year), int(unixtime_today))
        candles_df = pd.DataFrame(candles)
        #candles_df = candles_df.reset_index()
        #candles_df = candles_df.drop(columns = ['index'])
        candles_df = candles_df.rename(columns = {'c':'close', 'h': 'high', 'l': 'low', 'o': 'open', 's': 'status', 't': 'timestamp','v': 'volumne'})
        candles_df['date'] = pd.to_datetime(candles_df['timestamp'], unit='s')
        st.write("## Crypto Performance")
        crypto_performance_expander = st.beta_expander(" ", expanded=True)
        ##making options to show different graphs
        # Period labels mapped to approximate trading-day counts for tail().
        period_list = {'Period':['1 Week', '1 Month', '3 Months', '6 Months', '1 Year'], 'Period_value':[5, 23, 69, 138, 250]}
        period_dict = pd.DataFrame(period_list)
        with crypto_performance_expander:
            col5, col6 = st.beta_columns((1,3))
            with col5:
                period_selection = st.selectbox("Select Time Period", period_dict['Period'].unique())
                period_row_selected = period_dict.loc[period_dict["Period"] == period_selection]
                period_desc = period_row_selected['Period_value'].unique()
                period_desc = period_desc[0]
                chart_selection = st.radio("Pick Which Crypto Price Analysis you would like to look at", ("Candles", "MACD (Moving Average Convergence Divergence)", "RSI (Relative Strength Indictor)", "All"))
            with col6:
                #st.write(candles_df.astype('object'))
                candles_df.index = candles_df['date']
                candles_df = candles_df.drop(columns=['date'])
                # Indicators: RSI (module helper), 30-day Bollinger stats, MACD.
                candles_df['RSI'] = RSI(candles_df['close'], 14)
                candles_df['30_ma'] = candles_df['close'].rolling(30).mean()
                candles_df['30_st_dev'] = candles_df['close'].rolling(30).std()
                candles_df['Upper Band'] = candles_df['30_ma'] + (candles_df['30_st_dev'] * 2)
                # NOTE(review): duplicated line — the second was probably meant
                # to be 'Lower Band' with '- 2 * std'; confirm.
                candles_df['Upper Band'] = candles_df['30_ma'] + (candles_df['30_st_dev'] * 2)
                slow = 26
                fast = 12
                smooth = 9
                exp1 = candles_df['close'].ewm(span = fast, adjust = False).mean()
                exp2 = candles_df['close'].ewm(span = slow, adjust = False).mean()
                candles_df['macd'] = exp1 - exp2
                candles_df['signal'] = candles_df['macd'].ewm(span = smooth, adjust = False).mean()
                candles_df['hist'] = candles_df['macd'] - candles_df['signal']
                candles_df['macd_buy'] = np.where(candles_df['macd'] > candles_df['signal'], 1, 0)
                candles_df['macd_sell'] = np.where(candles_df['macd'] < candles_df['signal'], 1, 0)
                # Restrict to the selected lookback window.
                candles_df = candles_df.tail(period_desc)
                if chart_selection == "Candles":
                    # Create Candlestick Chart
                    candles = go.Figure(data=[go.Candlestick(x=candles_df.index,
                                                             open=candles_df['open'],
                                                             high=candles_df['high'],
                                                             low=candles_df['low'],
                                                             close=candles_df['close'])])
                    candles.add_trace(go.Scatter(x=candles_df.index, y=candles_df['close'], name='Crypto Close Price', opacity=0.5))
                    candles.update_yaxes(title="Crypto Price")
                    candles.update_xaxes(title="Date")
                    candles.update_layout(title="Daily Crypto Pricing")
                    st.plotly_chart(candles, use_container_width = True)
                elif chart_selection == "MACD (Moving Average Convergence Divergence)":
                    # Create MACD Chart
                    macd = make_subplots(specs=[[{"secondary_y": True}]])
                    #macd = go.Figure(data=[go.Candlestick(x=candles_df.index, open=candles_df['open'], high=candles_df['high'], low=candles_df['low'], close=candles_df['close'])], secondary_y=False)
                    #macd.add_trace(go.Scatter(x=candles_df.index, y=candles_df['close'], name='Crypto Close Price', opacity=0.5), secondary_y=False)
                    macd.add_trace(go.Scatter(x=candles_df.index, y=candles_df['signal'], name='MACD Signal'), secondary_y=True)
                    macd.add_trace(go.Scatter(x=candles_df.index, y=candles_df['macd'], name='MACD Formula'), secondary_y=True)
                    # Set y-axes titles
                    macd.update_yaxes(title_text="<b>Candles</b> Crypto Price Data", secondary_y=False)
                    macd.update_yaxes(title_text="<b>MACD</b> Signals", secondary_y=True)
                    macd.update_xaxes(title="Date")
                    macd.update_layout(title="Crypto MACD Graph")
                    st.plotly_chart(macd, title="Crypto RSI Graph", use_container_width = True)
                    st.markdown('**Note: In general the guidance is when these two lines cross this should signal some action to be taken. When the MACD Signal > MACD Formula Line you should sell the Crypto based on this technical. And vice versa.**')
                elif chart_selection == "RSI (Relative Strength Indictor)":
                    # Create RSI Chart
                    #rsi = go.Figure()
                    rsi = make_subplots(specs=[[{"secondary_y": True}]])
                    rsi.add_trace(go.Scatter(x=candles_df.index, y=candles_df['RSI'], name='RSI Value'), secondary_y=False)
                    rsi.add_hline(y=30, line_dash="dot", annotation_text="Under Bought Signal", annotation_position="bottom right", line_color='green')
                    rsi.add_hline(y=70, line_dash="dot", annotation_text="Over Bought Signal", annotation_position="bottom right", line_color='red')
                    rsi.add_trace(go.Scatter(x=candles_df.index, y=candles_df['close'], name='Crypto Price Close', opacity=0.3), secondary_y=True)
                    rsi.update_yaxes(title_text="<b>RSI</b> Relative Strength Indictor", secondary_y=False)
                    rsi.update_yaxes(title_text="<b>Crypto Price</b> Close", secondary_y=True)
                    rsi.update_xaxes(title="Date")
                    rsi.update_layout(title="Crypto RSI Graph")
                    st.plotly_chart(rsi, title="Crypto RSI Graph", use_container_width = True)
                else:
                    # "All": render candlestick, MACD and RSI charts together.
                    # Create Candlestick Chart
                    candles = go.Figure(data=[go.Candlestick(x=candles_df.index,
                                                             open=candles_df['open'],
                                                             high=candles_df['high'],
                                                             low=candles_df['low'],
                                                             close=candles_df['close'])])
                    candles.add_trace(go.Scatter(x=candles_df.index, y=candles_df['close'], name='Crypto Close Price', opacity=0.5))
                    candles.update_yaxes(title="Crypto Price")
                    candles.update_xaxes(title="Date")
                    candles.update_layout(title="Daily Crypto Pricing")
                    # Create MACD Chart
                    macd = make_subplots(specs=[[{"secondary_y": True}]])
                    #macd = go.Figure(data=[go.Candlestick(x=candles_df.index, open=candles_df['open'], high=candles_df['high'], low=candles_df['low'], close=candles_df['close'])], secondary_y=False)
                    macd.add_trace(go.Candlestick(x=candles_df.index, open=candles_df['open'], high=candles_df['high'], low=candles_df['low'], close=candles_df['close']), secondary_y=False)
                    macd.add_trace(go.Scatter(x=candles_df.index, y=candles_df['signal'], name='MACD Signal'), secondary_y=True)
                    macd.add_trace(go.Scatter(x=candles_df.index, y=candles_df['macd'], name='MACD Formula'), secondary_y=True)
                    # Set y-axes titles
                    macd.update_yaxes(title_text="<b>Candles</b> Crypto Price Data", secondary_y=False)
                    macd.update_yaxes(title_text="<b>MACD</b> Signals", secondary_y=True)
                    macd.update_xaxes(title="Date")
                    macd.update_layout(title="Crypto MACD Graph")
                    # Create RSI Chart
                    #rsi = go.Figure()
                    rsi = make_subplots(specs=[[{"secondary_y": True}]])
                    rsi.add_trace(go.Scatter(x=candles_df.index, y=candles_df['RSI'], name='RSI Value'), secondary_y=False)
                    rsi.add_hline(y=30, line_dash="dot", annotation_text="Under Bought Signal", annotation_position="bottom right", line_color='green')
                    rsi.add_hline(y=70, line_dash="dot", annotation_text="Over Bought Signal", annotation_position="bottom right", line_color='red')
                    rsi.add_trace(go.Scatter(x=candles_df.index, y=candles_df['close'], name='Crypto Price Close', opacity=0.3), secondary_y=True)
                    rsi.update_yaxes(title_text="<b>RSI</b> Relative Strength Indictor", secondary_y=False)
                    rsi.update_yaxes(title_text="<b>Crypto Price</b> Close", secondary_y=True)
                    rsi.update_xaxes(title="Date")
                    rsi.update_layout(title="Crypto RSI Graph")
                    st.plotly_chart(candles, use_container_width = True)
                    st.plotly_chart(macd, title="Crypto RSI Graph", use_container_width = True)
                    st.plotly_chart(rsi, title="Crypto RSI Graph", use_container_width = True)
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,767
|
webclinic017/streamlit-stock-analysis
|
refs/heads/main
|
/get_stock_tickers.py
|
import requests
import time
import pandas as pd
import numpy as np
from lxml import html
import csv
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
"""
## This script is for scraping available stock tickers. Having a list available to choose from will increase user expereince by enabling easier searching of companies.
"""
def get_tickers():
    """
    Scrape the available stock tickers from https://stockanalysis.com/stocks/.

    Drives a Chrome webdriver, switches the table to show 10000 rows per page
    and reads the ticker / company-name / industry cells via XPath.

    Returns a DataFrame of three unnamed columns (ticker, company name,
    industry); clean_tickers() assigns the column names afterwards.
    """
    web = "https://stockanalysis.com/stocks/"
    # NOTE(review): chromedriver path is machine-specific; consider making it
    # configurable.
    driver = webdriver.Chrome(r'C:\Users\rober\Anaconda3\bin\chromedriver')
    driver.get(web)
    # Show every row on one page so a single scrape captures the whole table.
    sel = Select(driver.find_element_by_xpath('//select[@name="perpage"]'))
    sel.select_by_visible_text("10000")
    time.sleep(5)  # allow the enlarged table to finish rendering
    ## starting to find elements
    # List comprehensions replace the original index-based append loops.
    ticker = driver.find_elements_by_xpath('//*[@id="main"]/div/div/div[2]/table/tbody/tr/td[1]/a')
    ticker_list = [cell.text for cell in ticker]
    company_name = driver.find_elements_by_xpath('//*[@id="main"]/div/div/div[2]/table/tbody/tr/td[2]')
    company_name_list = [cell.text for cell in company_name]
    industry = driver.find_elements_by_xpath('//*[@id="main"]/div/div/div[2]/table/tbody/tr/td[3]')
    industry_list = [cell.text for cell in industry]
    ## Creating dataframes so I can join this all together
    ticker_df = pd.DataFrame(ticker_list)
    company_name_df = pd.DataFrame(company_name_list)
    industry_df = pd.DataFrame(industry_list)
    big_df = pd.concat([ticker_df, company_name_df, industry_df], axis=1)
    return big_df
def clean_tickers(big_df):
    """
    ## This function is for cleaning the original list of tickers. There are some items from the list that are either not legit tickers that we remove here.
    ## The last part of this function is creating a dataframe that we then use to create some additional features to help with searching.

    Mutates big_df in place (column names, 'full' and 'url' columns), writes
    it to data/tickers_only.csv and returns it.
    """
    ## creating a couple new columns to create a link and full searchable stock name plus ticker to help with app search.
    big_df.columns = ['ticker', 'company_name', 'industry']
    big_df['full'] = big_df['ticker'] + " - " + big_df['company_name']
    big_df['url'] = "https://stockanalysis.com/stocks/" + big_df['ticker'] + "/"
    big_df['full'] = big_df['full'].astype('str')
    # NOTE(review): the strip runs AFTER 'full'/'url' are built, so those
    # columns may still contain untrimmed ticker text — confirm intended.
    big_df['ticker'] = big_df.ticker.str.strip()
    ## create csv to use for streamlit app
    big_df.to_csv('data/tickers_only.csv')
    return big_df
# Script entry point: scrape the full ticker table, then clean and persist it.
big_df = get_tickers()
ticker_df = clean_tickers(big_df)
|
{"/app.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py", "/crypto.py", "/option_chain_activity.py"], "/functions.py.py": ["/home.py", "/stock_trends.py", "/stock_analysis.py"]}
|
31,783
|
migueldelafuente1/taurus_runner
|
refs/heads/main
|
/src_taurus_runner/t_result.py
|
'''
Created on Jan 21, 2021
@author: Miguel
'''
from t_sources import Enum
# import pandas as pd
#===============================================================================
# OUTPUT RECOVER OF THE FILES
#===============================================================================
class ResultException(BaseException):
    """Raised when a taurus output file cannot be read or parsed."""
    pass
class Result():
    class outputFilesEnum(Enum):
        # Auxiliary files produced alongside the main text output of a run;
        # the runner copies each of these into the storage folder.
        occupation_numbers = 'occupation_numbers.dat'
        canonical_basis = 'canonicalbasis.dat'
        eigenbasis_h = 'eigenbasis_h.dat'
        eigenbasis_H11 = 'eigenbasis_H11.dat'
        final_wave_function = 'final_wf.bin'
        reduced_hamiltonian = 'usdb.red'
    class OutputBlocks(Enum):
        # Section names expected in the '%'-separated main output file
        # (spaces in the header already replaced by underscores).
        INPUT_PARAMETERS = "INPUT_PARAMETERS"
        NUCLEUS = "NUCLEUS"
        HO_BASIS = "HO_BASIS"
        HAMILTONIAN = "HAMILTONIAN"
        WAVE_FUNCTION = "WAVE_FUNCTION"
        ITERATIVE_MINIMIZATION = "ITERATIVE_MINIMIZATION"
        QUASIPARTICLE_STATE_PROPERTIES = "QUASIPARTICLE_STATE_PROPERTIES"
    def __init__(self, output_filename):
        """Read a taurus output file and parse its '%'-separated blocks.

        :param output_filename: path of the text output of a run.
        :raises ResultException: wrapping any error while opening/reading.
        """
        try:
            with open(output_filename, 'r') as f:
                _data = f.readlines()
        except Exception as e:
            raise ResultException(str(e))
        self._extractDataBlocks(_data)
        self._processBlockStrings()
def _extractDataBlocks(self, str_lines, separator='%'):
"""
return the block structure as dictionaries for an array of string lines
by an starting separator.
"""
self.blocks = {}
_block_name = None
_block_lines = []
i = 0
while i < len(str_lines):
line = str_lines[i].replace('\n', '')
if not line.startswith(separator):
# initial credits (skip)
if _block_name and (line not in ('', '\n')):
_block_lines.append(line.strip())
else:
if _block_name:
self.blocks[_block_name] = _block_lines
_block_lines = []
_block_name = str_lines[i+1].strip().replace(' ', '_')
i += 2
i += 1
def _processBlockStrings(self):
for block in self.OutputBlocks.members():
|
{"/src_taurus_runner/t_runner.py": ["/src_taurus_runner/t_sources.py"], "/main.py": ["/src_taurus_runner/t_sources.py", "/src_taurus_runner/t_runner.py"]}
|
31,784
|
migueldelafuente1/taurus_runner
|
refs/heads/main
|
/src_taurus_runner/t_runner.py
|
'''
Created on Jan 7, 2021
@author: Miguel
'''
import os
import subprocess
import shutil
from src_taurus_runner.t_sources import InputSource, InteractionArgs, \
WaveFunctionArgs, IterationArgs, ConstrainsArgs, ParticleNumberArgs
from t_result import Result
class RunnerException(BaseException):
pass
class _Runner(object):
    """
    Abstract class:
    Run fortran programs and manage the output textfile results.

    Subclasses implement runProcess() (public entry point) and
    _getAllData() (output parsing); _run() performs the shared
    execute-and-collect step.
    """
    F90_PROGRAM = "taurus_vap.exe"
    INPUT_FILENAME = InputSource.INPUT_FILENAME
    OUTPUT_DIRECTORY= "output_folder"
    OUTPUT_FILENAME_TEMPLATE = "output_{}.txt"
    COMPLEMENTARY_FILES = None
    # NOTE(review): no path separator — this evaluates to
    # "output_folderlogs.txt"; confirm "output_folder/logs.txt" was intended.
    log_file = OUTPUT_DIRECTORY + "logs.txt"
    _index_folder = 0
    _current_folder_name = ''
    def __new__(self, *args, **kwargs):
        """
        Create a directory and manage all mandatory attributes.

        NOTE(review): the first parameter of __new__ is the *class* (named
        'self' here), so these assignments create class-level attributes
        shared by all instances — confirm that is intended.
        """
        ## Mandatory attributes
        self.COMPLEMENTARY_FILES = self.OUTPUT_DIRECTORY+"/exe_files_"
        self.input_source = None
        self.output_filename = None
        self.results = {}
        ## create directory for the output
        _dir_name = self.OUTPUT_DIRECTORY
        self._current_folder_name = self.OUTPUT_DIRECTORY
        self._createFolder(_dir_name, new_calculation=True)
        return object.__new__(self)
    @classmethod
    def _createFolder(cls, folder_name, new_calculation=False):
        """
        Remove previous results from other calculations and create folder to
        store results.
        :new_calculation Resets a folder for a Runner execution (from __new__),
                         when folders are created inside a run the argument
                         is False (default).
        """
        if new_calculation and (cls._index_folder > 0):
            folder_name = cls._updateFolderIndex(folder_name)
        # debug leftovers; the values are unused below
        _a = os.path.exists(folder_name)
        _b = os.getcwd()
        ## create directory for the output
        if os.path.exists(folder_name):
            # remove elements in folder (for folder tree use shutil.rmtree)
            _files = os.listdir(folder_name)
            print("Files to remove:", _files)
            if len(_files) > 0:
                shutil.rmtree(folder_name)
            else:
                os.removedirs(folder_name)
        if ('/' in folder_name) or ('\\' in folder_name):
            # nested path: create intermediate directories as needed
            os.makedirs(folder_name, exist_ok=True)
        else:
            os.mkdir(folder_name)
        if new_calculation:
            cls._index_folder += 1
    @classmethod
    def _updateFolderIndex(cls, folder_name):
        """ When running a new Runner calculation, add an index to avoid
        overwrite that results (e.g. "output_folder(2)"); also rebases the
        COMPLEMENTARY_FILES prefix onto the new folder name.
        """
        dirs = os.listdir(os.getcwd())
        dirs = filter(lambda d: d.startswith(folder_name), dirs)
        folder_name = folder_name + "({})".format(len(list(dirs)))
        _comp_files = cls.COMPLEMENTARY_FILES.replace(cls.OUTPUT_DIRECTORY,
                                                      folder_name)
        cls.COMPLEMENTARY_FILES = _comp_files
        cls._current_folder_name = folder_name
        return folder_name
    def __init__(self, *args, **kwargs):
        # Abstract: concrete runners must define their own constructor.
        raise RunnerException("Abstract class, implement me!")
    def runProcess(self):
        """
        Define here the iteration to be called from the main.
        """
        # TODO: __iter__ / __next__ ??
        raise RunnerException("Abstract method, implement me!")
    # def __call__(self):
    #
    #     ## create input
    #     self.inputProcess()
    #
    #     ## run
    #     self._run()
    #
    #     ## get basic data.
    #     self._getAllData()
    #
    #
    # def inputProcess(self):
    #     raise RunnerException("Abstract method, implement me!")
    def _run(self):
        """
        Internal execution,
        every runner works same here: from current input file, run and keep the
        results in a text file. Then process by implementation that info.
        """
        # Shell redirection: the fortran binary reads INPUT_FILENAME on stdin
        # and its stdout is captured in self.output_filename.
        _e = subprocess.call('./{} < {} > {}'.format(self.F90_PROGRAM,
                                                     self.INPUT_FILENAME,
                                                     self.output_filename),
                             shell=True)
        # a2 = subprocess.check_output('./Debug/program_2.exe',
        #                              stderr=subprocess.STDOUT)
        if not os.path.exists(self.output_filename):
            raise Exception("Problem with the f90 program, it has not produced "
                            "the output file [{}]".format(self.output_filename))
        # get data before move
        self._getAllData()
        self._moveAllResultsIntoFolder()
        # shutil.copy(self.OUTPUT_FILENAME, self.OUTPUT_DIRECTORY)
    def _moveAllResultsIntoFolder(self):
        #TODO: change to move after debug
        shutil.copy(self.output_filename, self._current_folder_name)
        # create another auxiliary folder to keep the other files
        aux_folder = self._getDataStorageDirectoryName()
        self._createFolder(aux_folder)
        for _file in Result.outputFilesEnum.members():
            shutil.copy(_file, aux_folder)
    def _getDataStorageDirectoryName(self):
        """
        Return a name for the complementary data folder
        """
        return self.COMPLEMENTARY_FILES
    def _getAllData(self):
        """
        Get info from output_file, acts on results collection.
        """
        raise RunnerException("Abstract method, implement me!")
#===============================================================================
# IMPLEMENTATION OF BASIC RUNNERS
#===============================================================================
class SingleRunner(_Runner):
    """Run a single taurus_vap calculation for one (Z, N) nucleus.

    Optional keyword arguments are forwarded to the input-argument groups
    (interaction, particle number, wave function, iteration, constraints).
    """
    def __init__(self, Z, N, interaction, *arg, **kwargs):
        assert type(Z) is int, "Z is not <int>. Got [{}]".format(Z)
        assert type(N) is int, "N is not <int>. Got [{}]".format(N)
        self.z = Z
        self.n = N
        self.interaction = interaction
        self.optional_args = kwargs
    def runProcess(self):
        """Build the InputSource for (z, n) and execute the program."""
        _args = self.optional_args
        _args[ParticleNumberArgs.Param.Z_active] = self.z
        _args[ParticleNumberArgs.Param.N_active] = self.n
        _args[InteractionArgs.Param.interaction] = self.interaction
        i_int = InteractionArgs(**_args)
        i_pn = ParticleNumberArgs(**_args)
        i_wf = WaveFunctionArgs(**_args)
        i_iter = IterationArgs(**_args)
        i_const = ConstrainsArgs(**_args)
        self.input_source = InputSource(i_int, i_pn, i_wf, i_iter, i_const)
        _str = "z{}n{}".format(self.z, self.n)
        self.output_filename = self.OUTPUT_FILENAME_TEMPLATE.format(_str)
        self._run()
    def _getAllData(self):
        """Parse the output file and store the Result keyed by input source."""
        _result = Result(self.output_filename)
        # fix: was 'self.resuls' (typo) -> AttributeError at runtime.
        self.results[self.input_source] = _result
    def _getDataStorageDirectoryName(self):
        # fix: __init__ sets self.n, not self._n (former AttributeError).
        return self.COMPLEMENTARY_FILES + "_z{}n{}".format(self.z, self.n)
class IsotopeRunner(_Runner):
    """Run taurus_vap for a fixed Z over a list of neutron numbers N."""
    OUTPUT_DIRECTORY = "output_isotopes"
    def __init__(self, Z, N_list, interaction, *arg, **kwargs):
        assert type(Z) is int, "Z is not <int>. Got [{}]".format(Z)
        # Accept a single int as a one-element list.
        if type(N_list) is int:
            N_list = [N_list]
        if not isinstance(N_list, (list, tuple)):
            raise RunnerException("N_list is not <list/tuple>. Got [{}]"
                                  .format(N_list))
        else:
            if False in (isinstance(_n, int) for _n in N_list):
                raise RunnerException("There is a non <int> N: [{}]".format(N_list))
        self.z = Z
        self.N_list = N_list
        self.interaction = interaction
        self.optional_args = kwargs
    def runProcess(self):
        """Execute one calculation per N, reusing the non-N argument groups."""
        i_int = InteractionArgs(interaction=self.interaction)
        i_wf = WaveFunctionArgs()
        i_iter = IterationArgs()
        i_const = ConstrainsArgs()
        for n in self.N_list:
            self._n = n
            # fix: str.format was called with keyword arguments on positional
            # '{}' fields, which raises IndexError; pass values positionally.
            _str = "z{}n{}".format(self.z, self._n)
            self.output_filename = self.OUTPUT_FILENAME_TEMPLATE.format(_str)
            i_pn = ParticleNumberArgs(self.z, n)
            self.input_source = InputSource(i_int, i_pn, i_wf, i_iter, i_const)
            self._run()
    def _getAllData(self):
        """From the output text file: print the first 'Full H ' line found."""
        with open(self.output_filename, 'r') as f:
            _data = f.readlines()
        for line_ in _data:
            # fix: 'starstwith' typo -> AttributeError at runtime.
            if not line_.startswith('Full H '):
                continue
            else:
                print(line_)
                break
    def _getDataStorageDirectoryName(self):
        return self.COMPLEMENTARY_FILES + "_z{}n{}".format(self.z, self._n)
class ConstraintsRunner(_Runner):
    """
    Run one constrained calculation per value of a list-valued constraint,
    keeping the other given constraints fixed.

    :constraints_dict <dict> maps names available in <ConstrainsArgs> to
        either a list/tuple of values (iterated: one run per value) or a
        single number (fixed default shared by every run).
    If several lists are passed, the executions do NOT nest between them:
    each list is iterated on its own while the other list-valued
    parameters keep the ConstrainsArgs default value.

    Example: run Q20 over [-0.1, 0.0, 0.1] with J_z = 0.5 and Q10 = 1.0
    fixed (Z, N, interaction given):
    >>>
    z, n, interaction = 2, 2, 'usbd'
    _constrains = {
        ConstrainsArgs.Param.constr_Q20 : [-0.1, 0.0, 0.1],
        ConstrainsArgs.Param.constr_Jz  : 0.5,
        ConstrainsArgs.Param.constr_Q10 : 1.0
    }
    ir = ConstraintsRunner(z, n, interaction, _constrains)
    ir.runProcess()
    ##
    The results end up in folders named after the iterated constraint and
    the other fixed arguments.
    """

    OUTPUT_DIRECTORY = "output_constr"

    def __init__(self, Z, N, interaction, constraints_dict, *arg, **kwargs):
        assert type(Z) is int, "Z is not <int>. Got [{}]".format(Z)
        # fixed: this assert checked Z twice and never validated N
        assert type(N) is int, "N is not <int>. Got [{}]".format(N)
        self.z = Z
        self.n = N
        self.interaction = interaction

        if not isinstance(constraints_dict, dict):
            raise RunnerException("constraints_dict is not dict or keyword "
                                  "argument. Got [{}]".format(constraints_dict))
        # store BEFORE validating: the validator reads self.constraints_dict
        self.constraints_dict = constraints_dict
        self.__validateConstraintsInput()

        self.constraint_beta_lm = None
        if ConstrainsArgs.Param.constraint_beta_lm in kwargs:
            self.constraint_beta_lm = kwargs.get(
                ConstrainsArgs.Param.constraint_beta_lm)
        self.optional_args = kwargs

    def __validateConstraintsInput(self):
        """Verify that every constraint name and value type is valid."""
        if not self.constraints_dict:
            raise RunnerException("No constraints given, constraints_dict "
                                  "is empty.")
        _valid_prefixes = tuple(ConstrainsArgs.ParamConstrains.members())
        _list_constraints = 0
        for constr_name, constr_values in self.constraints_dict.items():
            if not constr_name.startswith(_valid_prefixes):
                raise RunnerException("Invalid constraint name. Got [{}]"
                                      .format(constr_name))
            if isinstance(constr_values, (list, tuple)):
                if _list_constraints >= 1:
                    print("WARNING: There is already [{}] list of constrain "
                          "to iterate, multiple lists might mess with the "
                          "default constraints between them. \nExecute them "
                          "in another runner instance"
                          .format(_list_constraints))
                _list_constraints += 1
            elif isinstance(constr_values, (float, int)):
                pass
            else:
                raise RunnerException("Invalid constraint value types. "
                                      "Got[{}]".format(constr_values))
        if _list_constraints == 0:
            # promote the last single-valued constraint to a 1-element list
            print("WARNING: No list for constraint iteration was given, the "
                  "runner will define the last parameter [{}] as a dummy "
                  "list for execution. \n >> Use SingleRunner() to execute "
                  "a simple calculation with any single constraint given."
                  .format(constr_name))
            self.constraints_dict[constr_name] = [constr_values]

    def runProcess(self):
        """Set the fixed constraints, then iterate every list-valued one."""
        i_int = InteractionArgs(interaction=self.interaction)
        i_pn = ParticleNumberArgs(Z_active=self.z, N_active=self.n)
        i_wf = WaveFunctionArgs()
        i_iter = IterationArgs()
        i_const = ConstrainsArgs()

        # single-valued entries act as fixed defaults shared by every run
        # (resolves the former TODO about non-list constraints)
        for constr_name, constr_values in self.constraints_dict.items():
            if not isinstance(constr_values, (list, tuple)):
                i_const.setConstraint(constr_name, float(constr_values))

        for constr_name, constr_values in self.constraints_dict.items():
            if not isinstance(constr_values, (list, tuple)):
                continue
            for val in constr_values:
                # setConstraint requires a float value
                i_const.setConstraint(constr_name, float(val))
                _str = "z{}n{}_{}".format(
                    self.z, self.n, constr_name.replace('constr_', ''))
                self.output_filename = \
                    self.OUTPUT_FILENAME_TEMPLATE.format(_str)
                self.input_source = InputSource(i_int, i_pn, i_wf,
                                                i_iter, i_const)
                self._run()
            # reset the iterated constraint before the next list
            i_const.disableConstraint(constr_name)

    def _getAllData(self):
        """Print the first 'Full H ' line of the solver output file."""
        with open(self.output_filename, 'r') as f:
            _data = f.readlines()
        for line_ in _data:
            # fixed typo: 'starstwith' raised AttributeError
            if line_.startswith('Full H '):
                print(line_)
                break

    def _getDataStorageDirectoryName(self):
        """Directory name encoding the nucleus and constrained parameters."""
        _c_str = [c.replace('constr_', '') for c in self.constraints_dict]
        _c_str = "_constr{}".format('_'.join(_c_str))
        return (self.COMPLEMENTARY_FILES
                + "_z{}n{}{}".format(self.z, self.n, _c_str))
|
{"/src_taurus_runner/t_runner.py": ["/src_taurus_runner/t_sources.py"], "/main.py": ["/src_taurus_runner/t_sources.py", "/src_taurus_runner/t_runner.py"]}
|
31,785
|
migueldelafuente1/taurus_runner
|
refs/heads/main
|
/src_taurus_runner/t_sources.py
|
'''
Created on Jan 3, 2021
@author: Miguel
'''
import inspect
class Enum(object):
    """Minimal enumeration base: public class attributes are the members."""

    @classmethod
    def members(cls):
        """Return the values of every public, non-method class attribute
        (ordered by attribute name, as ``inspect.getmembers`` yields them).
        """
        values = []
        for name, value in inspect.getmembers(cls):
            if name.startswith('_') or inspect.ismethod(value):
                continue
            values.append(value)
        return values
#===============================================================================
# INPUT OBJECTS AND PARTS FOR THE INPUT FILE
#===============================================================================
class DataException(BaseException):
    # Base error for the input-section (_Data) machinery.
    # NOTE(review): inherits from BaseException, so a generic
    # ``except Exception`` will NOT catch it -- confirm this is intended.
    pass
class InputArgumentException(DataException):
    """ Exception for invalid/missing keyword arguments for _Data subclasses """
    # Raised by property setters and _setDefaultAttributes implementations.
    pass
class _Data(object):
    """Abstract base for one section of the Taurus input file.

    Subclasses must define:
      * an inner ``Param`` enumeration with the accepted keyword names,
      * a ``_TEMPLATE`` string whose ``{placeholders}`` match ``Param``,
      * ``_setDefaultAttributes`` (mandatory arguments and defaults),
      * ``__str__`` returning the formatted section text.
    """

    class Param(Enum):
        pass

    _TEMPLATE = None

    def __new__(cls, *args, **kwargs):
        """Check every ``Param`` member has a placeholder in ``_TEMPLATE``.

        TODO: might move this to a testing suite to avoid running it on
        every instantiation.
        """
        # fixed: the original used a dict literal ``{}`` and then called
        # ``.add()`` on it (AttributeError); a set is required here.
        # (``@classmethod`` was also dropped: __new__ is an implicit
        # staticmethod that already receives the class as first argument.)
        _not_in_temp = set()
        for arg in cls.Param.members():
            if arg not in cls._TEMPLATE:
                _not_in_temp.add(arg)
        if len(_not_in_temp) > 0:
            raise DataException("Argument/s: {} do not match with _TEMPLATE"
                                .format(list(_not_in_temp)))
        return super(_Data, cls).__new__(cls)

    def __init__(self, **kwargs):
        """Keep the valid keyword arguments, set defaults, then apply them."""
        kwargs_aux = self._validateGivenParameters(kwargs)
        self._arguments = kwargs_aux
        self._setDefaultAttributes()
        self._setGivenParameters()

    def _validateGivenParameters(self, kwargs):
        """Drop keyword arguments that do not belong to this section.

        This allows passing a single dictionary with arguments for every
        section to each _Data subclass; foreign keys are silently ignored.
        """
        kwargs_aux = {}
        for arg in kwargs:
            if arg in self.Param.members():
                kwargs_aux[arg] = kwargs.get(arg)
        return kwargs_aux

    def _setDefaultAttributes(self):
        """Define the mandatory arguments and the default values."""
        raise DataException("Abstract method, implement me!")

    def _setGivenParameters(self):
        """setattr each given value (property setters validate them)."""
        for attr, val in self._arguments.items():
            setattr(self, attr, val)

    def getScientificFormat(self, value, digits=2):
        """Return *value* formatted in scientific notation."""
        _templ = "{:." + str(digits) + "e}"
        return _templ.format(value)

    def getStringData(self, kwargs):
        """Fill ``_TEMPLATE`` with *kwargs* and strip line indentation."""
        _str = self._TEMPLATE.format(**kwargs)
        _str = '\n'.join(_s.lstrip() for _s in _str.split('\n'))
        return _str

    def __str__(self):
        raise DataException("Abstract method, implement me!")
class InteractionArgs(_Data):
    """'Interaction' section of the Taurus input file.

    Mandatory argument: ``interaction`` (master name of the Hamiltonian
    files).  The option flags default to False and are rendered as 0/1
    integers by the ``* 1`` trick in the property getters.
    """

    class Param(Enum):
        # keyword names accepted by __init__ (must match _TEMPLATE fields)
        interaction = 'interaction'
        COM_correction = 'COM_correction'
        read_reduced_Hamiltonian = 'read_reduced_Hamiltonian'
        n_MPI_proc_per_H_team = 'n_MPI_proc_per_H_team'

    _TEMPLATE = """Interaction
-----------
Master name hamil. files {interaction}
Center-of-mass correction {COM_correction}
Read reduced hamiltonian {read_reduced_Hamiltonian}
No. of MPI proc per H team {n_MPI_proc_per_H_team}
"""

    def _setDefaultAttributes(self):
        # ``interaction`` has no sensible default: it must be supplied
        if not self.Param.interaction in self._arguments:
            raise InputArgumentException("Interaction must be given.")
        self.__interaction = self._arguments.get(self.Param.interaction)
        self.__COM_correction = False
        self.__read_reduced_Hamiltonian = False
        # NOTE(review): False renders as 0 here; an int count was probably
        # intended for the MPI processes -- confirm against the solver input.
        self.__n_MPI_proc_per_H_team = False

    @property
    def interaction(self):
        return self.__interaction

    @interaction.setter
    def interaction(self, value):
        if not isinstance(value, str):
            raise InputArgumentException("interaction name must be string")
        # TODO: search valid values (put in property setter)
        self.__interaction = value

    @property
    def COM_correction(self):
        # ``* 1`` converts a bool into the 0/1 integer the template expects
        return self.__COM_correction * 1

    @COM_correction.setter
    def COM_correction(self, value):
        self.__COM_correction = value

    @property
    def read_reduced_Hamiltonian(self):
        return self.__read_reduced_Hamiltonian * 1

    @read_reduced_Hamiltonian.setter
    def read_reduced_Hamiltonian(self, value):
        self.__read_reduced_Hamiltonian = value

    @property
    def n_MPI_proc_per_H_team(self):
        return self.__n_MPI_proc_per_H_team * 1

    @n_MPI_proc_per_H_team.setter
    def n_MPI_proc_per_H_team(self, value):
        self.__n_MPI_proc_per_H_team = value

    def __str__(self):
        """ Get the final string for input with current data"""
        __kwargs = {
            self.Param.interaction : self.interaction,
            self.Param.COM_correction : self.COM_correction,
            self.Param.read_reduced_Hamiltonian : self.read_reduced_Hamiltonian,
            self.Param.n_MPI_proc_per_H_team : self.n_MPI_proc_per_H_team
        }
        return self.getStringData(__kwargs)
class ParticleNumberArgs(_Data):
    """'Particle Number' section of the Taurus input file.

    Mandatory arguments: ``Z_active`` and ``N_active`` (non-negative
    integers).  Gauge-angle counts default to 1 and must be positive
    integers.
    """

    class Param(Enum):
        Z_active = 'Z_active'
        N_active = 'N_active'
        gauge_angles_p = 'gauge_angles_p'
        gauge_angles_n = 'gauge_angles_n'

    _TEMPLATE = """Particle Number
---------------
Number of active protons {Z_active}.00
Number of active neutrons {N_active}.00
No. of gauge angles protons {gauge_angles_p}
No. of gauge angles neutrons {gauge_angles_n}
"""

    def _setDefaultAttributes(self):
        # both proton and neutron numbers are mandatory
        if not self.Param.Z_active in self._arguments:
            raise InputArgumentException("Z_active must be given.")
        if not self.Param.N_active in self._arguments:
            raise InputArgumentException("N_active must be given.")
        self.__Z_active = self._arguments.get(self.Param.Z_active)
        self.__N_active = self._arguments.get(self.Param.N_active)
        self.__gauge_angles_p = 1
        self.__gauge_angles_n = 1

    @property
    def Z_active(self):
        return self.__Z_active

    @Z_active.setter
    def Z_active(self, value):
        # fixed: the original ``not (isinstance(...) or value < 0)`` test
        # accepted ANY int, including negatives; require non-negative int
        if not isinstance(value, int) or value < 0:
            raise InputArgumentException("Z:[{}] must be non-negative integer"
                                         .format(value))
        self.__Z_active = value

    @property
    def N_active(self):
        return self.__N_active

    @N_active.setter
    def N_active(self, value):
        # fixed: same inverted-condition bug as Z_active
        if not isinstance(value, int) or value < 0:
            raise InputArgumentException("N:[{}] must be non-negative integer"
                                         .format(value))
        self.__N_active = value

    @property
    def gauge_angles_p(self):
        return self.__gauge_angles_p

    @gauge_angles_p.setter
    def gauge_angles_p(self, value):
        # fixed: require a strictly positive integer
        if not isinstance(value, int) or value <= 0:
            raise InputArgumentException("Proton gauge angles [{}] must be "
                                         "positive integer".format(value))
        self.__gauge_angles_p = value

    @property
    def gauge_angles_n(self):
        return self.__gauge_angles_n

    @gauge_angles_n.setter
    def gauge_angles_n(self, value):
        # fixed: require a strictly positive integer
        if not isinstance(value, int) or value <= 0:
            raise InputArgumentException("Neutron gauge angles [{}] must be "
                                         "positive integer".format(value))
        self.__gauge_angles_n = value

    def __str__(self):
        """ Get the final string for input with current data"""
        __kwargs = {
            self.Param.Z_active : self.Z_active,
            self.Param.N_active : self.N_active,
            self.Param.gauge_angles_p : self.__gauge_angles_p,
            self.Param.gauge_angles_n : self.__gauge_angles_n
        }
        return self.getStringData(__kwargs)
class WaveFunctionArgs(_Data):
    """'Wave Function' section of the Taurus input file.

    All arguments are optional; everything defaults to 0 (general seed,
    no blocking, no simplifications, binary wf file, no cutoff).
    """

    class Param(Enum):
        seed_type = 'seed_type'
        blocking_QP = 'blocking_QP'
        symmetry_simplifications = 'symmetry_simplifications'
        wf_file_as_text = 'wf_file_as_text'
        cuttoff_occ_sp_states = 'cuttoff_occ_sp_states'
        include_empty_sp_states = 'include_empty_sp_states'

    _TEMPLATE = """Wave Function
-------------
Type of seed wave function {seed_type}
Number of QP to block {blocking_QP}
No symmetry simplifications {symmetry_simplifications}
Read/write wf file as text {wf_file_as_text}
Cutoff occupied s.-p. states {cuttoff_occ_sp_states}
Include all empty sp states {include_empty_sp_states}
"""

    def _setDefaultAttributes(self):
        self.seed_type = 0
        self.blocking_QP = 0
        self.symmetry_simplifications = 0
        self.wf_file_as_text = 0
        self.__cuttoff_occ_sp_states = 0.00e-00
        self.include_empty_sp_states = 0

    @property
    def cuttoff_occ_sp_states(self):
        """return the number in scientific format"""
        return self.getScientificFormat(self.__cuttoff_occ_sp_states,
                                        digits=2)

    @cuttoff_occ_sp_states.setter
    def cuttoff_occ_sp_states(self, value):
        # fixed: the Param accepted this keyword but there was no setter,
        # so _setGivenParameters raised AttributeError when it was given;
        # store the raw number, the getter formats it
        self.__cuttoff_occ_sp_states = value

    def __str__(self):
        """ Get the final string for input with current data"""
        __kwargs = {
            self.Param.seed_type : self.seed_type,
            self.Param.blocking_QP : self.blocking_QP,
            self.Param.symmetry_simplifications : self.symmetry_simplifications,
            self.Param.wf_file_as_text : self.wf_file_as_text,
            self.Param.cuttoff_occ_sp_states : self.cuttoff_occ_sp_states,
            self.Param.include_empty_sp_states : self.include_empty_sp_states
        }
        return self.getStringData(__kwargs)
class IterationArgs(_Data):
    """'Iterative Procedure' section of the Taurus input file.

    All arguments optional.  Defaults: 300 iterations, gradient type 1,
    eta=0.1, mu=0.3, tolerance=1e-4; the float parameters are rendered
    in scientific notation by their property getters.
    """

    class Param(Enum):
        iter_max = 'iter_max'
        step_intermediate = 'step_intermediate'
        log_prompt = 'log_prompt'
        grad_type = 'grad_type'
        grad_eta = 'grad_eta'
        grad_mu = 'grad_mu'
        grad_tol = 'grad_tol'

    _TEMPLATE = """Iterative Procedure
-------------------
Maximum no. of iterations {iter_max}
Step intermediate wf writing {step_intermediate}
More intmermediate printing {log_prompt}
Type of gradient {grad_type}
Parameter eta for gradient {grad_eta}
Parameter mu for gradient {grad_mu}
Tolerance for gradient {grad_tol}
"""

    def _setDefaultAttributes(self):
        # NOTE: writes the mangled attribute directly, bypassing the
        # iter_max setter validation (the default 300 is valid anyway)
        self.__iter_max = 300
        self.step_intermediate = 0
        self.log_prompt = 0
        self.grad_type = 1
        self.__grad_eta = 0.1
        self.__grad_mu = 0.3
        self.__grad_tol = 0.0001

    @property
    def iter_max(self):
        # plain integer (unlike the gradient parameters below)
        return self.__iter_max

    @iter_max.setter
    def iter_max(self, value):
        if value <= 0:
            raise InputArgumentException("iter_max:[{}] must be positive"
                                         .format(value))
        self.__iter_max = value

    @property
    def grad_eta(self):
        """return the number in scientific format"""
        return self.getScientificFormat(self.__grad_eta, digits=3)

    @grad_eta.setter
    def grad_eta(self, value):
        self.__grad_eta = value

    @property
    def grad_mu(self):
        """return the number in scientific format"""
        return self.getScientificFormat(self.__grad_mu, digits=3)

    @grad_mu.setter
    def grad_mu(self, value):
        self.__grad_mu = value

    @property
    def grad_tol(self):
        """return the number in scientific format"""
        return self.getScientificFormat(self.__grad_tol, digits=3)

    @grad_tol.setter
    def grad_tol(self, value):
        self.__grad_tol = value

    def __str__(self):
        """ Get the final string for input with current data"""
        __kwargs = {
            self.Param.iter_max : self.iter_max,
            self.Param.step_intermediate : self.step_intermediate,
            self.Param.log_prompt : self.log_prompt,
            self.Param.grad_type : self.grad_type,
            self.Param.grad_eta : self.grad_eta,
            self.Param.grad_mu : self.grad_mu,
            self.Param.grad_tol : self.grad_tol
        }
        return self.getStringData(__kwargs)
class ConstrainsArgs(_Data):
    """'Constraints' section of the Taurus input file.

    Two kinds of parameters:
      * settings: ``constraint_NZ`` (0/1), ``constraint_beta_lm`` (0/1/2),
        ``pair_coupling_scheme`` (0/1), ``constraint_tol`` (float);
      * constraint values: every name starting with a ``ParamConstrains``
        prefix; stored as an ``(enabled, value)`` pair and rendered as
        '0/1 value' in the template.
    """

    class Param(Enum):
        constraint_NZ = 'constraint_NZ'
        constraint_beta_lm = 'constraint_beta_lm'
        pair_coupling_scheme = 'pair_coupling_scheme'
        constraint_tol = 'constraint_tol'
        constr_Q10 = 'constr_Q10'
        constr_Q11 = 'constr_Q11'
        constr_Q20 = 'constr_Q20'
        constr_Q21 = 'constr_Q21'
        constr_Q22 = 'constr_Q22'
        constr_Q30 = 'constr_Q30'
        constr_Q31 = 'constr_Q31'
        constr_Q32 = 'constr_Q32'
        constr_Q33 = 'constr_Q33'
        constr_Q40 = 'constr_Q40'
        constr_Q41 = 'constr_Q41'
        constr_Q42 = 'constr_Q42'
        constr_Q43 = 'constr_Q43'
        constr_Q44 = 'constr_Q44'
        constr_Jx = 'constr_Jx'
        constr_Jy = 'constr_Jy'
        constr_Jz = 'constr_Jz'
        constr_P_T00_J10 = 'constr_P_T00_J10'
        constr_P_T00_J1m1 = 'constr_P_T00_J1m1'
        constr_P_T00_J1p1 = 'constr_P_T00_J1p1'
        constr_P_T10_J00 = 'constr_P_T10_J00'
        constr_P_T1m1_J00 = 'constr_P_T1m1_J00'
        constr_P_T1p1_J00 = 'constr_P_T1p1_J00'
        constr_Delta = 'constr_Delta'

    class ParamConstrains(Enum):
        # prefixes identifying the (enabled, value) constraint parameters
        constr_Q = 'constr_Q'
        constr_J = 'constr_J'
        constr_P_T = 'constr_P_T'
        constr_Delta = 'constr_Delta'

    _TEMPLATE = """Constraints
-----------
Force constraint N/Z {constraint_NZ}
Constraint beta_lm {constraint_beta_lm}
Pair coupling scheme {pair_coupling_scheme}
Tolerence for constraints {constraint_tol}
Constraint multipole Q10 {constr_Q10}
Constraint multipole Q11 {constr_Q11}
Constraint multipole Q20 {constr_Q20}
Constraint multipole Q21 {constr_Q21}
Constraint multipole Q22 {constr_Q22}
Constraint multipole Q30 {constr_Q30}
Constraint multipole Q31 {constr_Q31}
Constraint multipole Q32 {constr_Q32}
Constraint multipole Q33 {constr_Q33}
Constraint multipole Q40 {constr_Q40}
Constraint multipole Q41 {constr_Q41}
Constraint multipole Q42 {constr_Q42}
Constraint multipole Q43 {constr_Q43}
Constraint multipole Q44 {constr_Q44}
Constraint ang. mom. Jx {constr_Jx}
Constraint ang. mom. Jy {constr_Jy}
Constraint ang. mom. Jz {constr_Jz}
Constraint pair P_T00_J10 {constr_P_T00_J10}
Constraint pair P_T00_J1m1 {constr_P_T00_J1m1}
Constraint pair P_T00_J1p1 {constr_P_T00_J1p1}
Constraint pair P_T10_J00 {constr_P_T10_J00}
Constraint pair P_T1m1_J00 {constr_P_T1m1_J00}
Constraint pair P_T1p1_J00 {constr_P_T1p1_J00}
Constraint field Delta {constr_Delta}
"""

    def _setDefaultAttributes(self):
        self.__constraint_NZ = 1
        self.__constraint_beta_lm = 2
        self.__pair_coupling_scheme = 1
        self.__constraint_tol = 1.000e-4
        # every constraint value starts disabled at 0.0
        _value_params = tuple(self.ParamConstrains.members())
        for constr_attr_ in self.Param.members():
            if not constr_attr_.startswith(_value_params):
                continue
            setattr(self, constr_attr_, (False, 0.000))

    @property
    def constraint_NZ(self):
        return self.__constraint_NZ * 1

    @constraint_NZ.setter
    def constraint_NZ(self, value):
        if value not in (0, 1, True, False):
            raise InputArgumentException(
                "'constraint_NZ' can only be 0(False) or 1(True)")
        self.__constraint_NZ = value

    @property
    def constraint_beta_lm(self):
        return self.__constraint_beta_lm

    @constraint_beta_lm.setter
    def constraint_beta_lm(self, value):
        if value not in (0, 1, 2):
            raise InputArgumentException("'constraint_beta_lm' can only be: "
                "0 [constrains applied to Q_lm directly], "
                "1 [constrains applied to beta_lm dimensionless parameter] or "
                "2 [1) but applied to triaxial parameters l=20(beta) and 22(gamma)]"
                )
        self.__constraint_beta_lm = value

    @property
    def pair_coupling_scheme(self):
        return self.__pair_coupling_scheme * 1

    @pair_coupling_scheme.setter
    def pair_coupling_scheme(self, value):
        if value not in (0, 1, True, False):
            raise InputArgumentException(
                "'pair_coupling_scheme' can only be 0('agnostic') or 1(seniority)")
        self.__pair_coupling_scheme = value

    @property
    def constraint_tol(self):
        """return the number in scientific format"""
        return self.getScientificFormat(self.__constraint_tol, digits=3)

    @constraint_tol.setter
    def constraint_tol(self, value):
        self.__constraint_tol = value

    def _setGivenParameters(self):
        """ Overwrite method to set the specific constraints"""
        _constrains = set()
        for arg, value in self._arguments.items():
            if arg.startswith(tuple(self.ParamConstrains.members())):
                self.setConstraint(arg, value)
                _constrains.add(arg)
        # remove them so the base class only handles the plain settings
        for arg in _constrains:
            del self._arguments[arg]
        super()._setGivenParameters()

    def setConstraint(self, name, value):
        """Enable constraint *name* at *value* (int or float accepted)."""
        # generalized: ints are now accepted and normalized to float
        # (bool is excluded on purpose: it is an int subclass)
        if not isinstance(value, (int, float)) or isinstance(value, bool):
            raise DataException("Value must be numeric, [<{}> {}] given"
                                .format(type(value), value))
        if name in self.Param.members():
            if not name.startswith(tuple(self.ParamConstrains.members())):
                raise DataException("[{}] constraint value is not assignable"
                                    .format(name))
        else:
            raise DataException("[{}] invalid constraint name".format(name))
        setattr(self, name, (True, round(float(value), 3)))

    def disableConstraint(self, name):
        """ Disable Constraint, reset boolean to False and value to 0.0 """
        if name not in self.Param.members():
            raise DataException("[{}] invalid constraint name".format(name))
        setattr(self, name, (False, 0.0))

    def __str__(self):
        """ Get the final string for input with current data"""
        __kwargs = {
            self.Param.constraint_NZ : self.constraint_NZ,
            self.Param.constraint_beta_lm : self.constraint_beta_lm,
            self.Param.pair_coupling_scheme : self.pair_coupling_scheme,
            self.Param.constraint_tol : self.constraint_tol
        }
        # render every (enabled, value) pair as '0/1 value'
        _constr_vals = []
        _value_params = tuple(self.ParamConstrains.members())
        for constr_attr_ in self.Param.members():
            if not constr_attr_.startswith(_value_params):
                continue
            values = getattr(self, constr_attr_)
            value_str = '{} {}'.format(values[0] * 1,
                                       round(values[1], 3))
            _constr_vals.append((constr_attr_, value_str))
        __kwargs = {**__kwargs, **dict(_constr_vals)}
        return self.getStringData(__kwargs)
class InputSourceException(BaseException):
    # Raised by InputSource for missing/extra/invalid input sections.
    # NOTE(review): inherits from BaseException, so a generic
    # ``except Exception`` will NOT catch it -- confirm this is intended.
    pass
class InputSource():
    """Composite of the five input sections; renders the Taurus input file."""

    _input_parts = [
        InteractionArgs,
        ParticleNumberArgs,
        WaveFunctionArgs,
        IterationArgs,
        ConstrainsArgs
    ]

    # NOTE(review): createInputFile() writes 'input_automated.txt', not this
    # constant -- confirm which filename the runner actually expects.
    INPUT_FILENAME = "temp_input.txt"

    class PartParams(Enum):
        """ Data Input Classes to be set, automatically inserted """
        @classmethod
        def _setAttr(cls, _input_parts):
            # idempotent: only fill the enumeration once
            if len(cls.members()) > 0:
                return
            for part in _input_parts:
                setattr(cls, part.__name__, part.__name__)

    PartParams._setAttr(_input_parts)

    # e.g. 'InteractionArgs' -> 'interactionArgs' (the instance attribute)
    _attribute_map = dict([(part, part[0].lower()+part[1:])
                           for part in PartParams.members()])

    def __init__(self, *args):
        """
        :args, all the Taurus input parametric sections (one _Data
            instance per class in ``_input_parts``, any order)
        """
        self.interactionArgs = None
        self.particleNumberArgs = None
        self.waveFunctionArgs = None
        self.iterationArgs = None
        self.constrainsArgs = None
        self._setArgumentParts(args)

    def _setArgumentParts(self, args):
        """Validate the given sections and bind each to its attribute."""
        if len(args) != len(self._input_parts):
            # abs() so the message never reports a negative count
            raise InputSourceException(
                "There is/are [{}] missing or extra Data arguments for the "
                "input".format(abs(len(args) - len(self._input_parts))))
        if False in (isinstance(_a, _Data) for _a in args):
            raise InputSourceException(
                "There are invalid data type objects for objects {}, must "
                "inherit from _Data"
                .format(str([_a.__class__.__bases__ for _a in args])))
        for arg in args:
            try:
                setattr(self, self._attribute_map[arg.__class__.__name__], arg)
            # the key error will raise before the assignment exception
            except KeyError:
                raise InputSourceException(
                    "_Data object given [{}] is not a valid part."
                    .format(arg.__class__.__name__))

    def updateArgument(self, arg):
        """ Change an argument without re-instance the class. """
        setattr(self, self._attribute_map[arg.__class__.__name__], arg)

    def createInputFile(self):
        """ Creates a well defined TAURUS input file for the program to run. """
        parts = [str(self.interactionArgs),
                 str(self.particleNumberArgs),
                 str(self.waveFunctionArgs),
                 str(self.iterationArgs),
                 str(self.constrainsArgs)]
        with open('input_automated.txt', 'w+') as f:
            f.write("\n".join(parts))
|
{"/src_taurus_runner/t_runner.py": ["/src_taurus_runner/t_sources.py"], "/main.py": ["/src_taurus_runner/t_sources.py", "/src_taurus_runner/t_runner.py"]}
|
31,786
|
migueldelafuente1/taurus_runner
|
refs/heads/main
|
/main.py
|
'''
Created on Jan 3, 2021
@author: Miguel
'''
from src_taurus_runner.t_sources import InteractionArgs, WaveFunctionArgs, \
ParticleNumberArgs, IterationArgs, ConstrainsArgs, InputSource
from src_taurus_runner.t_runner import IsotopeRunner, ConstraintsRunner
from t_runner import SingleRunner
if __name__ == '__main__':
    # --- Interaction section -------------------------------------------
    # NOTE(review): 'usbd' here vs 'usdb' for SingleRunner below -- one of
    # the two is probably a typo; confirm the Hamiltonian master name.
    _args = {
        InteractionArgs.Param.interaction : 'usbd',
        InteractionArgs.Param.COM_correction : True,
        InteractionArgs.Param.read_reduced_Hamiltonian : True
    }
    i1 = InteractionArgs(**_args)
    i2 = WaveFunctionArgs()

    # --- Particle-number section (Z=2, N=2, 5 neutron gauge angles) ----
    _args = {
        ParticleNumberArgs.Param.Z_active : 2,
        ParticleNumberArgs.Param.N_active : 2,
        ParticleNumberArgs.Param.gauge_angles_n : 5
    }
    i3 = ParticleNumberArgs(**_args)
    i4 = IterationArgs()

    # --- Constraints: fixed Q40, N/Z enforcement off, plus Q10 ---------
    _args = {
        ConstrainsArgs.Param.constr_Q40 : 1.50,
        ConstrainsArgs.Param.constraint_NZ : False
    }
    i5 = ConstrainsArgs(**_args)
    i5.setConstraint(ConstrainsArgs.Param.constr_Q10, -0.1)

    # Assemble the five sections and write the Taurus input file.
    _input = InputSource(i1, i2, i3, i4, i5)
    _input.createInputFile()

    # Launch a single unconstrained run for He-4 with the 'usdb' force.
    ir = SingleRunner(2, 2, 'usdb')
    ir.runProcess()
    #
    # z = 0
    # n_list = [i for i in range(0, 5, 2)]
    # interaction = 'usdb'
    #
    # ir = IsotopeRunner(z, n_list, interaction)
    # ir.runProcess()
    #
    # _constrains = {ConstrainsArgs.Param.constr_Q20 : [-0.1, 0.0, 0.1, 0.2, 0.3],
    #                ConstrainsArgs.Param.constr_Jz : 0.5,
    #                ConstrainsArgs.Param.constr_Q10 : [0.5]}
    # ir = ConstraintsRunner(z, n_list, interaction, _constrains)
    # ir.runProcess()
|
{"/src_taurus_runner/t_runner.py": ["/src_taurus_runner/t_sources.py"], "/main.py": ["/src_taurus_runner/t_sources.py", "/src_taurus_runner/t_runner.py"]}
|
31,794
|
Rui-code/test-game
|
refs/heads/master
|
/asteroid.py
|
import pygame
import random
class Asteroid(pygame.sprite.Sprite):
    """Obstacle sprite that drifts right-to-left and despawns off-screen."""

    def __init__(self, group):
        super().__init__(group)
        raw_image = pygame.image.load('data2/asteroid.png').convert_alpha()
        self.image = pygame.transform.scale(raw_image, [70, 70])
        self.rect = self.image.get_rect()
        # random leftward speed in [1, 3)
        self.speed = 1 + random.random() * 2

    def update(self):
        # drift left; remove once well past the screen edge
        self.rect[0] -= self.speed
        if self.rect[0] < -300:
            self.kill()
|
{"/main2.py": ["/asteroid.py", "/pink_ship.py", "/projectile.py"]}
|
31,795
|
Rui-code/test-game
|
refs/heads/master
|
/pink_ship.py
|
import pygame
class PinkShip(pygame.sprite.Sprite):
    """Player ship with inertial vertical movement controlled by W/S."""

    def __init__(self, group):
        super().__init__(group)
        raw_image = pygame.image.load('data2/pink_ship.png').convert_alpha()
        self.image = pygame.transform.scale(raw_image, [80, 65])
        self.rect = self.image.get_rect()
        self.rect[0] = 40
        self.rect[1] = 220
        self.speed = 0
        self.acceleration = 0.1

    def update(self):
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_w]:
            self.speed -= self.acceleration
        elif pressed[pygame.K_s]:
            self.speed += self.acceleration
        else:
            # no input: bleed off 10% of the speed each frame
            self.speed *= 0.9
        self.rect[1] += self.speed
        # clamp to the playfield; hitting an edge kills the inertia
        clamped = min(max(self.rect[1], 0), 415)
        if clamped != self.rect[1]:
            self.rect[1] = clamped
            self.speed = 0
|
{"/main2.py": ["/asteroid.py", "/pink_ship.py", "/projectile.py"]}
|
31,796
|
Rui-code/test-game
|
refs/heads/master
|
/projectile.py
|
import pygame
import random
class Projectile(pygame.sprite.Sprite):
    """Player shot: flies right at constant speed, despawns off-screen."""

    def __init__(self, group):
        super().__init__(group)
        raw_image = pygame.image.load('data2/projectile.png').convert_alpha()
        self.image = pygame.transform.scale(raw_image, [20, 20])
        self.rect = self.image.get_rect()
        self.speed = 4

    def update(self):
        # constant rightward motion; despawn past x = 840 (screen width)
        self.rect[0] += self.speed
        if self.rect[0] > 840:
            self.kill()
|
{"/main2.py": ["/asteroid.py", "/pink_ship.py", "/projectile.py"]}
|
31,797
|
Rui-code/test-game
|
refs/heads/master
|
/main2.py
|
"""Side-scrolling shooter demo: dodge asteroids, shoot with the mouse."""
import os
import sys
import pygame
import random
from asteroid import Asteroid
from pink_ship import PinkShip
from projectile import Projectile

dir_path = os.getcwd()
sys.path.append(dir_path)
# When running as a PyInstaller bundle, resources live in the unpack dir.
if getattr(sys, 'frozen', False):
    os.chdir(sys._MEIPASS)

pygame.init()
SCREEN_RES = [840, 480]
display = pygame.display.set_mode(SCREEN_RES)
pygame.display.set_caption('Ninja 2D - Aula 02')

# Background
bg_group = pygame.sprite.Group()
bg_image = pygame.sprite.Sprite(bg_group)
bg_image.image = pygame.image.load('data2/desert.png')
bg_image.image = pygame.transform.scale(bg_image.image, SCREEN_RES)
bg_image.rect = bg_image.image.get_rect()

# Asteroids
asteroid_group = pygame.sprite.Group()

# Player
player_group = pygame.sprite.Group()
player = PinkShip(player_group)

# Shots
shots_group = pygame.sprite.Group()

# Musics
pygame.mixer.music.load('data2/intro_music.wav')
pygame.mixer.music.play(-1)   # -1 = loop forever

# Sounds
shot_sound = pygame.mixer.Sound('data2/shot_sound.ogg')

clock = pygame.time.Clock()
tick_timer = 0
game_over = False
game_loop = True
while game_loop:
    clock.tick(60)   # cap at 60 FPS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_loop = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # fire a shot from the ship's current position
            shot_sound.play()
            new_shot = Projectile(shots_group)
            new_shot.rect.center = player.rect.center
    # Logic (frozen after a collision ends the game)
    if not game_over:
        tick_timer += 1
        # every 30 frames there is a 40% chance to spawn an asteroid
        if tick_timer > 30:
            tick_timer = 0
            if random.random() < 0.4:
                new_asteroid = Asteroid(asteroid_group)
                x, y = SCREEN_RES[0], random.randint(0, SCREEN_RES[1] - 100)
                new_asteroid.rect.move_ip(x, y)
        bg_group.update()
        asteroid_group.update()
        player_group.update()
        shots_group.update()
        collisions = pygame.sprite.spritecollide(player, asteroid_group, False)
        if collisions:
            print("Game Over!")
            game_over = True
        # shots and asteroids destroy each other on contact
        hits = pygame.sprite.groupcollide(shots_group, asteroid_group, True, True)
    # Draw
    display.fill([25, 25, 25])
    bg_group.draw(display)
    shots_group.draw(display)
    player_group.draw(display)
    asteroid_group.draw(display)
    pygame.display.update()
|
{"/main2.py": ["/asteroid.py", "/pink_ship.py", "/projectile.py"]}
|
31,809
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/utils/test_utils.py
|
from os import listdir
from os.path import isfile, join
import base64
import gzip
import sys
import numpy
import os
import random
import re
import pickle
# sys.path.append("../")
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import configs.config
# import lib.model_cnn
from collections import namedtuple
# Input (gzipped .lett corpora) and output (unpacked pickles) directories.
corpora_dir = configs.config.TEST_DIR
unzip_files_dir = configs.config.TEST_DIR_UNZIP

# One crawled web-page record as stored in a .lett file (one per line).
Page = namedtuple("Page", "url, html, text, mime_type, encoding, lang")
def extract_domain(file):
    """Split one .lett corpus file into url->text dicts per language.

    Returns ``(dict_url_text_fr, dict_url_text_en)``; keys/values are
    utf-8 encoded when the page text is not already unicode.
    NOTE(review): bleu_test() unpacks THREE values from this function --
    one of the two call sites is out of date; confirm before use.
    """
    # Python-2-only hack: make implicit str<->unicode conversions use utf-8.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    dict_url_text_en = {}
    dict_url_text_fr = {}
    for line in decode_file(join(corpora_dir, file)):
        if line.lang == 'fr':
            if isinstance(line.text, unicode):
                dict_url_text_fr[line.url] = line.text
            else:
                dict_url_text_fr[line.url.encode('utf-8')] = line.text.encode('utf-8')
        elif line.lang == 'en':
            if isinstance(line.text, unicode):
                dict_url_text_en[line.url] = line.text
            else:
                dict_url_text_en[line.url.encode('utf-8')] = line.text.encode('utf-8')
        else:
            # pages in any other language are ignored
            continue
    return dict_url_text_fr, dict_url_text_en
def extract_all():
    """Unpack the hard-coded corpus file into per-language pickle dumps.

    For each ``<name>.gz`` writes ``<name>.en`` / ``<name>.fr`` pickles
    of the url->text dictionaries into ``unzip_files_dir``.
    """
    # NOTE(review): the listdir-based discovery is commented out; only this
    # single hard-coded domain file is processed.
    # files_list = [f for f in listdir(corpora_dir) if isfile(join(corpora_dir, f)) and f.endswith('gz')]
    files_list = ["www.conidia.fr.lett.gz"]
    for file in files_list:
        extract_dict_fr, extract_dict_en = extract_domain(file)
        # drop the trailing '.gz' from the archive name
        file_name = "".join(list(file)[:-3])
        file_name_en = file_name+'.en'
        file_name_fr = file_name+'.fr'
        with open(join(unzip_files_dir,file_name_en), 'w') as file_p:
            pickle.dump(extract_dict_en, file_p, pickle.HIGHEST_PROTOCOL)
        with open(join(unzip_files_dir,file_name_fr), 'w') as file_p:
            pickle.dump(extract_dict_fr, file_p, pickle.HIGHEST_PROTOCOL)
    print 'ok'
def generate_pairs(file_name):
    """Write candidate en/fr url pairs whose text lengths are close.

    Loads the pickled url->text dicts for *file_name* and appends every
    (en_url, fr_url) pair whose text-length difference is under the
    threshold to ``<file_name>.pairs``.
    """
    abs_distance = 10   # NOTE(review): overwritten inside the loop below
    file_name_en = file_name+'.en'
    file_name_fr = file_name+'.fr'
    with open(join(unzip_files_dir,file_name_en),'r') as file_p:
        extract_dict_en = pickle.load(file_p)
    with open(join(unzip_files_dir,file_name_fr), 'r') as file_p:
        extract_dict_fr = pickle.load(file_p)
    en_list = extract_dict_en.keys()
    fr_list = extract_dict_fr.keys()
    print len(extract_dict_en)
    print len(extract_dict_fr)
    count = 0
    with open(join(unzip_files_dir, file_name+".pairs"), 'w') as f:
        for en_web in en_list:
            # NOTE(review): the threshold is a fifth of the URL string
            # length but is compared against TEXT lengths -- confirm
            # this is the intended heuristic.
            length_en = len(en_web)
            abs_distance = length_en / 5
            for fr_web in fr_list:
                if abs(len(extract_dict_en[en_web]) - len(extract_dict_fr[fr_web])) < abs_distance:
                    count +=1
                    print count
                    print en_web,'\t',fr_web
                    f.write(en_web)
                    f.write('\t')
                    f.write(fr_web)
                    f.write('\n')
def load_translation(file_name):
    """Load the machine-translated English text for every URL of one domain.

    The translations file alternates URL lines and text lines; only URLs
    matching the domain (the corpus name minus its trailing 8 characters,
    i.e. '.lett.gz') are kept.

    :param file_name: corpus file name such as 'www.example.fr.lett.gz'.
    :return: dict mapping URL -> tokenized translated text.
    """
    url_dict = {}
    domain = file_name[:-8]  # strip '.lett.gz'
    print(domain)
    with open('../data/en_train_trans.out') as en_train_trans:
        lines = en_train_trans.readlines()
        # Pair each URL line (even index) with its text line (odd index).
        for url, text in zip(lines[::2], lines[1::2]):
            url = url.strip()
            text = text.strip()
            # NOTE(review): `domain` is used as a raw regex, so its dots
            # match any character; use re.escape(domain) or a plain
            # substring test if literal matching is intended.
            if re.search(domain, url) is not None:
                url_dict[url] = process_sentence.tokenize_text(text)
    return url_dict
def bleu_test():
    """Predict EN<->FR URL pairings by BLEU-scoring each translated French
    page against every English page of the same domain, writing predictions
    to predict_unlimit.pairs and counting matches against train.pairs.
    """
    # Every plain or gzipped corpus file in the corpora directory.
    files_list = [f for f in listdir(corpora_dir) if isfile(join(corpora_dir, f)) and (f.endswith('lett') or f.endswith('gz'))]
    match_url = []
    count = 0
    # Gold pairs, one per line; note the trailing '\n' is KEPT on each entry.
    with open('../data/train.pairs') as pairs:
        for pair in pairs:
            match_url.append(pair)
    predict_file = open('../data/predict_unlimit.pairs','w')
    # Only the first corpus file is processed here.
    for file_name in files_list[:1]:
        url_text_trans = load_translation(file_name)
        print file_name,len(url_text_trans)
        # NOTE(review): this call unpacks three values, but the
        # extract_domain visible in this file returns two -- confirm which
        # implementation this actually resolves to.
        dict_url_text, dict_url_en, dict_url_fr = extract_domain(file_name)
        en_text_list = []
        print 'extract ok'
        reference_list = []
        time_start = time.time()
        for url in dict_url_fr:
            pos = -1
            score_list = []
            # Machine-translated English text for this French URL.
            text = url_text_trans[url]
            for en_url in dict_url_en:
                en_text = process_sentence.tokenize_text(dict_url_text[en_url])
                # print en_text
                score_list.append(sentence_bleu(en_text, text))
            print max(score_list)
            # Index of the best-scoring English candidate.
            pos = score_list.index(max(score_list))
            # if pos >= len(dict_url_en):
            # print "pos error at", url,'\t',en_url
            # continue
            # if pos < 0:
            # print 'pos < 0 at', url,'\t',en_url
            # NOTE(review): integer indexing assumes dict_url_en is a
            # list; if it is a dict this raises KeyError -- verify.
            en_url_pred = dict_url_en[pos]
            pre = str(en_url_pred) + '\t' + str(url)
            print pre
            predict_file.write(pre)
            predict_file.write('\n')
            # NOTE(review): match_url entries end with '\n' while pre does
            # not, so this membership test may never succeed -- confirm.
            if pre in match_url:
                count +=1
        time_end = time.time()
        print (time_end - time_start),'for',file_name,'\t',count
    predict_file.close()
    print count
def decode_file(file):
    """Yield a Page for every record of a .lett corpus file.

    Each line carries six tab-separated fields: language, mime type,
    encoding, URL, base64-encoded HTML, and base64-encoded plain text.

    :param file: path to the corpus file; opened with gzip when the name
        ends in 'gz'.
    :raises ValueError: if a line does not split into exactly six fields.
    """
    opener = gzip.open if file.endswith('gz') else open
    # 'with' closes the handle even when the generator is abandoned early or
    # a malformed line raises; the original closed the file only after the
    # generator was fully exhausted, leaking it otherwise.
    with opener(file) as f:
        for line in f:
            lang, mime, enc, url, html, text = line.split("\t")
            html = base64.b64decode(html).decode("utf-8")
            text = base64.b64decode(text).decode("utf-8")
            yield Page(url, html, text, mime, enc, lang)
def get_translation_for_url():
    """Regroup per-sentence translation lines into one block per URL.

    Reads ../data/test/translations.test/url2text.en, where each line is
    "<url> <translated text...>" and lines for the same URL are contiguous,
    and writes ../data/test/en_text_trans.out as alternating URL lines and
    tab-joined text lines.

    Bug fixes vs. the original: the first line of every new URL group is no
    longer dropped, the final group is now flushed, and both files are
    closed via 'with' even on error. The unused `url` variable was removed.
    """
    with open('../data/test/translations.test/url2text.en') as en_lines, \
            open('../data/test/en_text_trans.out', 'w') as en_text_trans:
        for url, texts in _grouped_url_texts(en_lines):
            en_text_trans.write(url)
            en_text_trans.write('\n')
            en_text_trans.write('\t'.join(texts))
            en_text_trans.write('\n')


def _grouped_url_texts(lines):
    """Yield (url, [texts]) for each run of consecutive lines sharing a URL."""
    current_url = None
    texts = []
    for line in lines:
        content = line.split()
        url = content[0]
        text = '\t'.join(content[1:])
        if current_url is not None and url != current_url:
            yield current_url, texts  # flush the finished group
            texts = []
        current_url = url
        texts.append(text)  # original dropped this line on a URL change
    if current_url is not None:
        yield current_url, texts  # original never emitted the last group
if __name__ == '__main__':
    # Pipeline entry point; earlier stages are left commented out so a
    # single step can be re-run in isolation.
    # extract_all()
    # print 'extract_all'
    # generate_pairs('www.conidia.fr.lett')
    get_translation_for_url()
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,810
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/lib/wordVec.py
|
# -*- coding=utf-8 -*-
import numpy as np
from scipy.spatial.distance import cosine
def load_wordVec_mem(vector_file):
    """Read a whitespace-separated word-vector file into memory.

    Each line is expected to look like "<word> <c1> <c2> ...". Returns a
    dict mapping every word to the list of its component strings (the
    components are NOT converted to floats here).
    """
    with open(vector_file) as handle:
        rows = (line.split() for line in handle)
        return {parts[0]: parts[1:] for parts in rows}
def distance_two_vect(word1, word2, method= 'Cosine'):
    """Return the distance between two word vectors.

    :param word1: first vector (any sequence convertible to float64).
    :param word2: second vector, same length as word1.
    :param method: distance measure name; only 'Cosine' is implemented.
    :return: the distance as a float.
    :raises NotImplementedError: for known but unimplemented methods
        (previously these silently returned 0).
    :raises NameError: if method is not a known name (kept for callers
        that may catch NameError).
    """
    vect1 = np.asarray(word1, dtype='float64')
    vect2 = np.asarray(word2, dtype='float64')
    if method == 'Cosine':
        # Bug fix: the result used to be assigned to a misspelled variable
        # ('distence'), so the function always returned the initial 0.
        distance = cosine(vect1, vect2)
    elif method in ('Jaccard', 'Euclidean', 'CrossEntropy'):
        raise NotImplementedError(method + ' distance is not implemented yet')
    else:
        raise NameError("no such method:"+method)
    return distance
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,811
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/utils/tmp.py
|
import re

# Compiled pattern for pulling a domain out of an http:// URL.
# NOTE(review): re_obj is never used below; kept in case other code
# imports it from this module.
re_obj = re.compile('((?<=http://)(\w+-?\w\\.?)+?(?=/))')


def _copy_without_urls(src_path, dest):
    """Copy every line of src_path to dest, skipping lines containing a URL."""
    with open(src_path) as src:
        for line in src:
            if not re.search('http://', line):
                dest.write(line)


# Strip URL-bearing lines from both sides of the parallel corpus so the
# remaining sentence pairs can be used for training. The duplicated filter
# loops were factored into _copy_without_urls, and 'with' now guarantees
# the output handles are closed even on error.
with open('../data/para_for_train.en', 'w') as para_en_for_train:
    _copy_without_urls('../data/para.en', para_en_for_train)
with open('../data/para_for_train.fr', 'w') as para_fr_for_train:
    _copy_without_urls('../data/para.fr', para_fr_for_train)
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.