index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
77,509 | sckott/habanero | refs/heads/main | /habanero/cn/__init__.py | # -*- coding: utf-8 -*-
from .cn import content_negotiation
from .styles import csl_styles
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,510 | sckott/habanero | refs/heads/main | /habanero/cn/constants.py | cn_base_url = "https://doi.org"
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,511 | sckott/habanero | refs/heads/main | /habanero/crossref/__init__.py | # -*- coding: utf-8 -*-
from .crossref import Crossref
from .workscontainer import WorksContainer
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,512 | sckott/habanero | refs/heads/main | /test/test-random_dois.py | import pytest
from habanero import Crossref
cr = Crossref()
@pytest.mark.vcr
def test_random_dois():
"""random dois"""
res = cr.random_dois()
assert list == res.__class__
assert str == res[0].__class__
assert 10 == len(res)
@pytest.mark.vcr
def test_random_dois_sample_param():
"""random dois - sample parameter"""
res = cr.random_dois(3)
assert 3 == len(res)
res = cr.random_dois(5)
assert 5 == len(res)
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,513 | sckott/habanero | refs/heads/main | /habanero/cn/cn.py | from ..cnrequest import CNRequest
from .constants import cn_base_url
def content_negotiation(
ids: str,
format: str = "bibtex",
style: str = "apa",
locale: str = "en-US",
url: str = None,
**kwargs
) -> str:
"""
    Get citations in various formats from Crossref
    Supports DOIs from Crossref, DataCite and mEDRA
    :param ids: required. A single DOI or many DOIs, each a string. If passing
        many, do so in a list
:param format: Name of the format. One of "rdf-xml", "turtle", "citeproc-json",
"citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml",
"datacite-xml","bibentry", or "crossref-tdm"
:param style: A CSL style (for text format only). See :func:`~habanero.cn.csl_styles`
    for options. Default: "apa". If there's a style that Crossref doesn't support
you'll get a `(500) Internal Server Error`
:param locale: Language locale. See `locale.locale_alias`
:param url: Base URL for the content negotiation request. Default: `https://doi.org`
:param kwargs: any additional arguments will be passed on to `requests.get`
:rtype: str, which can be parsed to various formats depending on what
format you request (e.g., JSON vs. XML vs. bibtex)
See https://citation.crosscite.org/docs.html for details
    To make for a nicer user experience, when more than one DOI is passed to
    `ids` we raise a warning instead of stopping with an error, so that we can
    attempt to fetch data for all the DOIs given. When only one DOI is passed
    to `ids` we fail with an error message. A UserWarning is raised when a DOI
    is not found or there's a problem fetching the citation for a DOI.
    You can suppress these warnings with :code:`warnings.filterwarnings('ignore')`.
Usage::
from habanero import cn
cn.content_negotiation(ids = "10.1126/science.169.3946.635")
# A Medra DOI
cn.content_negotiation(ids = "10.1400/22888")
# get citeproc-json
cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json")
# some other formats
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text")
# return an R bibentry type
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry")
# return an apa style citation
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos")
# Using DataCite DOIs
## some formats don't work
# cn.content_negotiation(ids = "10.15468/t4rau8", format = "crossref-xml")
## But most do work
cn.content_negotiation(ids = "10.15468/t4rau8", format = "text")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "crossref-tdm")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "datacite-xml")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "rdf-xml")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "turtle")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "ris")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "bibtex")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "bibentry")
cn.content_negotiation(ids = "10.15468/t4rau8", format = "bibtex")
# many DOIs
dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217']
x = cn.content_negotiation(ids = dois)
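    # a sketch: suppress the per-DOI UserWarnings mentioned above
    import warnings
    warnings.filterwarnings('ignore')
    cn.content_negotiation(ids = dois)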
# Use a different base url
url = "http://dx.doi.org"
cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url)
"""
if url is None:
url = cn_base_url
return CNRequest(url, ids, format, style, locale, **kwargs)
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,514 | sckott/habanero | refs/heads/main | /habanero/request.py | import warnings
import requests
from .exceptions import RequestError
from .filterhandler import filter_handler
from .habanero_utils import (
check_json,
filter_dict,
ifelsestr,
is_json,
make_ua,
parse_json_err,
rename_query_filters,
)
from .request_class import Request
def request(
mailto,
ua_string,
url,
path,
ids=None,
query=None,
filter=None,
offset=None,
limit=None,
sample=None,
sort=None,
order=None,
facet=None,
select=None,
works=None,
cursor=None,
cursor_max=None,
agency=False,
progress_bar=False,
should_warn=False,
**kwargs
):
"""HTTP request helper."""
warning_thrown = False
url = url + path
    if cursor_max is not None and not isinstance(cursor_max, int):
        raise ValueError("cursor_max must be of class int")
filt = filter_handler(filter)
if select.__class__ is list:
select = ",".join(select)
payload = {
"query": query,
"filter": filt,
"offset": offset,
"rows": limit,
"sample": sample,
"sort": sort,
"order": order,
"facet": facet,
"select": select,
"cursor": cursor,
}
# convert limit/offset to str before removing None
# b/c 0 (zero) is falsey, so that param gets dropped
payload["offset"] = ifelsestr(payload["offset"])
payload["rows"] = ifelsestr(payload["rows"])
# remove params with value None
payload = dict((k, v) for k, v in payload.items() if v)
# add query filters
payload.update(filter_dict(kwargs))
# rename query filters
payload = rename_query_filters(payload)
    if ids is None:
url = url.strip("/")
try:
r = requests.get(url, params=payload, headers=make_ua(mailto, ua_string))
r.raise_for_status()
except requests.exceptions.HTTPError:
if is_json(r):
raise RequestError(r.status_code, parse_json_err(r))
else:
r.raise_for_status()
except requests.exceptions.RequestException as e:
raise e
check_json(r)
coll = r.json()
else:
if ids.__class__.__name__ == "str":
ids = ids.split()
if ids.__class__.__name__ == "int":
ids = [ids]
# should_warn = len(ids) > 1
coll = []
for i in range(len(ids)):
if works:
res = Request(
mailto,
ua_string,
url,
str(ids[i]) + "/works",
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
cursor,
cursor_max,
None,
progress_bar,
**kwargs
).do_request(should_warn=should_warn)
coll.append(res)
else:
if agency:
endpt = url + str(ids[i]) + "/agency"
else:
endpt = url + str(ids[i])
endpt = endpt.strip("/")
r = requests.get(
endpt, params=payload, headers=make_ua(mailto, ua_string)
)
if r.status_code > 201 and should_warn:
warning_thrown = True
mssg = "%s on %s: %s" % (r.status_code, ids[i], r.reason)
warnings.warn(mssg)
else:
r.raise_for_status()
if warning_thrown:
coll.append(None)
else:
check_json(r)
js = r.json()
coll.append(js)
warning_thrown = False
if len(coll) == 1:
coll = coll[0]
return coll
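
# A hypothetical sketch of calling this internal helper directly (normally the
# Crossref class supplies mailto and ua_string for you):
#
#   request(None, None, "https://api.crossref.org/", "works/",
#       ids="10.1371/journal.pone.0033693")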
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,515 | sckott/habanero | refs/heads/main | /habanero/crossref/filters.py | from typing import Dict
works_filter_details: Dict[str, dict] = {
"has_funder": {
"possible_values": None,
"description": "metadata which includes one or more funder entry",
},
"funder": {
"possible_values": "{funder_id}",
"description": "metadata which include the {funder_id} in FundRef data",
},
"location": {
"possible_values": "{country_name}",
"description": "funder records where location = {country name}. Only works on /funders route",
},
"prefix": {
"possible_values": "{owner_prefix}",
"description": "metadata belonging to a DOI owner prefix {owner_prefix} (e.g. '10.1016' )",
},
"member": {
"possible_values": "{member_id}",
"description": "metadata belonging to a CrossRef member",
},
"from_index_date": {
"possible_values": "{date}",
"description": "metadata indexed since (inclusive) {date}",
},
"until_index_date": {
"possible_values": "{date}",
"description": "metadata indexed before (inclusive) {date}",
},
"from_deposit_date": {
"possible_values": "{date}",
"description": "metadata last (re)deposited since (inclusive) {date}",
},
"until_deposit_date": {
"possible_values": "{date}",
"description": "metadata last (re)deposited before (inclusive) {date}",
},
"from_update_date": {
"possible_values": "{date}",
"description": "Metadata updated since (inclusive) {date} Currently the same as 'from_deposit_date'",
},
"until_update_date": {
"possible_values": "{date}",
"description": "Metadata updated before (inclusive) {date} Currently the same as 'until_deposit_date'",
},
"from_created_date": {
"possible_values": "{date}",
"description": "metadata first deposited since (inclusive) {date}",
},
"until_created_date": {
"possible_values": "{date}",
"description": "metadata first deposited before (inclusive) {date}",
},
"from_pub_date": {
"possible_values": "{date}",
"description": "metadata where published date is since (inclusive) {date}",
},
"until_pub_date": {
"possible_values": "{date}",
"description": "metadata where published date is before (inclusive) {date}",
},
"from_online_pub_date": {
"possible_values": "{date}",
"description": "metadata where online published date is since (inclusive) {date}",
},
"until_online_pub_date": {
"possible_values": "{date}",
"description": "metadata where online published date is before (inclusive) {date}",
},
"from_print_pub_date": {
"possible_values": "{date}",
"description": "metadata where print published date is since (inclusive) {date}",
},
"until_print_pub_date": {
"possible_values": "{date}",
"description": "metadata where print published date is before (inclusive) {date}",
},
"from_posted_date": {
"possible_values": "{date}",
"description": "metadata where posted date is since (inclusive) {date}",
},
"until_posted_date": {
"possible_values": "{date}",
"description": "metadata where posted date is before (inclusive) {date}",
},
"from_accepted_date": {
"possible_values": "{date}",
"description": "metadata where accepted date is since (inclusive) {date}",
},
"until_accepted_date": {
"possible_values": "{date}",
"description": "metadata where accepted date is before (inclusive) {date}",
},
"has_license": {
"possible_values": None,
"description": "metadata that includes any '<license_ref>' elements",
},
"license_url": {
"possible_values": "{url}",
"description": "metadata where '<license_ref>' value equals {url}",
},
"license_version": {
"possible_values": "{string}",
"description": "metadata where the '<license_ref>''s 'applies_to' attribute is '{string}'",
},
"license_delay": {
"possible_values": "{integer}",
"description": "metadata where difference between publication date and the '<license_ref>''s 'start_date' attribute is <= '{integer}' (in days",
},
"has_full_text": {
"possible_values": None,
"description": "metadata that includes any full text '<resource>' elements_",
},
"full_text_version": {
"possible_values": "{string}",
"description": "metadata where '<resource>' element's 'content_version' attribute is '{string}'",
},
"full_text_type": {
"possible_values": "{mime_type}",
"description": "metadata where '<resource>' element's 'content_type' attribute is '{mime_type}' (e.g. 'application/pdf')",
},
"full_text_application": {
"possible_values": "{string}",
"description": "metadata where <resource> link has one of the following intended applications: text-mining, similarity-checking or unspecified",
},
"has_references": {
"possible_values": None,
"description": "metadata for works that have a list of references",
},
"has_archive": {
"possible_values": None,
"description": "metadata which include name of archive partner",
},
"archive": {
"possible_values": "{string}",
"description": "metadata which where value of archive partner is '{string}'",
},
"has_orcid": {
"possible_values": None,
"description": "metadata which includes one or more ORCIDs",
},
"has_authenticated_orcid": {
"possible_values": None,
"description": "metadata which includes one or more ORCIDs where the depositing publisher claims to have witness the ORCID owner authenticate with ORCID",
},
"orcid": {
"possible_values": "{orcid}",
"description": "metadata where '<orcid>' element's value = '{orcid}'",
},
"issn": {
"possible_values": "{issn}",
"description": "metadata where record has an ISSN = '{issn}' Format is 'xxxx_xxxx'.",
},
"type": {
"possible_values": "{type}",
"description": "metadata records whose type = '{type}' Type must be an ID value from the list of types returned by the '/types' resource",
},
"directory": {
"possible_values": "{directory}",
"description": "metadata records whose article or serial are mentioned in the given '{directory}'. Currently the only supported value is 'doaj'",
},
"doi": {
"possible_values": "{doi}",
"description": "metadata describing the DOI '{doi}'",
},
"updates": {
"possible_values": "{doi}",
"description": "metadata for records that represent editorial updates to the DOI '{doi}'",
},
"is_update": {
"possible_values": None,
"description": "metadata for records that represent editorial updates",
},
"has_update_policy": {
"possible_values": None,
"description": "metadata for records that include a link to an editorial update policy",
},
"container_title": {
"possible_values": None,
"description": "metadata for records with a publication title exactly with an exact match",
},
"category_name": {
"possible_values": None,
"description": "metadata for records with an exact matching category label",
},
"type_name": {
"possible_values": None,
"description": "metadata for records with an exacty matching type label",
},
"award_number": {
"possible_values": "{award_number}",
"description": "metadata for records with a matching award nunber_ Optionally combine with 'award_funder'",
},
"award_funder": {
"possible_values": "{funder doi or id}",
"description": "metadata for records with an award with matching funder. Optionally combine with 'award_number'",
},
"has_assertion": {
"possible_values": None,
"description": "metadata for records with any assertions",
},
"assertion_group": {
"possible_values": None,
"description": "metadata for records with an assertion in a particular group",
},
"assertion": {
"possible_values": None,
"description": "metadata for records with a particular named assertion",
},
"has_affiliation": {
"possible_values": None,
"description": "metadata for records that have any affiliation information",
},
"alternative_id": {
"possible_values": None,
"description": "metadata for records with the given alternative ID, which may be a publisher_specific ID, or any other identifier a publisher may have provided",
},
"article_number": {
"possible_values": None,
"description": "metadata for records with a given article number",
},
"has_abstract": {
"possible_values": None,
"description": "metadata for records which include an abstract",
},
"has_clinical_trial_number": {
"possible_values": None,
"description": "metadata for records which include a clinical trial number",
},
"content_domain": {
"possible_values": None,
"description": "metadata where the publisher records a particular domain name as the location Crossmark content will appear",
},
"has_content_domain": {
"possible_values": None,
"description": "metadata where the publisher records a domain name location for Crossmark content",
},
"has_crossmark_restriction": {
"possible_values": None,
"description": "metadata where the publisher restricts Crossmark usage to content domains",
},
"has_relation": {
"possible_values": None,
"description": "metadata for records that either assert or are the object of a relation",
},
"relation_type": {
"possible_values": None,
"description": "One of the relation types from the Crossref relations schema (e.g. is-referenced-by, is-parent-of, is-preprint-of)",
},
"relation_object": {
"possible_values": None,
"description": "Relations where the object identifier matches the identifier provided",
},
"relation_object_type": {
"possible_values": None,
"description": "One of the identifier types from the Crossref relations schema (e.g. doi, issn)",
},
"public_references": {
"possible_values": None,
"description": "metadata where publishers allow references to be distributed publically",
},
"publisher_name": {
"possible_values": None,
"description": "metadata for records with an exact matching publisher name",
},
"affiliation": {
"possible_values": None,
"description": "metadata for records with at least one contributor with the given affiliation",
},
}
members_filter_details: Dict[str, dict] = {
"has_public_references": {
"possible_values": None,
"description": "member has made their references public for one or more of their prefixes",
},
"reference_visibility": {
"possible_values": ["open", "limited", "closed"],
"description": "members who have made their references either open, limited (to Metadata Plus subscribers) or closed",
},
"backfile_doi_count": {
"possible_values": "{integer}",
"description": "count of DOIs for material published more than two years ago",
},
"current_doi_count": {
"possible_values": "{integer}",
"description": "count of DOIs for material published within last two years",
},
}
funders_filter_details: Dict[str, dict] = {
"location": {
"possible_values": None,
"description": "funders located in specified country",
}
}
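
# An illustrative sketch (not part of the library API): the tables above are
# plain dicts, so a filter's documentation can be looked up directly:
#
#   works_filter_details["has_orcid"]["description"]
#   members_filter_details["reference_visibility"]["possible_values"]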
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,516 | sckott/habanero | refs/heads/main | /test/test-funders.py | import pytest
from requests.exceptions import HTTPError
from habanero import Crossref, exceptions
cr = Crossref()
@pytest.mark.vcr
def test_funders():
"""funders - basic test"""
res = cr.funders(limit=2)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 2 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_query():
"""funders - param: query"""
res = cr.funders(query="NSF", limit=2)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 2 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_sample_err():
with pytest.raises(exceptions.RequestError):
cr.funders(sample=2)
@pytest.mark.vcr
def test_funders_filter_fails_noidsworks():
with pytest.raises(exceptions.RequestError):
cr.funders(filter={"from_pub_date": "2014-03-03"})
@pytest.mark.vcr
def test_funders_filter_fails_noids():
with pytest.raises(exceptions.RequestError):
cr.funders(works=True, filter={"has_assertion": True})
@pytest.mark.vcr
def test_funders_filter_works():
"""funders - filter works when used with id and works=True"""
res = cr.funders(
ids="10.13039/100000001", works=True, filter={"has_assertion": True}
)
assert dict == res.__class__
assert 20 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_funders_fail_limit():
with pytest.raises(KeyError):
cr.funders(limit="things")
@pytest.mark.vcr
def test_funders_fail_offset():
with pytest.raises(KeyError):
cr.funders(offset="things")
@pytest.mark.vcr
def test_funders_fail_sort():
with pytest.raises(exceptions.RequestError):
cr.funders(sort="things")
@pytest.mark.vcr
def test_funders_field_queries():
"""funders - param: kwargs - field queries work as expected"""
res = cr.funders(
ids="10.13039/100000001",
works=True,
query_container_title="engineering",
filter={"type": "journal-article"},
limit=100,
)
titles = [x.get("title") for x in res["message"]["items"]]
assert dict == res.__class__
assert 5 == len(res["message"])
assert list == titles.__class__
assert 100 == len(titles)
@pytest.mark.vcr
def test_funders_query_filters_not_allowed_with_dois():
with pytest.raises(HTTPError):
cr.funders(ids="10.13039/100000001", query_container_title="engineering")
@pytest.mark.vcr
def test_funders_bad_id_warn():
"""funders - param: warn"""
with pytest.warns(UserWarning):
out = cr.funders(ids="10.13039/notarealdoi", warn=True)
assert out is None
@pytest.mark.vcr
def test_funders_mixed_ids_warn():
"""funders - param: warn"""
with pytest.warns(UserWarning):
out = cr.funders(ids=["10.13039/100000001", "10.13039/notarealdoi"], warn=True)
assert len(out) == 2
assert isinstance(out[0], dict)
assert out[1] is None
@pytest.mark.vcr
def test_funders_bad_id_works_warn():
"""funders - param: warn"""
with pytest.warns(UserWarning):
out = cr.funders(ids="10.13039/notarealdoi", works=True, warn=True)
assert out is None
@pytest.mark.vcr
def test_funders_mixed_ids_works_warn():
"""""funders - param: warn""" ""
with pytest.warns(UserWarning):
out = cr.funders(
ids=["10.13039/100000001", "10.13039/notarealdoi", "10.13039/100000005"],
works=True,
warn=True,
)
assert len(out) == 3
assert len([x for x in out if x]) == 2
assert isinstance(out[0], dict)
assert isinstance(out[2], dict)
assert out[1] is None
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,517 | sckott/habanero | refs/heads/main | /habanero/cn/styles.py | import re
import requests
from ..habanero_utils import check_json
def csl_styles(**kwargs) -> list:
"""
Get list of styles from https://github.com/citation-style-language/styles
:param kwargs: any additional arguments will be passed on to `requests.get`
    :rtype: list of CSL styles
Usage::
from habanero import cn
cn.csl_styles()
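    # a sketch: check that a style name exists before passing it
    # to content_negotiation
    "apa" in cn.csl_styles()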
"""
base = "https://api.github.com/repos/citation-style-language/styles"
tt = requests.get(base + "/commits?per_page=1", **kwargs)
tt.raise_for_status()
check_json(tt)
commres = tt.json()
sha = commres[0]["sha"]
sty = requests.get(base + "/git/trees/" + sha, **kwargs)
sty.raise_for_status()
check_json(sty)
res = sty.json()
files = [z["path"] for z in res["tree"]]
    matches = [re.search(r"\.csl$", g) for g in files]
    csls = [x.string for x in filter(None, matches)]
    return [re.sub(r"\.csl$", "", x) for x in csls]
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,518 | sckott/habanero | refs/heads/main | /test/test-journals.py | import pytest
from requests.exceptions import HTTPError
from habanero import Crossref, exceptions
cr = Crossref()
@pytest.mark.vcr
def test_journals():
"""journals - basic test"""
res = cr.journals(limit=1)
assert dict == res.__class__
assert dict == res["message"].__class__
assert 1 == res["message"]["items-per-page"]
@pytest.mark.vcr
def test_journals_query():
"""journals - param: query"""
res = cr.journals(query="ecology", limit=2)
assert dict == res.__class__
assert 2 == res["message"]["items-per-page"]
assert "journal-list" == res["message-type"]
@pytest.mark.vcr
def test_journals_ids():
"""journals - param: ids"""
res = cr.journals(ids=["1803-2427", "2326-4225"])
assert list == res.__class__
assert dict == res[0].__class__
assert "journal" == res[0]["message-type"]
@pytest.mark.vcr
def test_journals_works():
"""journals - param: works"""
res1 = cr.journals(
ids="2167-8359", query="ecology", works=True, sort="score", order="asc"
)
scores1 = [x["score"] for x in res1["message"]["items"]]
res2 = cr.journals(
ids="2167-8359", query="ecology", works=True, sort="score", order="desc"
)
scores2 = [x["score"] for x in res2["message"]["items"]]
assert dict == res1.__class__
assert "work-list" == res1["message-type"]
assert max(scores1) == scores1[-1]
assert min(scores2) == scores2[-1]
@pytest.mark.vcr
def test_journals_filter_fails_noidsworks():
with pytest.raises(exceptions.RequestError):
cr.journals(filter={"from_pub_date": "2014-03-03"})
@pytest.mark.vcr
def test_journals_filter_fails_noids():
with pytest.raises(exceptions.RequestError):
cr.journals(works=True, filter={"has_assertion": True})
@pytest.mark.vcr
def test_journals_fail_limit():
with pytest.raises(KeyError):
cr.journals(limit="things")
@pytest.mark.vcr
def test_journals_fail_offset():
with pytest.raises(KeyError):
cr.journals(offset="things")
@pytest.mark.vcr
def test_journals_fail_sort():
with pytest.raises(exceptions.RequestError):
cr.journals(sort="things")
@pytest.mark.vcr
def test_journals_field_queries():
"""journals - param: kwargs - field queries work as expected"""
res = cr.journals(
ids="2167-8359",
works=True,
query_bibliographic="fish",
filter={"type": "journal-article"},
)
titles = [x.get("title")[0] for x in res["message"]["items"]]
assert dict == res.__class__
assert 5 == len(res["message"])
assert list == titles.__class__
assert str == str(titles[0]).__class__
@pytest.mark.vcr
def test_journals_field_queries_not_allowed_with_dois():
with pytest.raises(HTTPError):
cr.journals(ids="2167-8359", query_bibliographic="fish")
@pytest.mark.vcr
def test_journals_bad_id_warn():
"""journals - param: warn"""
with pytest.warns(UserWarning):
out = cr.journals(ids="4444-4444", warn=True)
assert out is None
@pytest.mark.vcr
def test_journals_mixed_ids_warn():
"""journals - param: warn"""
with pytest.warns(UserWarning):
out = cr.journals(ids=["1803-2427", "4444-4444"], warn=True)
assert len(out) == 2
assert isinstance(out[0], dict)
assert out[1] is None
@pytest.mark.vcr
def test_journals_bad_id_works_warn():
"""journals - param: warn"""
with pytest.warns(UserWarning):
out = cr.journals(ids="4444-4444", works=True, warn=True)
assert out is None
@pytest.mark.vcr
def test_journals_mixed_ids_works_warn():
"""""journals - param: warn""" ""
with pytest.warns(UserWarning):
out = cr.journals(
ids=["1803-2427", "4444-4444", "2167-8359"], works=True, warn=True
)
assert len(out) == 3
assert len([x for x in out if x]) == 2
assert isinstance(out[0], dict)
assert isinstance(out[2], dict)
assert out[1] is None
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,519 | sckott/habanero | refs/heads/main | /habanero/cn_formats.py | cn_formats = [
"rdf-xml",
"turtle",
"citeproc-json",
"citeproc-json-ish",
"text",
"ris",
"bibtex",
"crossref-xml",
"datacite-xml",
"bibentry",
"crossref-tdm",
]
cn_format_headers = {
"rdf-xml": "application/rdf+xml",
"turtle": "text/turtle",
"citeproc-json": "transform/application/vnd.citationstyles.csl+json",
"text": "text/x-bibliography",
"ris": "application/x-research-info-systems",
"bibtex": "application/x-bibtex",
"crossref-xml": "application/vnd.crossref.unixref+xml",
"datacite-xml": "application/vnd.datacite.datacite+xml",
"bibentry": "application/x-bibtex",
"crossref-tdm": "application/vnd.crossref.unixsd+xml",
}
cn_types = {
"rdf-xml": "text/xml",
"turtle": "text/plain",
"citeproc-json": "application/json",
"citeproc-json-ish": "application/json",
"text": "text/plain",
"ris": "text/plain",
"bibtex": "text/plain",
"crossref-xml": "text/xml",
"datacite-xml": "text/xml",
"bibentry": "text/plain",
"crossref-tdm": "text/xml",
}
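
# An illustrative sketch (assumption: this mirrors how cnrequest.py consumes
# these tables): map a user-facing format name to the Accept header sent and
# the content type expected back.
#
#   accept = cn_format_headers["bibtex"]  # "application/x-bibtex"
#   returned = cn_types["bibtex"]         # "text/plain"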
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,520 | sckott/habanero | refs/heads/main | /test/test-settings.py | import pytest
import yaml
from habanero import Crossref
cr_with_ua = Crossref(ua_string="foo bar")
cr_without_ua = Crossref()
cr_with_bad_ua = Crossref(ua_string=5)
vcr_path = "test/cassettes/test-settings/test_ua_string.yaml"
@pytest.mark.vcr(vcr_path)
def test_ua_string():
"""settings (ua_string) - with ua string, works"""
cr_with_ua.works(ids="10.1371/journal.pone.0033693")
try:
x = open(vcr_path, "r").read()
xy = yaml.safe_load(x)
heads = xy["interactions"][0]["request"]["headers"]
assert "foo bar" in heads["User-Agent"][0]
assert "foo bar" in heads["X-USER-AGENT"][0]
except FileNotFoundError:
pytest.skip(f"{vcr_path} not found")
vcr_noua_path = "test/cassettes/test-settings/test_no_ua_string.yaml"
@pytest.mark.vcr(vcr_noua_path)
def test_no_ua_string():
"""settings (ua_string) - without ua string, works"""
cr_without_ua.works(ids="10.1371/journal.pone.0033693")
try:
x = open(vcr_noua_path, "r").read()
xy = yaml.safe_load(x)
heads = xy["interactions"][0]["request"]["headers"]
assert "foo bar" not in heads["User-Agent"][0]
assert "foo bar" not in heads["X-USER-AGENT"][0]
except FileNotFoundError:
pytest.skip(f"{vcr_noua_path} not found")
vcr_path_members = "test/cassettes/test-settings/test_ua_string_members.yaml"
@pytest.mark.vcr(vcr_path_members)
def test_ua_string_members():
"""settings (ua_string) - with ua string, members"""
cr_with_ua.members(query="ecology", limit=2)
try:
x = open(vcr_path_members, "r").read()
xy = yaml.safe_load(x)
heads = xy["interactions"][0]["request"]["headers"]
assert "foo bar" in heads["User-Agent"][0]
assert "foo bar" in heads["X-USER-AGENT"][0]
except FileNotFoundError:
pytest.skip(f"{vcr_path_members} not found")
vcr_path_prefixes = "test/cassettes/test-settings/test_ua_string_prefixes.yaml"
@pytest.mark.vcr(vcr_path_prefixes)
def test_ua_string_prefixes():
"""settings (ua_string) - with ua string, prefixes"""
cr_with_ua.prefixes(ids="10.1016", works=True, sample=2)
try:
x = open(vcr_path_prefixes, "r").read()
xy = yaml.safe_load(x)
heads = xy["interactions"][0]["request"]["headers"]
assert "foo bar" in heads["User-Agent"][0]
assert "foo bar" in heads["X-USER-AGENT"][0]
except FileNotFoundError:
pytest.skip(f"{vcr_path_prefixes} not found")
vcr_path_registration_agency = (
"test/cassettes/test-settings/test_ua_string_registration_agency.yaml"
)
@pytest.mark.vcr(vcr_path_registration_agency)
def test_ua_string_registration_agency():
"""settings (ua_string) - with ua string, registration_agency"""
cr_with_ua.registration_agency("10.1126/science.169.3946.635")
try:
x = open(vcr_path_registration_agency, "r").read()
xy = yaml.safe_load(x)
heads = xy["interactions"][0]["request"]["headers"]
assert "foo bar" in heads["User-Agent"][0]
assert "foo bar" in heads["X-USER-AGENT"][0]
except FileNotFoundError:
pytest.skip(f"{vcr_path_registration_agency} not found")
def test_ua_string_errors():
"""settings (ua_string) - fails well"""
with pytest.raises(TypeError):
cr_with_bad_ua.works(ids="10.1371/journal.pone.0033693")
# NOTE: the test blocks above that use cassettes are super hacky
# - i can't find a way to inspect the request headers that get sent
# - so just inspecting the request headers recorded in the cassette
# - i.e., re-running to record cassettes from scratch will fail
# - on the first run, but then succeed after that
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,521 | sckott/habanero | refs/heads/main | /habanero/request_class.py | import math
import warnings
import requests
from tqdm import tqdm # type: ignore
from .exceptions import RequestError
from .filterhandler import filter_handler
from .habanero_utils import (
check_json,
filter_dict,
ifelsestr,
make_ua,
rename_query_filters,
)
class Request(object):
"""
Habanero: request class
This is the request class for all requests
"""
def __init__(
self,
mailto,
ua_string,
url,
path,
query=None,
filter=None,
offset=None,
limit=None,
sample=None,
sort=None,
order=None,
facet=None,
select=None,
cursor=None,
cursor_max=None,
agency=False,
progress_bar=False,
**kwargs
):
self.mailto = mailto
self.ua_string = ua_string
self.url = url
self.path = path
self.query = query
self.filter = filter
self.offset = offset
self.limit = limit
self.sample = sample
self.sort = sort
self.order = order
self.facet = facet
self.select = select
self.cursor = cursor
self.cursor_max = cursor_max
self.agency = agency
self.progress_bar = progress_bar
self.kwargs = kwargs
def _url(self):
tmpurl = self.url + self.path
return tmpurl.strip("/")
def do_request(self, should_warn=False):
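        # build the query payload, make the first request, then hand off to
        # _redo_req, which follows cursors for deep paging when needed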
filt = filter_handler(self.filter)
if self.select.__class__ is list:
self.select = ",".join(self.select)
if not isinstance(self.cursor_max, (type(None), int)):
raise ValueError("cursor_max must be of class int")
payload = {
"query": self.query,
"filter": filt,
"offset": self.offset,
"rows": self.limit,
"sample": self.sample,
"sort": self.sort,
"order": self.order,
"facet": self.facet,
"select": self.select,
"cursor": self.cursor,
}
# convert limit/offset to str before removing None
# b/c 0 (zero) is falsey, so that param gets dropped
payload["offset"] = ifelsestr(payload["offset"])
payload["rows"] = ifelsestr(payload["rows"])
# remove params with value None
payload = dict((k, v) for k, v in payload.items() if v)
# add query filters
payload.update(filter_dict(self.kwargs))
# rename query filters
payload = rename_query_filters(payload)
js = self._req(payload=payload, should_warn=should_warn)
if js is None:
return js
cu = js["message"].get("next-cursor")
max_avail = js["message"]["total-results"]
res = self._redo_req(js, payload, cu, max_avail, should_warn)
return res
def _redo_req(self, js, payload, cu, max_avail, should_warn):
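        # keep following `next-cursor` tokens until we reach cursor_max
        # results, exhaust the server's total-results, or get no cursor back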
        if cu is not None and self.cursor_max > len(js["message"]["items"]):
res = [js]
total = len(js["message"]["items"])
# progress bar setup
if self.progress_bar:
actual_max = (
self.cursor_max if self.cursor_max is not None else max_avail
)
if max_avail < actual_max:
actual_max = max_avail
runs = math.ceil(actual_max / (self.limit or 20))
pbar = tqdm(total=runs - 1)
            while (
                cu is not None
                and self.cursor_max > total
                and total < max_avail
            ):
payload["cursor"] = cu
out = self._req(payload=payload, should_warn=should_warn)
cu = out["message"].get("next-cursor")
res.append(out)
total = sum([len(z["message"]["items"]) for z in res])
if self.progress_bar:
pbar.update(1)
if self.progress_bar:
pbar.close()
return res
else:
return js
def _req(self, payload, should_warn):
try:
r = requests.get(
self._url(),
params=payload,
headers=make_ua(self.mailto, self.ua_string),
)
r.raise_for_status()
        except requests.exceptions.HTTPError:
            if should_warn:
                warnings.warn("%s: %s" % (r.status_code, r.reason))
                return None
            # raise a RequestError carrying the message from the JSON error
            # body when one is available; otherwise re-raise via raise_for_status
            try:
                mssg = r.json()["message"][0]["message"]
            except (ValueError, KeyError, IndexError, TypeError):
                mssg = None
            if mssg is None:
                r.raise_for_status()
            raise RequestError(r.status_code, mssg)
        except requests.exceptions.RequestException as e:
            raise e
check_json(r)
return r.json()
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,522 | sckott/habanero | refs/heads/main | /habanero/crossref/crossref.py | from typing import List, Union
from ..habanero_utils import check_kwargs, sub_str
from ..request import request
from ..request_class import Request
from .filters import (
funders_filter_details,
members_filter_details,
works_filter_details,
)
class Crossref:
"""
Crossref: Class for Crossref search API methods
:param base_url: Base URL to use for http requests
:param api_key: An API key to send with each http request
:param mailto: A mailto string, see section below
:param ua_string: A user agent string, see section below
|
|
**Includes methods matching Crossref API routes**
* /works - :func:`~habanero.Crossref.works`
* /members - :func:`~habanero.Crossref.members`
* /prefixes - :func:`~habanero.Crossref.prefixes`
* /funders - :func:`~habanero.Crossref.funders`
* /journals - :func:`~habanero.Crossref.journals`
* /types - :func:`~habanero.Crossref.types`
* /licenses - :func:`~habanero.Crossref.licenses`
Also:
* registration_agency - :func:`~habanero.Crossref.registration_agency`
* random_dois - :func:`~habanero.Crossref.random_dois`
**What am I actually searching when using the Crossref search API?**
You are using the Crossref search API described at
https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md.
When you search with query terms on Crossref servers, they are not
searching full text, or even abstracts of articles, but only what is
available in the data that is returned to you. That is, they search
article titles, authors, etc. For some discussion on this, see
https://github.com/CrossRef/rest-api-doc/issues/101.
**The Polite Pool**
As of September 18th 2017, any API queries that use HTTPS and have
appropriate contact information will be directed to a special pool
of API machines that are reserved for polite users. If you connect
to the Crossref API using HTTPS and provide contact
    information, then they will send you to a separate pool of machines
    whose performance is easier to maintain because abusive users can be
    blocked.
We have been using `https` in `habanero` for a while now, so that's good
to go. To get into the Polite Pool, also set your mailto email address
when you instantiate the `Crossref` object. See examples below.
**Setting a custom user-agent string**
Using `ua_string` you can set an additional string that will be added
to the UA string we send in every request, which looks like:
`python-requests/2.22.0 habanero/0.7.0`. We send that string with
the headers: `User-Agent` and `X-USER-AGENT`. Turn on verbose curl
output to see the request headers sent. To unset the `ua_string`
you set, just initialize a new Crossref class.
**Doing setup**::
from habanero import Crossref
cr = Crossref()
# set a different base url
Crossref(base_url = "http://some.other.url")
# set an api key
Crossref(api_key = "123456")
# set a mailto address to get into the "polite pool"
Crossref(mailto = "foo@bar.com")
# set an additional user-agent string
Crossref(ua_string = "foo bar")
.. _RateLimits:
**Rate limits**
See the headers `X-Rate-Limit-Limit` and `X-Rate-Limit-Interval` for current
    rate limits. As of this writing, the limit is 50 requests per second,
    but that could change. It is also not yet clear how long the reset interval is.
See below for getting header info for your requests.
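    A minimal sketch of checking those headers yourself (assumes only the
    `requests` library, as in the verbose curl example below)::

        import requests

        r = requests.get("https://api.crossref.org/works", params={"query": "ecology"})
        print(r.headers.get("X-Rate-Limit-Limit"))     # e.g. '50'
        print(r.headers.get("X-Rate-Limit-Interval"))  # e.g. '1s'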
.. _CurlOpts:
**Verbose curl output**::
import requests
import logging
import http.client
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
from habanero import Crossref
cr = Crossref()
cr.works(query = "ecology")
.. _FieldQueries:
**Field queries**
One or more field queries. Field queries are searches on specific fields.
    For example, using `query_author` searches author names only, instead of the
    default full search across all fields (a short sketch follows the list below).
    Acceptable field query parameters are:
* `query_container_title` - Query container-title aka. publication name
* `query_author` - Query author given and family names
* `query_editor` - Query editor given and family names
* `query_chair` - Query chair given and family names
* `query_translator` - Query translator given and family names
* `query_contributor` - Query author, editor, chair and translator given and
family names
* `query_bibliographic` - Query bibliographic information, useful for citation
look up. Includes titles, authors, ISSNs and publication years. Crossref
retired `query_title`; use this field query instead
* `query_affiliation` - Query contributor affiliations
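    A field query is passed as an extra keyword argument, e.g. (a minimal
    sketch, reusing the `cr` object from the setup example above)::

        cr.works(query = "ecology", query_author = "carl boettiger")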
.. _sorting:
**Sort options**
* `score` or `relevance` - Sort by relevance score
* `updated` - Sort by date of most recent change to metadata. Currently the same as deposited.
* `deposited` - Sort by time of most recent deposit
* `indexed` - Sort by time of most recent index
* `published` - Sort by publication date
* `published-print` - Sort by print publication date
* `published-online` - Sort by online publication date
* `issued` - Sort by issued date (earliest known publication date)
* `is-referenced-by-count` - Sort by number of references to documents
* `references-count` - Sort by number of references made by documents
.. _Facets:
**Facet count options**
* `affiliation` - Author affiliation. Allowed value: *
* `year` - Earliest year of publication, synonym for published. Allowed value: *
* `funder-name` - Funder literal name as deposited alongside DOIs. Allowed value: *
* `funder-doi` - Funder DOI. Allowed value: *
* `orcid` - Contributor ORCID. Max value: 100
* `container-title` - Work container title, such as journal title, or book title. Max value: 100
* `assertion` - Custom Crossmark assertion name. Allowed value: *
* `archive` - Archive location. Allowed value: *
* `update-type` - Significant update type. Allowed value: *
* `issn` - Journal ISSN (any - print, electronic, link). Max value: 100
* `published` - Earliest year of publication. Allowed value: *
* `type-name` - Work type name, such as journal-article or book-chapter. Allowed value: *
* `license` - License URI of work. Allowed value: *
* `category-name` - Category name of work. Allowed value: *
* `relation-type` - Relation type described by work or described by another work with work as object. Allowed value: *
* `assertion-group` - Custom Crossmark assertion group name. Allowed value: *
|
|
|
"""
def __init__(
self,
base_url: str = "https://api.crossref.org",
api_key: str = None,
mailto: str = None,
ua_string: str = None,
) -> None:
self.base_url = base_url
self.api_key = api_key
self.mailto = mailto
self.ua_string = ua_string
def __repr__(self):
return (
"""< %s \nURL: %s\nKEY: %s\nMAILTO: %s\nADDITIONAL UA STRING: %s\n>"""
% (
type(self).__name__,
self.base_url,
sub_str(self.api_key),
self.mailto,
self.ua_string,
)
)
def works(
self,
ids: Union[List[str], str] = None,
query: str = None,
filter: dict = None,
offset: float = None,
limit: float = None,
sample: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
select: Union[List[str], str] = None,
cursor: str = None,
cursor_max: float = 5000,
progress_bar: bool = False,
warn: bool = False,
**kwargs
) -> dict:
"""
Search Crossref works
:param ids: DOIs (digital object identifier) or other identifiers
:param query: A query string
:param filter: Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
        :param offset: Number of the record to start at, from 1 to 10000
        :param limit: Number of results to return. Not relevant when searching with specific dois.
            Default: 20. Max: 1000
        :param sample: Number of random results to return. When you use the sample parameter,
            the limit and offset parameters are ignored. Max: 100
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param select: Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param cursor: Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used.
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
            parameter to set a maximum number of records. Of course, if fewer records are
            found than this value, you will get only those found.
        :param progress_bar: Print a progress bar. Only used when doing deep paging
            (when using the cursor parameter). Default: False
:param warn: warn instead of raise error upon HTTP request error. default: False
Especially helpful when passing in many DOIs where some may lead to request failures.
Returns `None` when `warn=True` for each DOI that errors.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.works()
cr.works(ids = '10.1371/journal.pone.0033693')
dois = ['10.1371/journal.pone.0033693', ]
cr.works(ids = dois)
x = cr.works(query = "ecology")
x['status']
x['message-type']
x['message-version']
x['message']
x['message']['total-results']
x['message']['items-per-page']
x['message']['query']
x['message']['items']
# Get full text links
x = cr.works(filter = {'has_full_text': True})
x
# Parse output to various data pieces
x = cr.works(filter = {'has_full_text': True})
## get doi for each item
[ z['DOI'] for z in x['message']['items'] ]
## get doi and url for each item
[ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
### print every doi
for i in x['message']['items']:
                print(i['DOI'])
# filters - pass in as a dict
## see https://github.com/CrossRef/rest-api-doc#filter-names
cr.works(filter = {'has_full_text': True})
cr.works(filter = {'has_funder': True, 'has_full_text': True})
cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
## to repeat a filter name, pass in a list
x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
            [ z['funder'][0]['DOI'] for z in x['message']['items'] ]
# Deep paging, using the cursor parameter
## this search should lead to only ~215 results
cr.works(query = "widget", cursor = "*", cursor_max = 100)
## this search should lead to only ~2500 results, in chunks of 500
res = cr.works(query = "octopus", cursor = "*", limit = 500)
sum([ len(z['message']['items']) for z in res ])
## about 167 results
res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
sum([ len(z['message']['items']) for z in res ])
## cursor_max to get back only a maximum set of results
res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
sum([ len(z['message']['items']) for z in res ])
## cursor_max - especially useful when a request could be very large
### e.g., "ecology" results in ~275K records, lets max at 10,000
### with 1000 at a time
res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
### use progress bar
res = cr.works(query = "octopus", cursor = "*", limit = 500, progress_bar = True)
# field queries
res = cr.works(query = "ecology", query_author = 'carl boettiger')
[ x['author'][0]['family'] for x in res['message']['items'] ]
# select certain fields to return
## as a comma separated string
cr.works(query = "ecology", select = "DOI,title")
## or as a list
cr.works(query = "ecology", select = ["DOI","title"])
# set an additional user-agent string
## the string is additional because it's added to the UA string we send in every request
## turn on verbose curl output to see the request headers sent
x = Crossref(ua_string = "foo bar")
x
x.works(ids = '10.1371/journal.pone.0033693')
## unset the additional user-agent string
x = Crossref()
x.works(ids = '10.1371/journal.pone.0033693')
"""
        if ids is not None:
return request(
self.mailto,
self.ua_string,
self.base_url,
"/works/",
ids,
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
None,
None,
None,
None,
progress_bar,
warn,
**kwargs
)
else:
return Request(
self.mailto,
self.ua_string,
self.base_url,
"/works/",
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
cursor,
cursor_max,
None,
progress_bar,
**kwargs
).do_request(should_warn=warn)
def members(
self,
ids: Union[List[str], str] = None,
query: str = None,
filter: dict = None,
offset: float = None,
limit: float = None,
sample: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
works: bool = False,
select: Union[List[str], str] = None,
cursor: str = None,
cursor_max: float = 5000,
progress_bar: bool = False,
warn: bool = False,
**kwargs
) -> dict:
"""
Search Crossref members
:param ids: DOIs (digital object identifier) or other identifiers
:param query: A query string
:param filter: Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
IMPORTANT: when `works=False` the filters that will work are the members
filters; when `works=True` the filters that will work are the works filters
        :param offset: Number of the record to start at, from 1 to 10000
        :param limit: Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
        :param sample: Number of random results to return. When you use the sample parameter,
            the limit and offset parameters are ignored. This parameter is only used when works are requested. Max: 100
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param select: Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: If true, works returned as well. Default: false
:param cursor: Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used. Only used if `works=True`
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
            parameter to set a maximum number of records. Of course, if fewer records are
            found than this value, you will get only those found. Only used if `works=True`
        :param progress_bar: Print a progress bar. Only used when doing deep paging
            (when using the cursor parameter) and if `works=True`. Default: False
:param warn: warn instead of raise error upon HTTP request error. default: False
Especially helpful when passing in many DOIs where some may lead to request failures.
Returns `None` when `warn=True` for each DOI that errors.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.members(ids = 98)
# get works
res = cr.members(ids = 98, works = True, limit = 3)
len(res['message']['items'])
[ z['DOI'] for z in res['message']['items'] ]
# cursor - deep paging
res = cr.members(ids = 98, works = True, cursor = "*")
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
## use progress bar
res = cr.members(ids = 98, works = True, cursor = "*", cursor_max = 500, progress_bar = True)
# field queries
res = cr.members(ids = 98, works = True, query_author = 'carl boettiger', limit = 7)
[ x['author'][0]['family'] for x in res['message']['items'] ]
# filters (as of this writing, 4 filters are avail., see filter_names())
res = cr.members(filter = {'has_public_references': True})
"""
return request(
self.mailto,
self.ua_string,
self.base_url,
"/members/",
ids,
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
works,
cursor,
cursor_max,
None,
progress_bar,
warn,
**kwargs
)
def prefixes(
self,
ids: Union[List[str], str],
filter: dict = None,
offset: float = None,
limit: float = None,
sample: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
works: bool = False,
select: Union[List[str], str] = None,
cursor: str = None,
cursor_max: float = 5000,
progress_bar: bool = False,
warn: bool = False,
**kwargs
) -> dict:
"""
Search Crossref prefixes
:param ids: DOIs (digital object identifier) or other identifiers. required
:param filter: Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
        :param offset: Number of the record to start at, from 1 to 10000
        :param limit: Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
        :param sample: Number of random results to return. When you use the sample parameter,
            the limit and offset parameters are ignored. This parameter is only used when works are requested. Max: 100
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param select: Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: If true, works returned as well. Default: false
:param cursor: Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used. Only used if `works=True`
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
            parameter to set a maximum number of records. Of course, if fewer records are
            found than this value, you will get only those found. Only used if `works=True`
        :param progress_bar: Print a progress bar. Only used when doing deep paging
            (when using the cursor parameter) and if `works=True`. Default: False
:param warn: warn instead of raise error upon HTTP request error. default: False
Especially helpful when passing in many DOIs where some may lead to request failures.
Returns `None` when `warn=True` for each DOI that errors.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.prefixes(ids = "10.1016")
cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])
# get works
cr.prefixes(ids = "10.1016", works = True)
# Limit number of results
cr.prefixes(ids = "10.1016", works = True, limit = 3)
# Sort and order
cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")
# cursor - deep paging
res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
## use progress bar
res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", cursor_max = 200, progress_bar = True)
# field queries
res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper', filter = {'type': 'journal-article'})
eds = [ x.get('editor') for x in res['message']['items'] ]
[ z for z in eds if z is not None ]
"""
check_kwargs(["query"], kwargs)
return request(
self.mailto,
self.ua_string,
self.base_url,
"/prefixes/",
ids,
query=None,
filter=filter,
offset=offset,
limit=limit,
sample=sample,
sort=sort,
order=order,
facet=facet,
select=select,
works=works,
cursor=cursor,
cursor_max=cursor_max,
progress_bar=progress_bar,
should_warn=warn,
**kwargs
)
def funders(
self,
ids: Union[List[str], str] = None,
query: str = None,
filter: dict = None,
offset: float = None,
limit: float = None,
sample: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
works: bool = False,
select: Union[List[str], str] = None,
cursor: str = None,
cursor_max: float = 5000,
progress_bar: bool = False,
warn: bool = False,
**kwargs
) -> dict:
"""
Search Crossref funders
Note that funders without IDs don't show up on the `/funders` route,
that is, won't show up in searches via this method
:param ids: DOIs (digital object identifier) or other identifiers
:param query: A query string
:param filter: Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
IMPORTANT: when `works=False` the filters that will work are the funders
filters; when `works=True` the filters that will work are the works filters
        :param offset: Number of the record to start at, from 1 to 10000
        :param limit: Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
        :param sample: Number of random results to return. When you use the sample parameter,
            the limit and offset parameters are ignored. This parameter is only used when works are requested. Max: 100
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param select: Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: If true, works returned as well. Default: false
:param cursor: Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used. Only used if `works=True`
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
            parameter to set a maximum number of records. Of course, if fewer records are
            found than this value, you will get only those found. Only used if `works=True`
        :param progress_bar: Print a progress bar. Only used when doing deep paging
            (when using the cursor parameter) and if `works=True`. Default: False
:param warn: warn instead of raise error upon HTTP request error. default: False
Especially helpful when passing in many DOIs where some may lead to request failures.
Returns `None` when `warn=True` for each DOI that errors.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.funders(ids = '10.13039/100000001')
cr.funders(query = "NSF")
# get works
cr.funders(ids = '10.13039/100000001', works = True)
# cursor - deep paging
res = cr.funders(ids = '10.13039/100000001', works = True, cursor = "*", limit = 200)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
## use progress bar
res = cr.funders(ids = '10.13039/100000001', works = True, cursor = "*", cursor_max = 200, progress_bar = True)
# field queries
res = cr.funders(ids = "10.13039/100000001", works = True, query_container_title = 'engineering', filter = {'type': 'journal-article'})
eds = [ x.get('editor') for x in res['message']['items'] ]
[ z for z in eds if z is not None ]
# filters (as of this writing, only 1 filter is avail., "location")
cr.funders(filter = {'location': "Sweden"})
# warn
cr.funders(ids = '10.13039/notarealdoi')
cr.funders(ids = '10.13039/notarealdoi', warn=True)
cr.funders(ids = '10.13039/notarealdoi', works=True, warn=True)
cr.funders(ids = ['10.13039/100000001','10.13039/notarealdoi'], works=True, warn=True)
x = cr.funders(ids = ['10.13039/100000001','10.13039/notarealdoi'], warn=True)
len(x) # 2
[type(w) for w in x] # [dict, NoneType]
"""
return request(
self.mailto,
self.ua_string,
self.base_url,
"/funders/",
ids,
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
works,
cursor,
cursor_max,
None,
progress_bar,
warn,
**kwargs
)
def journals(
self,
ids: Union[List[str], str] = None,
query: str = None,
filter: dict = None,
offset: float = None,
limit: float = None,
sample: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
works: bool = False,
select: Union[List[str], str] = None,
cursor: str = None,
cursor_max: float = 5000,
progress_bar: bool = False,
warn: bool = False,
**kwargs
) -> dict:
"""
Search Crossref journals
:param ids: DOIs (digital object identifier) or other identifiers
:param query: A query string
:param filter: Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
        :param offset: Number of the record to start at, from 1 to 10000
        :param limit: Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
        :param sample: Number of random results to return. When you use the sample parameter,
            the limit and offset parameters are ignored. This parameter is only used when works are requested. Max: 100
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param select: Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: If true, works returned as well. Default: false
:param cursor: Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used. Only used if `works=True`
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
            parameter to set a maximum number of records. Of course, if fewer records are
            found than this value, you will get only those found. Only used if `works=True`
        :param progress_bar: Print a progress bar. Only used when doing deep paging
            (when using the cursor parameter) and if `works=True`. Default: False
:param warn: warn instead of raise error upon HTTP request error. default: False
Especially helpful when passing in many DOIs where some may lead to request failures.
Returns `None` when `warn=True` for each DOI that errors.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.journals(ids = "2167-8359")
cr.journals()
# pass many ids
cr.journals(ids = ['1803-2427', '2326-4225'])
# search
cr.journals(query = "ecology")
cr.journals(query = "peerj")
# get works
cr.journals(ids = "2167-8359", works = True)
cr.journals(ids = "2167-8359", query = 'ecology', works = True, sort = 'score', order = "asc")
cr.journals(ids = "2167-8359", query = 'ecology', works = True, sort = 'score', order = "desc")
cr.journals(ids = "2167-8359", works = True, filter = {'from_pub_date': '2014-03-03'})
cr.journals(ids = '1803-2427', works = True)
cr.journals(ids = '1803-2427', works = True, sample = 1)
            cr.journals(limit = 2)
# cursor - deep paging
res = cr.journals(ids = "2167-8359", works = True, cursor = "*", cursor_max = 200)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
## use progress bar
res = cr.journals(ids = "2167-8359", works = True, cursor = "*", cursor_max = 200, progress_bar = True)
# field queries
res = cr.journals(ids = "2167-8359", works = True, query_bibliographic = 'fish', filter = {'type': 'journal-article'})
[ x.get('title') for x in res['message']['items'] ]
"""
return request(
self.mailto,
self.ua_string,
self.base_url,
"/journals/",
ids,
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
works,
cursor,
cursor_max,
None,
progress_bar,
warn,
**kwargs
)
def types(
self,
ids: Union[List[str], str] = None,
query: str = None,
filter: dict = None,
offset: float = None,
limit: float = None,
sample: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
works: bool = False,
select: Union[List[str], str] = None,
cursor: str = None,
cursor_max: float = 5000,
progress_bar: bool = False,
warn: bool = False,
**kwargs
) -> dict:
"""
Search Crossref types
:param ids: Type identifier, e.g., journal
:param query: A query string
:param filter: Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
        :param offset: Number of the record to start at, from 1 to 10000
        :param limit: Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
        :param sample: Number of random results to return. When you use the sample parameter,
            the limit and offset parameters are ignored. This parameter is only used when works are requested. Max: 100
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param select: Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: If true, works returned as well. Default: false
:param cursor: Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used. Only used if `works=True`
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
            parameter to set a maximum number of records. Of course, if fewer records are
            found than this value, you will get only those found. Only used if `works=True`
        :param progress_bar: Print a progress bar. Only used when doing deep paging
            (when using the cursor parameter) and if `works=True`. Default: False
        :param warn: warn instead of raise error upon HTTP request error. default: False
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.types()
cr.types(ids = "journal")
cr.types(ids = "journal-article")
cr.types(ids = "journal", works = True)
# deep paging
res = cr.types(ids = "journal-article", works = True, cursor = "*", cursor_max = 120)
## use progress bar
res = cr.types(ids = "journal-article", works = True, cursor = "*", cursor_max = 120, progress_bar = True)
# field queries
res = cr.types(ids = "journal-article", works = True, query_bibliographic = 'gender', rows = 100)
[ x.get('title') for x in res['message']['items'] ]
"""
return request(
self.mailto,
self.ua_string,
self.base_url,
"/types/",
ids,
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
works,
cursor,
cursor_max,
None,
progress_bar,
warn,
**kwargs
)
def licenses(
self,
query: str = None,
offset: float = None,
limit: float = None,
sort: str = None,
order: str = None,
facet: Union[str, bool] = None,
**kwargs
) -> dict:
"""
Search Crossref licenses
:param query: A query string
        :param offset: Number of the record to start at, from 1 to 10000
:param limit: Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sort: Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: Sort order, one of 'asc' or 'desc'
:param facet: Set to `true` to include facet results (default: false).
            Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license:*`.
See Facets_ for options.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.licenses()
cr.licenses(query = "creative")
"""
check_kwargs(["ids", "filter", "works"], kwargs)
res = request(
self.mailto,
self.ua_string,
self.base_url,
"/licenses/",
None,
query,
None,
offset,
limit,
None,
sort,
order,
facet,
None,
None,
None,
None,
**kwargs
)
return res
def registration_agency(self, ids: Union[List[str], str], **kwargs) -> list:
"""
Determine registration agency for DOIs
:param ids: DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:rtype: list
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
"""
check_kwargs(
[
"query",
"filter",
"offset",
"limit",
"sample",
"sort",
"order",
"facet",
"works",
],
kwargs,
)
res = request(
self.mailto,
self.ua_string,
self.base_url,
"/works/",
ids,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
True,
**kwargs
)
        k = res if isinstance(res, list) else [res]
return [z["message"]["agency"]["label"] for z in k]
def random_dois(self, sample: int = 10, **kwargs) -> list:
"""
Get a random set of DOIs
:param sample: Number of random DOIs to return. Default: 10. Max: 100
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:rtype: list
Usage::
from habanero import Crossref
cr = Crossref()
cr.random_dois(1)
cr.random_dois(10)
cr.random_dois(50)
cr.random_dois(100)
"""
res = request(
self.mailto,
self.ua_string,
self.base_url,
"/works/",
None,
None,
None,
None,
None,
sample,
None,
None,
None,
None,
True,
None,
None,
None,
**kwargs
)
return [z["DOI"] for z in res["message"]["items"]]
def filter_names(self, type: str = "works") -> list:
"""
Filter names - just the names of each filter
Filters are used in the Crossref search API to modify searches.
As filters are introduced or taken away, we may get out of sync; check
the docs for the latest https://github.com/CrossRef/rest-api-doc
:param type: what type of filters, i.e., what API route, matches
methods here. one of "works", "members", or "funders". Default: "works"
:rtype: list
Usage::
from habanero import Crossref
cr = Crossref()
cr.filter_names()
cr.filter_names("members")
cr.filter_names("funders")
"""
nms = list(self.filter_details(type).keys())
nms.sort()
return nms
def filter_details(self, type: str = "works") -> dict:
"""
Filter details - filter names, possible values, and description
Filters are used in the Crossref search API to modify searches.
As filters are introduced or taken away, we may get out of sync; check
the docs for the latest https://github.com/CrossRef/rest-api-doc
:param type: what type of filters, i.e., what API route,
matches methods here. one of "works", "members", or "funders".
Default: "works"
:rtype: dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.filter_details()
cr.filter_details("members")
cr.filter_details("funders")
# Get descriptions for each filter
x = cr.filter_details()
[ z['description'] for z in x.values() ]
"""
types = ["works", "members", "funders"]
if type not in types:
raise ValueError("'type' must be one of " + "', '".join(types))
output: dict[str, dict] = {
"works": works_filter_details,
"members": members_filter_details,
"funders": funders_filter_details,
}[type]
return output
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,523 | sckott/habanero | refs/heads/main | /habanero/crossref/workscontainer.py | from typing import Union
class WorksContainer:
"""
WorksContainer: Class for working with works results
:rtype: list
Usage::
from habanero import Crossref, WorksContainer
cr = Crossref()
res = cr.works(ids=['10.1136/jclinpath-2020-206745', '10.1136/esmoopen-2020-000776'])
x = WorksContainer(res)
x
x.works
x.doi
x.license
x.title
x.abstract
res2 = cr.works(limit = 2)
x = WorksContainer(res2)
x
x.works
x.doi
x.license
x.title
x.abstract
res3 = cr.members(ids = 98, works = True, limit = 5)
x = WorksContainer(res3)
x
x.works
x.doi
x.license
x.title
x.abstract
"""
def __init__(self, input) -> None:
super(WorksContainer, self).__init__()
if not input:
raise ValueError("input len must be > zero")
self.__input = input
self.works = self.works_handler(self.__input)
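        # expose each top-level metadata field of the first work as a list
        # attribute across all works, e.g. x.doi, x.title, x.license
        # (keys are lower-cased and "-" becomes "_")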
keys = list(self.works[0].keys())
for key in keys:
values = [work.get(key, None) for work in self.works]
setattr(self, key.lower().replace("-", "_"), values)
def __repr__(self) -> str:
return """<%s: No. works: %s>""" % (
type(self).__name__,
len(self.works),
)
def works_handler(self, x: Union[list, dict]) -> list:
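        # normalize every shape habanero can hand back into a flat list of
        # work records: a single 'work' dict, a 'work-list' dict, or a list
        # of either (as produced by cursor-based deep paging)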
message_type = (
[w["message-type"] for w in x][0]
if isinstance(x, list)
else x["message-type"]
)
if isinstance(x, list):
if x[0]["message-type"] == "work":
x = list(filter(lambda w: w["message-type"] == "work", x))
return [w["message"] for w in x]
elif x[0]["message-type"] == "work-list":
x = list(filter(lambda w: w["message-type"] == "work-list", x))
items = [w["message"]["items"] for w in x]
return [z for sublist in items for z in sublist]
else:
raise TypeError(
f"can only handle Crossref message-type 'work' & 'work-list', got: '{message_type}'"
)
elif isinstance(x, dict) and x["message-type"] == "work-list":
return x["message"]["items"]
elif isinstance(x, dict) and x["message-type"] == "work":
return [x["message"]]
else:
raise TypeError(
f"can only handle Crossref message-type 'work' & 'work-list', got: '{message_type}'"
)
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,524 | sckott/habanero | refs/heads/main | /test/test-registration_agency.py | import pytest
from requests import HTTPError
from habanero import Crossref
cr = Crossref()
@pytest.mark.vcr
def test_registration_agency():
"""registration agency"""
res = cr.registration_agency("10.1126/science.169.3946.635")
assert list == res.__class__
assert str == res[0].__class__
@pytest.mark.vcr
def test_registration_agency_unicode():
"""registration agency- unicode"""
res = cr.registration_agency("10.1126/science.169.3946.635")
assert list == res.__class__
assert str == res[0].__class__
@pytest.mark.vcr
def test_registration_agency_bad_request():
"""registration agency - bad request"""
with pytest.raises(HTTPError):
cr.registration_agency(5)
| {"/test/test-workscontainer.py": ["/habanero/__init__.py"], "/test/test-pagination_params.py": ["/habanero/__init__.py"], "/habanero/counts/counts.py": ["/habanero/habanero_utils.py"], "/habanero/counts/__init__.py": ["/habanero/counts/counts.py"], "/test/test-licenses.py": ["/habanero/__init__.py"], "/test/test-types.py": ["/habanero/__init__.py"], "/test/test-cursor.py": ["/habanero/__init__.py"], "/habanero/cnrequest.py": ["/habanero/cn_formats.py", "/habanero/habanero_utils.py"], "/test/test-filters.py": ["/habanero/__init__.py"], "/habanero/__init__.py": ["/habanero/cn/__init__.py", "/habanero/counts/__init__.py", "/habanero/crossref/__init__.py", "/habanero/exceptions.py"], "/habanero/habanero_utils.py": ["/habanero/__init__.py", "/habanero/exceptions.py", "/habanero/noworks.py", "/habanero/response.py"], "/habanero/cn/__init__.py": ["/habanero/cn/cn.py", "/habanero/cn/styles.py"], "/habanero/crossref/__init__.py": ["/habanero/crossref/crossref.py", "/habanero/crossref/workscontainer.py"], "/test/test-random_dois.py": ["/habanero/__init__.py"], "/habanero/cn/cn.py": ["/habanero/cnrequest.py", "/habanero/cn/constants.py"], "/habanero/request.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py", "/habanero/request_class.py"], "/test/test-funders.py": ["/habanero/__init__.py"], "/habanero/cn/styles.py": ["/habanero/habanero_utils.py"], "/test/test-journals.py": ["/habanero/__init__.py"], "/test/test-settings.py": ["/habanero/__init__.py"], "/habanero/request_class.py": ["/habanero/exceptions.py", "/habanero/filterhandler.py", "/habanero/habanero_utils.py"], "/habanero/crossref/crossref.py": ["/habanero/habanero_utils.py", "/habanero/request.py", "/habanero/request_class.py", "/habanero/crossref/filters.py"], "/test/test-registration_agency.py": ["/habanero/__init__.py"]} |
77,525 | StasPsaryuk/ocr_by_psariuk | refs/heads/master | /pre_image.py | # -*- coding: utf-8 -*-
# Import the modules needed for image pre-processing
from PIL import Image, ImageFilter, ImageChops
import cv2
# Image-filtering function built on the OpenCV library
def get_image(filename:str):
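    # Pipeline: read the image, zero out pixels at or below 127
    # (THRESH_TOZERO keeps brighter pixels unchanged), round-trip through a
    # temp file, then crop to the bounding box of non-zero content with PIL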
image = cv2.imread(filename)
    gray = cv2.threshold(image, 127, 255, cv2.THRESH_TOZERO)[1]
filename = "{}.jpg".format('123')
cv2.imwrite('tmp/'+filename, gray)
tmp_image = Image.open('tmp/'+filename)
tmp_image = tmp_image.crop(tmp_image.getbbox())
tmp_image.save('tmp/321.png')
return tmp_image
| {"/main.py": ["/pre_image.py", "/recognize.py", "/finder.py", "/xls_writer.py"]} |
77,526 | StasPsaryuk/ocr_by_psariuk | refs/heads/master | /recognize.py | # -*- coding: utf-8 -*-
# Import the modules needed for recognition
import pytesseract
import re
# Recognize text with Tesseract and filter it by
# keywords (broker phone numbers)
def get_text(image, lang):
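    # OCR the image, join the non-empty lines into one string, then use the
    # 10-digit phone numbers found in it as delimiters to split the text into
    # individual ads (stepping over the phones two at a time, apparently
    # because the source magazine prints two numbers per ad)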
text = pytesseract.image_to_string(image, lang)
    text_list = text.split('\n')
result_list = []
result = []
tmp_str = ''
    for item in text_list:
if item.strip() != '':
tmp_str += ' ' + item
    phones = re.findall(r'\d{10}', tmp_str)
    # print(phones)
    # Split the recognized text into individual ads
for item in range(0, len(phones), 2):
if item == 0:
start_index = 0
end_index = tmp_str.find(phones[item]) + 10
obj = tmp_str[start_index:end_index]
tmp_str = tmp_str[end_index:]
else:
start_index = tmp_str.find(phones[item-1])+10
end_index = tmp_str.find(phones[item]) + 10
if end_index - start_index < 25 or tmp_str[start_index:end_index].strip() == '':
pass
else:
obj = tmp_str[start_index:end_index]
tmp_str = tmp_str[end_index:]
result_list.append(obj)
    # Correct common recognition errors
for item in result_list:
if item.find('кімн') != -1:
obj = item[item.find('кімн')-2:]
obj = obj.replace('жім', 'кім')
result.append(obj)
print(obj)
return result
| {"/main.py": ["/pre_image.py", "/recognize.py", "/finder.py", "/xls_writer.py"]} |
77,527 | StasPsaryuk/ocr_by_psariuk | refs/heads/master | /xls_writer.py | # -*- coding: utf-8 -*-
# Import the modules needed for writing xls files
import xlwt
from uuid import uuid4
# Save the filtered ads (phone number, ad text) to an xls file
def writer(ls: list, count_room: int):
    file_name = str(uuid4().hex) + '_output.xls'
    wb = xlwt.Workbook()
    ws = wb.add_sheet(str(count_room) + 'кімн.')
    for item in range(len(ls)):
        ws.write(item, 0, ls[item][0])
        ws.write(item, 1, ls[item][1])
    wb.save(file_name)
    return file_name
# Write plain recognized text to an xls file, one line per row
def text_writer(ls: list):
    file_name = str(uuid4().hex) + '_output.xls'
    wb = xlwt.Workbook()
    ws = wb.add_sheet('Лист')
    for item in range(len(ls)):
        ws.write(item, 0, ls[item])
    wb.save(file_name)
    return file_name
77,528 | StasPsaryuk/ocr_by_psariuk | refs/heads/master | /finder.py | # -*- coding: utf-8 -*-
# Import the modules needed for reading xls files and regex matching
import re
import xlrd
# Sort recognized ads into buckets by keyword (room count)
def sort_list(ls: list):
result = [[], [], []]
for item in ls:
if item.startswith('1-кімн.'):
result[0].append(item)
if item.startswith('2-кімн.'):
result[1].append(item)
if item.startswith('3-кімн.'):
result[2].append(item)
return result
# Load the database of broker (realtor) phone numbers
def get_tel_rieltors(file: object):
    wb = xlrd.open_workbook(file)
    sheet = wb.sheet_by_name('Ріелтори')
    # skip the header row and flatten to a plain list of cell values so that
    # membership tests against individual phone strings work
    rows = [sheet.row_values(rownum) for rownum in range(sheet.nrows)][1:]
    return [cell for row in rows for cell in row]
# Find all 10-digit phone numbers in a piece of text
def search(ls: str):
    tel = re.findall(r'\d{10}', ls)
if tel:
return tel
| {"/main.py": ["/pre_image.py", "/recognize.py", "/finder.py", "/xls_writer.py"]} |
77,529 | StasPsaryuk/ocr_by_psariuk | refs/heads/master | /main.py | # --*-- coding=utf-8 --*--
# Імпорт бібліотек
import telebot
import pytesseract
try:
from PIL import Image
except ImportError:
import Image
# Import the recognition and functional-analysis modules
from pre_image import get_image
from recognize import get_text
from finder import search, get_tel_rieltors, sort_list
from xls_writer import writer, text_writer
# Constants / dialog-state flags
rieltors = 'filtr.xls'
result = []
start = 0
count_rooms = 0
lang = 'ukr'
todo = 0
is_xls = 0
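# The bot works as a small state machine driven by the flags above:
#   start       - 1 once /start has been handled
#   todo        - 1 = plain OCR, 2 = functional OCR for the "Від і До" magazine
#   is_xls      - 1 = return the result as an xls file, 2 = return it in chat
#   count_rooms - ad filter: number of rooms (1 or 2)
#   lang        - Tesseract language code ('ukr' or 'eng')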
if __name__ == '__main__':
bot = telebot.TeleBot('1028387271:AAEhPKKCfQBNeadeMS1k1jMUZpusniiprv4')
    # Decorators - handlers for incoming commands, text and images
@bot.message_handler(commands=['start'])
def start_message(message):
global count_rooms
global todo
global is_xls
global start
start = 1
count_rooms = 0
todo = 0
is_xls = 0
        # Use reply-keyboard buttons to drive the bot
        user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('Англійська мова')
user_markup.row('Українська мова')
bot.send_message(message.chat.id, 'Доброго дня ' + message.from_user.first_name + ', виберіть мову розпізнавання:', reply_markup=user_markup)
    # Handler for the 'about' command
@bot.message_handler(commands=['about'])
def about_message(message):
bot.send_message(message.chat.id, "Представлений проект - реалізація системи оптичного розпізнавання символів та функціональне опрацювання фільтрації тексту за ключовими словами на прикладі обробки оголошень про нерухомість журналу 'Від і До'")
    # Helper - prompts for the recognition scenario
# @bot.message_handler(commands=['tdo'])
def todo_message(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('Розпізнавання звичайного тексту')
user_markup.row("Функціональне OCR для журналу 'Від і До'")
bot.send_message(message.chat.id, "Виберіть сценарій розпізнавання:", reply_markup=user_markup)
    # Helper - prompts for how to return the result
# @bot.message_handler(commands=['xls'])
def xls_message(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('Зберегти в форматі xls')
user_markup.row('Повернути в діалог')
bot.send_message(message.chat.id, "Виберіть спосіб повернення опрацьованих даних:", reply_markup=user_markup)
    # Helper - prompts for the ad filter (number of rooms)
# @bot.message_handler(commands=['croom'])
def croom_message(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('1-кімнатні')
user_markup.row('2-кімнатні')
user_markup.row('Повернутись на початок /start')
bot.send_message(message.chat.id, "Виберіть параметр для фільтрування оголошень ", reply_markup=user_markup)
    # Helper - prompts to return to the start of the dialog
# @bot.message_handler(commands=['start'])
def return_message(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('Повернутись на початок /start')
bot.send_message(message.chat.id, "Надішліть зображення для розпізнавання або поверніться на початок", reply_markup=user_markup)
    # General handler for incoming text
@bot.message_handler(content_types=['text'])
def handle_text(message):
global start
global lang
global count_rooms
global todo
global is_xls
if message.text == "Англійська мова":
lang = 'eng'
elif message.text == "Українська мова":
lang = 'ukr'
if message.text == "Розпізнавання звичайного тексту":
todo = 1
#bot.send_message(message.chat.id, 'Надішліть зображення для розпізнавання.')
elif message.text == "Функціональне OCR для журналу 'Від і До'":
todo = 2
#bot.send_message(message.chat.id, 'Надішліть зображення для розпізнавання.')
if message.text == "Зберегти в форматі xls":
is_xls = 1
bot.send_message(message.chat.id, 'Інформацію буде збережено в файл')
elif message.text == 'Повернути в діалог':
is_xls = 2
bot.send_message(message.chat.id, 'Інформацію буде повернено в діалог')
if message.text == '1-кімнатні':
count_rooms = 1
bot.send_message(message.chat.id, 'Надішліть зображення для розпізнавання.')
elif message.text == '2-кімнатні':
count_rooms = 2
bot.send_message(message.chat.id, 'Надішліть зображення для розпізнавання.')
elif message.text == 'Повернутись на початок /start':
count_rooms = 0
todo = 0
is_xls = 0
lang = 'ukr'
start = 0
#bot.send_message(message.chat.id, '/start')
# Check the button-state flags to choose the bot's execution branch
if not start:
start_message(message)
elif not todo:
todo_message(message)
elif todo == 1:
return_message(message)
elif not is_xls and todo == 2:
xls_message(message)
elif not count_rooms and todo == 2:
croom_message(message)
# Function that returns the result as an xls file
def send_xls_file(message, file_name):
file = open(file_name, 'rb')
bot.send_document(message.chat.id, file)
# Decorator for handling an incoming image
@bot.message_handler(content_types=['photo'])
def handle_docs_photo(message):
global todo
global lang
global is_xls
global count_rooms
# Bot operation: specialized recognition of the 'Від і До' journal
if todo == 2:
try:
fileID = message.photo[-1].file_id
file_info = bot.get_file(fileID)
downloaded_file = bot.download_file(file_info.file_path)
with open("image.jpg", 'wb') as new_file:
new_file.write(downloaded_file)
bot.reply_to(message, "Опрацьовую отримане зображення. Тривалість обробки залежить від кількості символів на зображенні.")
result = []
image = get_image("image.jpg")
ad = get_text(image, lang)
# print(ad)
rielt_list = get_tel_rieltors(rieltors)
#print(rielt_list)
ad_sorted = sort_list(ad)
print(ad_sorted)
for item in ad_sorted[count_rooms - 1]:
if search(item):
for record in search(item):
if record not in rielt_list:
result.append([record, item])
print(result)
if result != []:
bot.send_message(message.chat.id, "Відфільтровані оголошення:")
if is_xls == 1:
file_name = writer(result, count_rooms)
send_xls_file(message, file_name)
elif is_xls == 2:
for item in result:
bot.send_message(message.chat.id, item[1])
else:
bot.send_message(message.chat.id, message.from_user.first_name + ' , на жаль, співпадінь по фільтру не знайдено.')
except Exception as e:
bot.reply_to(message, e)
# Bot operation: plain text recognition from images
elif todo == 1:
try:
fileID = message.photo[-1].file_id
file_info = bot.get_file(fileID)
downloaded_file = bot.download_file(file_info.file_path)
with open("image.png", 'wb') as new_file:
new_file.write(downloaded_file)
bot.reply_to(message, "Опрацьовую отримане зображення. Тривалість обробки залежить від кількості символів на зображенні.")
text = ''
#image = get_image("image.png")
text = pytesseract.image_to_string('image.png', lang)
bot.send_message(message.chat.id, text)
except Exception as e:
bot.reply_to(message, e)
bot.polling(none_stop=True, interval=0)
| {"/main.py": ["/pre_image.py", "/recognize.py", "/finder.py", "/xls_writer.py"]} |
77,532 | qsoyq/sanic-cms | refs/heads/master | /cms/models.py | from config import Engine, DATABASE
from peewee import *
from playhouse.db_url import connect
db_engine = DATABASE.get('DB_ENGINE')
db_name = DATABASE.get('DB_NAME')
db_user = DATABASE.get('DB_USER')
db_password = DATABASE.get('DB_PASSWORD')
db_host = DATABASE.get('DB_HOST')
db_port = DATABASE.get('DB_PORT')
print(db_engine == Engine.SQLITE)
if db_engine == Engine.SQLITE:
db = connect('%s:///%s' % (db_engine.value, db_name))
elif db_engine in (Engine.MYSQL, Engine.POSTGRESQL):
db = connect('%s://%s:%s@%s:%d/%s' % (
db_engine.value, db_user, db_password,
db_host, db_port, db_name))
else:
raise ValueError('Unsupported DB_ENGINE: %s' % db_engine)
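# The resulting connection URLs look like, e.g. (hypothetical values):
# sqlite:///cms.db
# mysql://cms_user:secret@localhost:3306/cms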
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
username = CharField(unique=True)
| {"/app.py": ["/cms/models.py"]} |
77,533 | qsoyq/sanic-cms | refs/heads/master | /app.py | import os
from sanic import Sanic
from sanic.config import Config
from sanic.response import html, text
from sanic.exceptions import RequestTimeout
from jinja2 import PackageLoader
from sanic_jinja2 import SanicJinja2
from config import DATABASE, THEMES
from cms.models import db, User
app = Sanic(__name__)
app.config.update(DATABASE)
app.config.update(THEMES)
# static files serve
app.static('/static', './cms/static')
# Jinja2 template engine
template_package_loader = PackageLoader(app.name, 'cms/templates')
template = SanicJinja2(app, loader=template_package_loader)
# Add listeners
@app.listener('before_server_start')
async def setup_db(app, loop):
# database create
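# a 'db.lock' marker file ensures the tables are only created on the first start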
db_lock = 'db.lock'
if not os.path.exists(db_lock):
db.create_tables([User])
open(db_lock, "w+").close()
# Add middleware
@app.exception(RequestTimeout)
def timeout(request, exception):
if request is not None:
print('ERROR HANDLE:\t', request, exception)
return text('RequestTimeout from error_handler.', 408)
# Add views
async def index(request):
"""Index view"""
return template.render('index.html', request, greetings='Welcome to Sanic-CMS')
# Add routes
app.add_route(index, '/')
app.add_route(index, '/index.html')
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000, debug=True)
| {"/app.py": ["/cms/models.py"]} |
77,547 | saddam1999/mtkclient | refs/heads/main | /stage2.py | #!/usr/bin/env python3
import os
import logging
import sys
import argparse
from binascii import hexlify
from struct import pack, unpack
from Library.usblib import usb_class
from Library.utils import LogBase
from Library.utils import print_progress
class Stage2(metaclass=LogBase):
def __init__(self, args, loglevel=logging.INFO):
self.__logger = self.__logger
self.args = args
self.info = self.__logger.info
self.error = self.__logger.error
self.warning = self.__logger.warning
if loglevel == logging.DEBUG:
logfilename = os.path.join("logs", "log.txt")
if os.path.exists(logfilename):
os.remove(logfilename)
fh = logging.FileHandler(logfilename)
self.__logger.addHandler(fh)
self.__logger.setLevel(logging.DEBUG)
else:
self.__logger.setLevel(logging.INFO)
portconfig = [[0x0E8D, 0x0003, -1], [0x0E8D, 0x2000, -1]]
self.cdc = usb_class(portconfig=portconfig, loglevel=loglevel, devclass=10)
def connect(self):
self.cdc.connected = self.cdc.connect()
return self.cdc.connected
def close(self):
if self.cdc.connected:
self.cdc.close()
def readflash(self, type: int, start, length, display=False, filename: str = None):
wf = None
buffer = bytearray()
if filename is not None:
wf = open(filename, "wb")
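# Flash is addressed in 0x200-byte (512-byte) sectors: round the requested
# length up to whole sectors and convert the byte offset to a sector index.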
sectors = (length // 0x200) + (1 if length % 0x200 else 0)
startsector = (start // 0x200)
# emmc_switch(1)
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x1002))
self.cdc.usbwrite(pack(">I", type))
if display:
print_progress(0, 100, prefix='Progress:', suffix='Complete', bar_length=50)
# kick-wdt
# self.cdc.usbwrite(pack(">I", 0xf00dd00d))
# self.cdc.usbwrite(pack(">I", 0x3001))
bytestoread = sectors * 0x200
bytesread = 0
old = 0
# emmc_read(0)
for sector in range(startsector, startsector + sectors):
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x1000))
self.cdc.usbwrite(pack(">I", sector))
tmp = self.cdc.usbread(0x200, 0x200)
if len(tmp) != 0x200:
self.error("Error on getting data")
return
if display:
prog = sector / sectors * 100
if round(prog, 1) > old:
print_progress(prog, 100, prefix='Progress:',
suffix='Complete, Sector:' + hex((sectors * 0x200) - bytestoread),
bar_length=50)
old = round(prog, 1)
bytesread += len(tmp)
size = min(bytestoread, len(tmp))
if wf is not None:
wf.write(tmp[:size])
else:
buffer.extend(tmp)
bytestoread -= size
if display:
print_progress(100, 100, prefix='Complete: ', suffix=filename, bar_length=50)
if wf is not None:
wf.close()
else:
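# trim to the exact requested byte range, since start may not be sector-aligned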
return buffer[start % 0x200:(start % 0x200) + length]
def preloader(self, start, length, filename):
sectors = 0
if start != 0:
start = (start // 0x200)
if length != 0:
sectors = (length // 0x200) + (1 if length % 0x200 else 0)
self.info("Reading preloader...")
if self.cdc.connected:
if sectors == 0:
buffer = self.readflash(type=1, start=0, length=0x4000, display=False)
if len(buffer) != 0x4000:
print("Error on reading boot1 area.")
return
if buffer[:9] == b'EMMC_BOOT':
startbrlyt = unpack("<I", buffer[0x10:0x14])[0]
if buffer[startbrlyt:startbrlyt + 5] == b"BRLYT":
start = unpack("<I", buffer[startbrlyt + 0xC:startbrlyt + 0xC + 4])[0]
st = buffer[start:start + 4]
if st == b"MMM\x01":
length = unpack("<I", buffer[start + 0x20:start + 0x24])[0]
self.readflash(type=1, start=start, length=length, display=True, filename=filename)
print("Done")
return
if buffer[:4] == b"MMM\x01":
length = unpack("<I", buffer[start + 0x20:start + 0x24])[0]
self.readflash(type=1, start=start, length=length, display=True, filename=filename)
print("Done")
else:
start = 0
length = 0x40000
self.readflash(type=1, start=start, length=length, display=True, filename=filename)
else:
self.readflash(type=1, start=start, length=length, display=True, filename=filename)
print("Done")
def memread(self, start, length, filename):
bytestoread = length
addr = start
data = b""
pos = 0
wf = None
if filename is not None:
wf = open(filename, "wb")
while bytestoread > 0:
size = min(bytestoread, 0x200)
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x4000))
self.cdc.usbwrite(pack(">I", addr + pos))
self.cdc.usbwrite(pack(">I", size))
tmp = self.cdc.usbread(size, size)
if wf is not None:
wf.write(tmp)
else:
data += tmp
bytestoread -= size
pos += size
if wf is not None:
wf.close()
else:
self.info(f"{hex(start)}: " + hexlify(data).decode('utf-8'))
def memwrite(self, start, data, filename):
if filename is not None:
rf = open(filename, "rb")
bytestowrite = os.stat(filename).st_size
else:
if isinstance(data, str):
data = bytes.fromhex(data)
elif isinstance(data, int):
data = pack("<I", data)
bytestowrite = len(data)
addr = start
pos = 0
while bytestowrite > 0:
size = min(bytestowrite, 0x200)
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x4002))
self.cdc.usbwrite(pack(">I", addr + pos))
self.cdc.usbwrite(pack(">I", size))
if filename is None:
self.cdc.usbwrite(data[pos:pos + size])
else:
self.cdc.usbwrite(rf.read(size))
bytestowrite -= size
pos += size
ack = self.cdc.usbread(4)
if ack == b"\xD0\xD0\xD0\xD0":
self.info(f"Successfully wrote data to {hex(start)}.")
else:
self.info(f"Failed to write data to {hex(start)}.")
if filename is not None:
rf.close()
def rpmb(self, start, length, filename, reverse=False):
if start == 0:
start = 0
else:
start = (start // 0x100)
if length == 0:
sectors = 4 * 1024 * 1024 // 0x100
else:
sectors = (length // 0x100) + (1 if length % 0x100 else 0)
self.info("Reading rpmb...")
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x1002))
self.cdc.usbwrite(pack(">I", 0x1))
# kick-wdt
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x3001))
print_progress(0, 100, prefix='Progress:', suffix='Complete', bar_length=50)
bytesread = 0
old = 0
bytestoread = sectors * 0x100
with open(filename, "wb") as wf:
for sector in range(start, start + sectors):
self.cdc.usbwrite(pack(">I", 0xf00dd00d))
self.cdc.usbwrite(pack(">I", 0x2000))
self.cdc.usbwrite(pack(">H", sector))
tmp = self.cdc.usbread(0x100, 0x100)
if reverse:
tmp = tmp[::-1]
if len(tmp) != 0x100:
self.error("Error on getting data")
return
prog = sector / sectors * 100
if round(prog, 1) > old:
print_progress(prog, 100, prefix='Progress:',
suffix='Complete, Sector:' + hex((sectors * 0x100) - bytestoread),
bar_length=50)
old = round(prog, 1)
bytesread += 0x100
size = min(bytestoread, len(tmp))
wf.write(tmp[:size])
bytestoread -= size
print_progress(100, 100, prefix='Complete: ', suffix=filename, bar_length=50)
print("Done")
def getint(valuestr):
if valuestr == '':
return None
try:
return int(valuestr)
except ValueError:
try:
return int(valuestr, 16)
except ValueError:
return 0
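# Examples: getint("4096") -> 4096, getint("0x1000") -> 4096, getint("") -> None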
def main():
parser = argparse.ArgumentParser(description='Stage2 client (c) B.Kerler 2021.')
parser.add_argument('--rpmb', dest='rpmb', action="store_true",
help='Dump rpmb')
parser.add_argument('--reverse', dest='reverse', action="store_true",
help='Reverse rpmb byte order')
parser.add_argument('--preloader', dest='preloader', action="store_true",
help='Dump preloader')
parser.add_argument('--memread', dest='memread', action="store_true",
help='Dump memory')
parser.add_argument('--memwrite', dest='memwrite', action="store_true",
help='Write to memory')
parser.add_argument('--length', dest='length', type=str,
help='Max length to dump')
parser.add_argument('--start', dest='start', type=str,
help='Start offset to dump')
parser.add_argument('--data', dest='data', type=str,
help='Data to write')
parser.add_argument('--filename', dest='filename', type=str,
help='Read from / save to filename')
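# Example invocation (hypothetical values):
# python stage2.py --memread --start 0x200000 --length 0x10000 --filename dump.bin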
args = parser.parse_args()
start = getint(args.start)
length = getint(args.length)
if not os.path.exists("logs"):
os.mkdir("logs")
st2 = Stage2(args)
if st2.connect():
if args.rpmb:
if args.filename is None:
filename = os.path.join("logs", "rpmb")
else:
filename = args.filename
st2.rpmb(start, length, filename, args.reverse)
elif args.preloader:
if args.filename is None:
filename = os.path.join("logs", "preloader")
else:
filename = args.filename
st2.preloader(start, length, filename=filename)
elif args.memread:
st2.memread(start, length, args.filename)
elif args.memwrite:
st2.memwrite(start, args.data, args.filename)
st2.close()
if __name__ == "__main__":
main()
| {"/Library/pltools.py": ["/Library/hwcrypto_dxcc.py"]} |
77,548 | saddam1999/mtkclient | refs/heads/main | /Library/pltools.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# (c) B.Kerler 2018-2021 MIT License
import os
import logging
from binascii import hexlify
from struct import pack, unpack
from Library.cqdma import cqdma
from Library.utils import LogBase, print_progress
from Library.hwcrypto_sej import sej
from Library.hwcrypto_gcpu import GCpu
from Library.hwcrypto_dxcc import dxcc
from Library.kamakiri import Kamakiri
from Library.Port import Port
class PLTools(metaclass=LogBase):
def __init__(self, mtk, loglevel=logging.INFO):
self.mtk = mtk
self.__logger = self.__logger
self.info = self.__logger.info
self.debug = self.__logger.debug
self.error = self.__logger.error
self.warning = self.__logger.warning
self.chipconfig = self.mtk.config.chipconfig
self.config = self.mtk.config
self.usbwrite = self.mtk.port.usbwrite
self.usbread = self.mtk.port.usbread
self.read32 = self.mtk.preloader.read32
self.write32 = self.mtk.preloader.write32
self.hwcode = mtk.config.hwcode
# exploit types
self.cqdma = cqdma(mtk, loglevel)
self.kama = Kamakiri(self.mtk, self.__logger.level)
# crypto types
self.gcpu = GCpu(mtk, loglevel)
self.sej = sej(mtk, loglevel)
self.dxcc = dxcc(mtk, loglevel)
if loglevel == logging.DEBUG:
logfilename = os.path.join("logs", "log.txt")
if os.path.exists(logfilename):
os.remove(logfilename)
fh = logging.FileHandler(logfilename)
self.__logger.addHandler(fh)
self.__logger.setLevel(logging.DEBUG)
else:
self.__logger.setLevel(logging.INFO)
def runpayload(self, filename, ptype, offset=0, ack=0xA1A2A3A4, addr=None, dontack=False):
try:
with open(filename, "rb") as rf:
rf.seek(offset)
payload = rf.read()
self.info(f"Loading payload from {filename}, {hex(len(payload))} bytes")
except FileNotFoundError:
self.info("Couldn't open {filename} for reading.")
return False
if addr is None:
if ptype == "amonet":
addr = self.chipconfig.da_payload_addr
elif ptype == "kamakiri":
addr = self.chipconfig.brom_payload_addr
elif ptype == "hashimoto":
addr = self.chipconfig.da_payload_addr
elif ptype == "":
if self.mtk.config.target_config["sla"] or self.mtk.config.target_config["daa"]:
addr = self.chipconfig.brom_payload_addr
else:
addr = self.chipconfig.da_payload_addr
if ptype == "amonet":
self.info("Amonet Run")
if self.payload(payload, addr, "gcpu"):
if dontack:
return True
result = self.usbread(4)
if result == pack(">I", ack):
self.info("Successfully sent payload: " + filename)
return True
self.info("Error, payload answered instead: " + hexlify(result).decode('utf-8'))
return False
else:
self.error("Error on sending payload: " + filename)
return False
elif ptype == "kamakiri":
self.info("Kamakiri / DA Run")
if self.kama.payload(payload, addr, True):
if dontack:
return True
result = self.usbread(4)
if result == pack(">I", ack):
self.info("Successfully sent payload: " + filename)
return True
self.info("Error, payload answered instead: " + hexlify(result).decode('utf-8'))
return False
else:
self.error("Error on sending payload: " + filename)
elif ptype == "hashimoto":
self.info("Hashimoto Run")
if self.payload(payload, addr, "cqdma"):
if dontack:
return True
result = self.usbread(4)
if result == pack(">I", ack):
self.info("Successfully sent payload: " + filename)
return True
self.info("Error, payload answered instead: " + hexlify(result).decode('utf-8'))
return False
else:
self.error("Error on sending payload: " + filename)
else:
self.info("Kamakiri / DA Run")
if self.kama.payload(payload, addr, False):
if dontack:
return True
result = self.usbread(4)
if result == pack(">I", ack):
self.info("Successfully sent payload: " + filename)
return True
self.info("Error, payload answered instead: " + hexlify(result).decode('utf-8'))
return False
else:
self.error("Error on sending payload: " + filename)
def crash(self, mode=0):
self.info("Crashing da...")
if mode == 1:
self.mtk.preloader.send_da(0, 0x100, 0x100, b'\x00' * 0x100)
elif mode == 2:
self.mtk.preloader.read32(0, 0x100)
elif mode == 0:
try:
payload = b'\x00\x01\x9F\xE5\x10\xFF\x2F\xE1' + b'\x00' * 0x110
self.mtk.preloader.send_da(0x0, len(payload), 0x0, payload)
self.mtk.preloader.jump_da(0x0)
except Exception as e:
self.debug(str(e))
pass
def crasher(self, mtk, enforcecrash):
plt = PLTools(mtk, self.__logger.level)
if enforcecrash or not (mtk.port.cdc.vid == 0xE8D and mtk.port.cdc.pid == 0x0003):
self.info("We're not in bootrom, trying to crash da...")
for crashmode in range(0, 3):
try:
plt.crash(crashmode)
except Exception as e:
self.__logger.debug(str(e))
pass
portconfig = [[0xE8D, 0x0003, 1]]
mtk.port = Port(mtk, portconfig, self.__logger.level)
if mtk.preloader.init(maxtries=20):
break
return mtk
def run_dump_brom(self, filename, btype):
pfilename = os.path.join("payloads", "generic_dump_payload.bin")
if btype == "amonet":
if self.dump_brom(filename, "gcpu"):
self.info("Bootrom dumped as: " + filename)
return True
else:
self.error("Error on sending payload: " + pfilename)
elif btype == "hashimoto":
if self.dump_brom(filename, "cqdma"):
self.info("Bootrom dumped as: " + filename)
return True
else:
self.error("Error on sending payload: " + pfilename)
elif btype == "kamakiri" or btype is None:
self.info("Kamakiri / DA Run")
if self.runpayload(filename=pfilename, ptype="kamakiri", ack=0xC1C2C3C4, offset=0):
if self.kama.dump_brom(filename):
self.info("Bootrom dumped as: " + filename)
return True
else:
self.error("Error on sending payload: " + filename)
elif btype == "test":
data = self.aes_hwcrypt(data=b"", encrypt=False, mode="fde", btype="dxcc")
print(hexlify(data).decode('utf-8'))
else:
self.error("Unknown dumpbrom ptype: " + btype)
self.info("Available ptypes are: amonet, kamakiri, hashimoto")
self.error("Error on dumping Bootrom.")
return False
def run_crypto(self, data, iv, btype="sej", encrypt=True):
if data is None:
data = bytearray()
for i in range(32):
data.append(self.config.meid[i % len(self.config.meid)])
if btype == "":
encrypted = self.aes_hwcrypt(data=data, iv=iv, encrypt=encrypt, btype=btype)
return encrypted
return False
def disable_range_blacklist(self, btype):
if btype == "gcpu":
self.info("GCPU Init Crypto Engine")
self.gcpu.init()
self.gcpu.acquire()
self.gcpu.init()
self.gcpu.acquire()
self.info("Disable Caches")
self.mtk.preloader.run_ext_cmd(0xB1)
self.info("GCPU Disable Range Blacklist")
self.gcpu.disable_range_blacklist()
elif btype == "cqdma":
self.info("Disable Caches")
self.mtk.preloader.run_ext_cmd(0xB1)
self.info("CQDMA Disable Range Blacklist")
self.cqdma.disable_range_blacklist()
def dump_brom(self, filename, btype):
if btype == "gcpu" and self.chipconfig.gcpu_base is None:
self.error("Chipconfig has no gcpu_base field for this cpu")
return False
elif btype == "cqdma" and self.chipconfig.cqdma_base is None or self.chipconfig.ap_dma_mem is None:
self.error("Chipconfig has no cqdma_base and/or ap_dma_mem field for this cpu")
return False
if self.chipconfig.blacklist:
self.disable_range_blacklist(btype)
self.info("Dump bootrom")
print_progress(0, 100, prefix='Progress:', suffix='Complete', bar_length=50)
old = 0
with open(filename, 'wb') as wf:
for addr in range(0x0, 0x20000, 16):
prog = int(addr / 0x20000 * 100)
if round(prog, 1) > old:
print_progress(prog, 100, prefix='Progress:', suffix='Complete, addr %08X' % addr,
bar_length=50)
old = round(prog, 1)
if btype == "gcpu":
wf.write(self.gcpu.aes_read_cbc(addr))
elif btype == "cqdma":
if not self.chipconfig.blacklist:
wf.write(self.cqdma.mem_read(addr, 16, True))
else:
wf.write(self.cqdma.mem_read(addr, 16, False))
print_progress(100, 100, prefix='Progress:', suffix='Complete', bar_length=50)
return True
def payload(self, payload, daaddr, ptype):
self.disable_range_blacklist(ptype)
try:
while len(payload) % 4 != 0:
payload += b"\x00"
words = []
for x in range(len(payload) // 4):
word = payload[x * 4:(x + 1) * 4]
word = unpack("<I", word)[0]
words.append(word)
self.info("Sending payload")
self.write32(daaddr, words)
self.info("Running payload ...")
self.write32(self.mtk.config.chipconfig.blacklist[0][0] + 0x40, daaddr)
return True
except Exception as e:
self.error("Failed to load payload file. Error: " + str(e))
return False
def aes_hwcrypt(self, data, iv=None, encrypt=True, mode="cbc", btype="sej"):
if btype == "sej":
self.disable_range_blacklist(btype)
if encrypt:
if mode == "cbc":
return self.sej.hw_aes128_cbc_encrypt(buf=data, encrypt=True)
else:
if mode == "cbc":
return self.sej.hw_aes128_cbc_encrypt(buf=data, encrypt=False)
elif btype == "gcpu":
self.disable_range_blacklist(btype)
addr = self.chipconfig.da_payload_addr
if mode == "ebc":
return self.gcpu.aes_read_ebc(data=data, encrypt=encrypt)
if mode == "cbc":
if self.gcpu.aes_setup_cbc(addr=addr, data=data, iv=iv, encrypt=encrypt):
return self.gcpu.aes_read_cbc(addr=addr, encrypt=encrypt)
elif btype == "cqdma":
self.disable_range_blacklist(btype)
elif btype == "dxcc":
if self.chipconfig.cqdma_base is not None:
self.disable_range_blacklist("cqdma")
elif self.chipconfig.gcpu_base is not None:
self.disable_range_blacklist("gcpu")
if mode == "fde":
return self.dxcc.generate_fde()
elif mode == "rpmb":
return self.dxcc.generate_rpmb()
else:
self.error("Unknown aes_hwcrypt type: " + btype)
self.error("aes_hwcrypt supported types are: sej")
return bytearray()
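# Minimal usage sketch (hypothetical, mirrors the "test" branch of run_dump_brom above):
# plt = PLTools(mtk)
# fde_key = plt.aes_hwcrypt(data=b"", encrypt=False, mode="fde", btype="dxcc")
# rpmb_key = plt.aes_hwcrypt(data=b"", encrypt=False, mode="rpmb", btype="dxcc")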
| {"/Library/pltools.py": ["/Library/hwcrypto_dxcc.py"]} |
77,549 | saddam1999/mtkclient | refs/heads/main | /Library/hwcrypto_dxcc.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# (c) B.Kerler 2018-2021 MIT License
import logging, os
from struct import pack
from Library.utils import LogBase
regval = {
"DXCC_CON": 0x0000,
}
class dxcc_reg:
def __init__(self, mtk):
self.mtk = mtk
self.dxcc_base = mtk.config.chipconfig.dxcc_base
self.read32 = self.mtk.preloader.read32
self.write32 = self.mtk.preloader.write32
def __setattr__(self, key, value):
if key in ("mtk", "dxcc_base", "read32", "write32", "regval"):
return super(dxcc_reg, self).__setattr__(key, value)
if key in regval:
addr = regval[key] + self.dxcc_base
return self.write32(addr, value)
else:
return super(dxcc_reg, self).__setattr__(key, value)
def __getattribute__(self, item):
if item in ("mtk", "dxcc_base", "read32", "write32", "regval"):
return super(dxcc_reg, self).__getattribute__(item)
if item in regval:
addr = regval[item] + self.dxcc_base
return self.read32(addr)
else:
return super(dxcc_reg, self).__getattribute__(item)
class dxcc(metaclass=LogBase):
rpmb_ikey = b"RPMB KEY"
rpmb_salt = b"SASI"
fde_ikey = b"SQNC!LFZ"
fde_salt = b"TBTJ"
DX_HOST_IRR = 0xA00
DX_HOST_ICR = 0xA08 # DX_CC_REG_OFFSET(HOST_RGF, HOST_ICR)
DX_DSCRPTR_QUEUE0_WORD0 = 0xE80
DX_DSCRPTR_QUEUE0_WORD1 = 0xE84
DX_DSCRPTR_QUEUE0_WORD2 = 0xE88
DX_DSCRPTR_QUEUE0_WORD3 = 0xE8C
DX_DSCRPTR_QUEUE0_WORD4 = 0xE90
DX_DSCRPTR_QUEUE0_WORD5 = 0xE94
DX_DSCRPTR_QUEUE0_CONTENT = 0xE9C
DX_HOST_SEP_HOST_GPR4 = 0xAA0
def SB_HalClearInterruptBit(self):
self.write32(self.dxcc_base + self.DX_HOST_ICR, [4])
def SB_CryptoWait(self):
while True:
value = self.read32(self.dxcc_base + self.DX_HOST_IRR)
if value != 0:
return value
return None
def SaSi_PalDmaUnMap(self, value1):
return
def SaSi_PalDmaMap(self, value1):
# value2=value1
return value1
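# Each hardware descriptor is six 32-bit words pushed into the engine's
# DSCRPTR_QUEUE0_WORD0..5 registers once the queue has room.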
def SaSi_SB_AddDescSequence(self, data):
while True:
if self.read32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_CONTENT) << 0x1C != 0:
break
self.write32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_WORD0, data[0])
self.write32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_WORD1, data[1])
self.write32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_WORD2, data[2])
self.write32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_WORD3, data[3])
self.write32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_WORD4, data[4])
self.write32(self.dxcc_base + self.DX_DSCRPTR_QUEUE0_WORD5, data[5])
def __init__(self, mtk, loglevel=logging.INFO):
self.mtk = mtk
self.__logger = self.__logger
self.hwcode = self.mtk.config.hwcode
self.reg = dxcc_reg(mtk)
self.dxcc_base = self.mtk.config.chipconfig.dxcc_base
self.read32 = self.mtk.preloader.read32
self.write32 = self.mtk.preloader.write32
self.writemem = self.mtk.preloader.writemem
self.info = self.__logger.info
if loglevel == logging.DEBUG:
logfilename = os.path.join("logs", "log.txt")
if os.path.exists(logfilename):
os.remove(logfilename)
fh = logging.FileHandler(logfilename)
self.__logger.addHandler(fh)
self.__logger.setLevel(logging.DEBUG)
else:
self.__logger.setLevel(logging.INFO)
def tzcc_clk(self, value):
if value:
res = self.write32(0x1000108C, 0x18000000)
else:
res = self.write32(0x10001088, 0x8000000)
return res
def generate_fde(self):
self.tzcc_clk(1)
fdekey = self.SBROM_KeyDerivation(1, self.fde_ikey, self.fde_salt, 0x10)
self.tzcc_clk(0)
return fdekey
def generate_trustonic_fde(self, key_sz=32):
fdekey = b""
for ctr in range(0, key_sz // 16):
self.tzcc_clk(1)
trustonic = b"TrustedCorekeymaster" + b'\x07' * 0x10
seed = trustonic + pack("<B", ctr)
fdekey += self.SBROM_KeyDerivation(1, b"", seed, 16)
self.tzcc_clk(0)
return fdekey
def generate_rpmb(self):
self.tzcc_clk(1)
rpmbkey = self.SBROM_KeyDerivation(1, self.rpmb_ikey, self.rpmb_salt, 0x20)
self.tzcc_clk(0)
return rpmbkey
# SBROM_KeyDerivation(dxcc_base,encmode=1,fde1,8,fde2,4,fdekey,fdekey>>31,fdekeylen
"""
SBROM_KeyDerivation PC(00230B77) R0:10210000,R1:00000001,R2:001209D8,R3:00000008,R4:00100000,R5:00233760,R6:00000010
R2:53514e43214c465a
R5:52504d42204b45595341534953514e43
key="SQNC!LFZ",8
salt="SASI",4
requestedlen=0x10
"""
def SBROM_KeyDerivation(self, encmode, key, salt, requestedlen):
result = bytearray()
buffer = bytearray(b"\x00" * 0x43)
if encmode - 1 > 4 or (1 << (encmode - 1) & 0x17) == 0:
return 0xF2000002
if requestedlen > 0xFF or (requestedlen << 28) & 0xFFFFFFFF:
return 0xF2000003
if 0x0 >= len(key) > 0x20:
return 0xF2000003
bufferlen = len(salt) + 3 + len(key)
iterlength = (requestedlen + 0xF) >> 4
if len(key) == 0:
keyend = 1
else:
buffer[1:1 + len(key)] = key
keyend = len(key) + 1
saltstart = keyend + 1
if len(salt) > 0:
buffer[saltstart:saltstart + len(salt)] = salt
buffer[saltstart + len(salt)] = 8 * requestedlen
# buffer=0153514e43214c465a005442544a80
for i in range(0, iterlength):
buffer[0] = i + 1
dstaddr = self.SBROM_AesCmac(encmode, 0x0, buffer, 0, bufferlen)
if dstaddr != 0:
for field in self.read32(dstaddr + 0x108, 4):
result.extend(pack("<I", field))
return result
def SBROM_AesCmac(self, encmode, salt, buffer, flag, bufferlen):
saltptr2 = 0
dataptr2 = 0
dataptr = self.mtk.config.chipconfig.da_payload_addr + 0x118 # SP - 0xA8 - 0x24 - 0x28 - 0x38 - 0x88 - 0x30 - ((12 * 8) - 16)
saltptr = dataptr - 0x10
destptr = saltptr - 0x108
self.writemem(dataptr, buffer[:bufferlen])
self.writemem(saltptr, pack("<Q", salt))
if self.SBROM_AesCmacDriver(encmode, saltptr, saltptr2, dataptr, dataptr2, destptr, bufferlen):
return destptr
return 0
def SB_HalInit(self):
return self.SB_HalClearInterruptBit()
def SB_HalWaitDescCompletion(self, destptr):
data = []
self.SB_HalClearInterruptBit()
val = self.SaSi_PalDmaMap(0)
data.append(0x0) # 0
data.append(0x8000011) # 1 #DIN_DMA|DOUT_DMA|DIN_CONST
data.append(destptr) # 2
data.append(0x8000012) # 3
data.append(0x100) # 4
data.append((destptr >> 32) << 16) # 5
self.SaSi_SB_AddDescSequence(data)
while True:
if self.SB_CryptoWait() & 4 != 0:
break
while True:
value = self.read32(self.dxcc_base + 0xBA0)
if value != 0:
break
if value == 1:
self.SB_HalClearInterruptBit()
self.SaSi_PalDmaUnMap(val)
return 0
else:
return 0xF6000001
def SBROM_AesCmacDriver(self, encmode, saltptr, saltptr2, dataptr, dataptr2, destptr, bufferlength):
if encmode == 1:
if self.read32(self.dxcc_base + self.DX_HOST_SEP_HOST_GPR4) & 2 != 0:
keylen = 0x20
else:
keylen = 0x10
else:
keylen = 0x10
self.SB_HalInit()
outputlen = (keylen << 19) - 0x800000 # 0x0
data = []
data.append(0x0) # 0
data.append(0x8000041) # 1
data.append(0x0) # 2
data.append(0x0) # 3
data.append(outputlen | 0x1001C20) # 4
data.append(0x0) # 5
self.SaSi_SB_AddDescSequence(data)
data[0] = 0
data[1] = 0
data[2] = 0
data[3] = 0
data[5] = 0
if encmode == 0:
data[0] = saltptr
data[1] = 0x42
data[5] = (saltptr >> 32) << 16
data[4] = outputlen | ((encmode & 3) << 15) | (((encmode >> 2) & 3) << 20) | 0x4001C20 # 04009C20
self.SaSi_SB_AddDescSequence(data)
data[0] = dataptr
data[1] = (4 * (bufferlength & 0xFFFFFF)) | 2 # 3E
data[2] = 0
data[3] = 0
data[4] = 1
data[5] = (dataptr2 >> 32) << 16
self.SaSi_SB_AddDescSequence(data)
if encmode != 2:
data[0] = 0
data[1] = 0
data[2] = saltptr # 120934
data[3] = 0x42
data[4] = 0x8001C26
data[5] = (saltptr2 >> 32) << 16
self.SaSi_SB_AddDescSequence(data)
return self.SB_HalWaitDescCompletion(destptr) == 0
| {"/Library/pltools.py": ["/Library/hwcrypto_dxcc.py"]} |
77,550 | tkerr97/PyScribe | refs/heads/master | /knnclassifier/knnclassifier.py | from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from utils import load_images
from skimage.feature import hog
from sklearn.model_selection import GridSearchCV
# load images
images, labels = load_images()
images = images[0:10000]
labels = labels[0:10000]
hogs = []
for image in images:
hogs.append(hog(image, pixels_per_cell=(7, 7), cells_per_block=(4, 4)))
print("Done")
X_train, X_test, y_train, y_test = train_test_split(hogs, labels, test_size=.15)
grid_params = {
'n_neighbors': [3, 5, 7, 9],
'weights': ['uniform', 'distance']
}
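# GridSearchCV fits all 8 combinations (4 n_neighbors x 2 weights), each with
# k-fold cross-validation (5 folds by default in recent scikit-learn).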
# Fit a KNN classifier on the training set
search = GridSearchCV(KNeighborsClassifier(), grid_params)
search.fit(X_train, y_train)
total = 0
right = 0
print(search.best_params_)
for image, label in zip(X_test, y_test):
if search.predict(image.reshape(1, -1)) == label:
right += 1
total += 1
print(right / total)
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,551 | tkerr97/PyScribe | refs/heads/master | /text_detection/character_segmentation/character_segmentation.py | import cv2
import os
import numpy as np
import shutil
from WordSegmentation import wordSegmentation, prepareImg
def word_segment(img_name):
# separate document image into word images
print('Segmenting words of sample %s'%img_name)
# read image, prepare it by resizing it to fixed height and converting it to grayscale
img = prepareImg(cv2.imread(img_name), 50)
# execute segmentation with given parameters
# -kernelSize: size of filter kernel (odd integer)
# -sigma: standard deviation of Gaussian function used for filter kernel
# -theta: approximated width/height ratio of words, filter function is distorted by this factor
# - minArea: ignore word candidates smaller than specified area
res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)
# write output to 'out/inputFileName' directory
if os.path.exists('./%s'%img_name):
shutil.rmtree('./%s'%img_name)
os.mkdir('./%s'%img_name)
else:
os.mkdir('./%s'%img_name)
# iterate over all segmented words
print('Segmented into %d words'%len(res))
for (j, w) in enumerate(res):
(wordBox, wordImg) = w
(x, y, w, h) = wordBox
cv2.imwrite('./%s/%d.png'%(img_name, j), wordImg) # save word
cv2.rectangle(img,(x,y),(x+w,y+h),0,1) # draw bounding box in summary image
# output summary image with bounding boxes around words
cv2.imwrite('./%s/summary.png'%img_name, img)
def segment(file_name: str):
img = cv2.imread(file_name, 0)
cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU,img)
contours, hier = cv2.findContours(img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])
cv2.imshow("contours", img)
cv2.waitKey(0)
d=0
for ctr in contours:
# Get bounding box
x, y, w, h = cv2.boundingRect(ctr)
# Getting ROI
roi = img[y:y+h, x:x+w]
cv2.imshow('character: %d'%d,roi)
cv2.imwrite('character_%d.png'%d, roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
d+=1
def capture(camera_number: int , img_name: str):
cam = cv2.VideoCapture(camera_number)
cv2.namedWindow('Text Detection')
while True:
ret, frame = cam.read()
cv2.imshow("test", frame)
if not ret:
break
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
captured_img = False
break
elif k%256 == 32:
# SPACE pressed
img_dir = img_name
cv2.imwrite(img_dir, frame)
print("{} written!".format(img_name))
captured_img = True
break
cam.release()
cv2.destroyAllWindows()
return captured_img
def increase_contrast(img_name: str):
# load image
img_dir = img_name
img_gray = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE)
# increase contrast
threshold = 150
img_gray = 255 - img_gray # invert color
for row in range(img_gray.shape[0]):
for col in range(img_gray.shape[1]):
# print(img_gray[row][col])
if (img_gray[row][col] < threshold):
img_gray[row][col] = 0
else:
img_gray[row][col] = 255
# img_gray = 255 - img_gray #invert color back
# increase line width
kernel = np.ones((3, 3), np.uint8)
processed_img = cv2.erode(img_gray, kernel, iterations = 1)
# save processed image
img_name_root = os.path.splitext(img_name)[0]
processed_img_name = img_name_root + '_processed.png'
processed_img_dir = processed_img_name
cv2.imwrite(processed_img_dir, processed_img)
return processed_img_name
def main():
capture(1, 'test_img.png')
increase_contrast('test_img.png')
word_segment('test_img_processed.png')
# segment('test_img_processed.png')
if __name__ == '__main__':
main()
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,552 | tkerr97/PyScribe | refs/heads/master | /utils.py | import json
import os
from emnist import extract_training_samples as em
import numpy as np
import tensorflow as tf
import cv2
from skimage.transform import rescale, resize
chars = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z'
]
def get_char(index):
global chars
return chars[index]
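# e.g. get_char(0) -> '0', get_char(10) -> 'A', get_char(36) -> 'a'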
def load_images():
images, labels = em('byclass')
return images, labels
def enable_cuda():
tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
def output_model(model, name):
model.save("models/" + name + ".h5", save_format="tf")
def load_model(name):
model = tf.keras.models.load_model(name)
return model
def increase_contrast(img):
# load image
img_gray = img
# increase contrast
threshold = 150
img_gray = 255 - img_gray # invert color
for row in range(img_gray.shape[0]):
for col in range(img_gray.shape[1]):
# print(img_gray[row][col])
if img_gray[row][col] < threshold:
img_gray[row][col] = 0
else:
img_gray[row][col] = 255
# img_gray = 255 - img_gray #invert color back
img_gray = cv2.GaussianBlur(img_gray, (13, 13), 0)
# increase line width
kernel = np.ones((3, 3), np.uint8)
processed_img = cv2.dilate(img_gray, kernel, iterations=1)
return processed_img
def get_processed_image(image):
image = increase_contrast(image)
cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU, image)
return image
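# Glyphs are resized to 20x20 and padded by 4 px on each side, giving the
# 28x28 layout the EMNIST-trained models expect.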
def resize_letter(letter):
letter = cv2.resize(letter, dsize=(20, 20), interpolation=cv2.INTER_AREA)
letter = np.pad(letter, [(4,), (4,)], mode='constant')
cv2.imshow('letter', letter)
cv2.waitKey(0)
letter = np.asarray(letter).astype(dtype="float32").reshape((1, 28, 28, 1))
letter /= 255
return letter
def get_letters(image):
contours, heir = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])
letters = []
images = []
for ctr in contours:
# Get bounding box
x, y, w, h = cv2.boundingRect(ctr)
# Getting letter
letter = image[y:y + h, x:x + w]
if len(letter) < 100:
continue
letter = np.pad(letter, [(30,), (30,)])
images.append(letter)
letters.append(resize_letter(letter))
return letters, images
def test_model(model, name, show=False):
im = cv2.imread(f"pictures/{name}", cv2.IMREAD_GRAYSCALE)
text = ""
letters, images = get_letters(get_processed_image(im))
for i, letter in enumerate(letters):
res = model.predict(letter).argmax()
char = get_char(res)
if show:
cv2.imshow(char, images[i])
cv2.waitKey()
text += char
return text
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,553 | tkerr97/PyScribe | refs/heads/master | /svm/main.py | from skimage.feature import hog
from sklearn import svm
import sklearn.model_selection as sk
from sklearn.decomposition import PCA
from utils import load_images
images, labels = load_images()
images = images[0:10000]
labels = labels[0:10000]
hogs = []
for image in images:
hogs.append(hog(image, pixels_per_cell=(7, 7), cells_per_block=(4, 4)))
print("Done")
X_train, X_test, y_train, y_test = sk.train_test_split(hogs, labels, test_size=.15)
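# Project the HOG feature vectors onto 60 principal components to keep SVC training tractable.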
pca = PCA(n_components=60)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
model = svm.SVC()
model.fit(X_train, y_train)
predictions = [int(pred) for pred in model.predict(X_test)]
correct = sum(int(pred == y) for pred, y in zip(predictions, y_test))
print(100*correct/len(predictions))
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,554 | tkerr97/PyScribe | refs/heads/master | /text_detection/src/prepare_image.py | import numpy as np
import cv2
import os
def increase_contrast(img_name: str):
# load image
img_dir = os.path.join('../data/', img_name)
img_gray = cv2.imread(img_dir, cv2.IMREAD_GRAYSCALE)
# increase contrast
threshold = 150
img_gray = 255 - img_gray # invert color
for row in range(img_gray.shape[0]):
for col in range(img_gray.shape[1]):
# print(img_gray[row][col])
if (img_gray[row][col] < threshold):
img_gray[row][col] = 0
else:
img_gray[row][col] = 255
img_gray = 255 - img_gray #invert color back
# increase line width
kernel = np.ones((3, 3), np.uint8)
processed_img = cv2.erode(img_gray, kernel, iterations = 1)
# save processed image
img_name_root = os.path.splitext(img_name)[0]
processed_img_name = img_name_root + '_processed.png'
processed_img_dir = os.path.join('../data/', processed_img_name)
cv2.imwrite(processed_img_dir, processed_img)
return processed_img_name
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,555 | tkerr97/PyScribe | refs/heads/master | /main.py | import os
import sys
from pathlib import Path
from PySide2.QtCore import QUrl, QObject, Slot, Property, Signal, QtFatalMsg, QtCriticalMsg, QtWarningMsg, QtInfoMsg, \
qInstallMessageHandler
from PySide2.QtGui import QGuiApplication, QImage
from PySide2.QtQml import QQmlApplicationEngine
import tensorflow as tf
import numpy as np
import cv2
from skimage.transform import rescale, resize
from utils import load_model, increase_contrast, get_letters, get_processed_image, get_char
def qt_message_handler(mode, context, message):
if mode == QtInfoMsg:
mode = 'Info'
elif mode == QtWarningMsg:
mode = 'Warning'
elif mode == QtCriticalMsg:
mode = 'critical'
elif mode == QtFatalMsg:
mode = 'fatal'
else:
mode = 'Debug'
print("%s: %s (%s:%d, %s)" % (mode, message, context.file, context.line, context.file))
print("OpenCV version: " + cv2.__version__)
print("Tensorflow version: " + tf.__version__)
class MainWindow(QQmlApplicationEngine):
def __init__(self):
super().__init__()
self.load(os.path.join(os.getcwd(), "view.qml"))
qInstallMessageHandler(qt_message_handler)
self.rootContext().setContextProperty("MainWindow", self)
if os.name == "nt":
self.prefix = "file:///"
else:
self.prefix = "file://"
self.tmp_dir = "tmp"
if not os.path.exists("tmp"):
os.makedirs("tmp")
self.fileName = None
self.colorImage = None
self.image = None
self.bwImage = None
self.model = None
self.modelFolderName = None
if self.rootObjects():
self.window = self.rootObjects()[0]
self.imageField = self.window.findChild(QObject, "imagePreview")
self.modelText = self.window.findChild(QObject, "modelPreview")
else:
sys.exit(-1)
@Slot(str)
def selectFile(self, file):
self.fileName = file[len(self.prefix):]
self.colorImage = QUrl.fromLocalFile(self.fileName)
self.image = cv2.imread(self.fileName, cv2.IMREAD_GRAYSCALE)
path = Path(self.fileName)
newFileName = self.tmp_dir + "/"+path.name[:-len(path.suffix)] + "_bw" + path.suffix
cv2.imwrite(newFileName, self.image)
self.bwImage = QUrl.fromLocalFile(newFileName)
self.showProcessImage()
self.showContours()
self.showColor()
@Slot(str)
def selectModel(self, model):
self.modelFolderName = model[len(self.prefix):]
modelName = model.split("/")[-1]
self.modelText.setProperty("text", modelName)
self.model = load_model(self.modelFolderName)
@Slot()
def showContour(self):
self.imageField.setProperty("source", self.ctrImage)
@Slot()
def showColor(self):
self.imageField.setProperty("source", self.colorImage)
@Slot()
def showBW(self):
self.imageField.setProperty("source", self.bwImage)
@Slot()
def showProcess(self):
self.imageField.setProperty("source", self.procImage)
#def scale_to_emnist(self, image):
@Slot()
def showContours(self):
image = increase_contrast(self.image)
cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU, image)
contours, heir = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for ctr in contours:
x, y, w, h = cv2.boundingRect(ctr)
cv2.rectangle(image, (x,y), (x+w, y+h), (255, 255, 255), 3)
path = Path(self.fileName)
name = self.tmp_dir + "/"+path.name[:-len(path.suffix)] + "_ctr" + path.suffix
cv2.imwrite(name, image)
self.ctrImage = QUrl.fromLocalFile(name)
@Slot()
def showProcessImage(self):
image = increase_contrast(self.image)
cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU, image)
path = Path(self.fileName)
name = self.tmp_dir + "/"+path.name[:-len(path.suffix)] + "_prc" + path.suffix
cv2.imwrite(name, image)
self.procImage = QUrl.fromLocalFile(name)
@Slot()
def runModel(self):
self.text = ""
letters, _ = get_letters(get_processed_image(self.image))
for letter in letters:
self.text += get_char(self.model.predict(letter).argmax())
print(self.text)
@Slot(str)
def saveFile(self, filename):
with open(filename, 'w') as f:
f.write(self.text)
if __name__ == "__main__":
os.environ["QT_QUICK_CONTROLS_STYLE"] = "Material"
app = QGuiApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec_())
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,556 | tkerr97/PyScribe | refs/heads/master | /cnns/cnn_2.py | import sklearn.model_selection as sk
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, BatchNormalization, Dropout, Flatten, Dense, MaxPool2D
import cv2
from utils import load_images, output_model, enable_cuda, test_model, load_model, get_char
images, labels = load_images()
images = images.reshape(images.shape[0], 28, 28, 1)
x_train, x_test, y_train, y_test = sk.train_test_split(images, labels, test_size=.15)
model = tf.keras.Sequential()
model.add(Conv2D(32, kernel_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(4, 4)))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(3,3)))
model.add(Dropout(0.1))
model.add(Conv2D(128, kernel_size=(5, 5), strides=(2,2), padding="same"))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(2048))
model.add(Dropout(0.1))
model.add(Dense(1024))
model.add(Dense(62, activation="sigmoid"))
enable_cuda()
model.compile(optimizer='adamax',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
_, acc = model.evaluate(x_test, y_test)
print(f"Accuracy {acc}")
output_model(model, "model1")
print(test_model(model, "hello_world.png", True))
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,557 | tkerr97/PyScribe | refs/heads/master | /naivebayes/naivebayes.py | from emnist import extract_training_samples as em
from builtins import range, input
import numpy as np
from datetime import datetime
from scipy.stats import norm
from scipy.stats import multivariate_normal as mvn
# inlined copy of load_images from utils.py
def load_images():
images, labels = em('byclass')
return images, labels
class NaiveBayes(object):
def fit(self, X, Y, smoothing=1e-2):
self.gaussians = dict()
self.priors = dict()
labels = set(Y)
for c in labels:
current_x = X[Y == c]
self.gaussians[c] = {
'mean': current_x.mean(axis=0),
'var': current_x.var(axis=0) + smoothing,
}
self.priors[c] = float(len(Y[Y == c])) / len(Y)
def score(self, X, Y):
P = self.predict(X)
return np.mean(P == Y)
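# Posterior per class: log P(c|x) ~ mvn.logpdf(x; mean_c, var_c) + log prior_c; predict the argmax.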
def predict(self, X):
N, D = X.shape
K = len(self.gaussians)
P = np.zeros((N, K))
for c, g in self.gaussians.items():
mean, var = g['mean'], g['var']
P[:,c] = mvn.logpdf(X, mean=mean, cov=var) + np.log(self.priors[c])
return np.argmax(P, axis=1)
images, labels = load_images()
numImages = len(labels)
Ntrain = numImages // 2
counter = 0
new_images = np.zeros((numImages,784))
for image in images:
new_image = image.flatten()
new_images[counter] = new_image
counter +=1
Xtrain, Ytrain = new_images[:Ntrain], labels[:Ntrain]
Xtest, Ytest = new_images[Ntrain:], labels[Ntrain:]
print("Train matrix shape:", Xtrain.shape)
model = NaiveBayes()
model.fit(Xtrain,Ytrain)
print("Train accuracy:", model.score(Xtrain, Ytrain))
print("Test accuracy:", model.score(Xtest, Ytest))
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,558 | tkerr97/PyScribe | refs/heads/master | /cnns/handwriting_model.py | import sklearn.model_selection as sk
import tensorflow as tf
from utils import load_images, enable_cuda, output_model
images, labels = load_images()
images = images.reshape(images.shape[0], 28, 28, 1)
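# scale pixel values from [0, 255] to [0, 1] before training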
images = images.astype("float32")
images /= 255
# Split the labels and images into train and test
train_images, test_images, train_labels, test_labels = sk.train_test_split(images, labels, test_size=.25)
# Set up the layers of the model
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=(1, 1), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(64, kernel_size=(1, 1), activation='relu'),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
tf.keras.layers.Conv2D(128, kernel_size=(5, 5), activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(2048, activation='relu'),
tf.keras.layers.Dropout(.2),
tf.keras.layers.Dense(2048, activation='relu'),
tf.keras.layers.Dropout(.2),
tf.keras.layers.Dense(62, activation='softmax')
])
# Check that TF is running on the GPU
enable_cuda()
# Train the model
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=4)
# Check the statistics
test_loss, test_acc = model.evaluate(test_images, test_labels)
print("Accuracy: ", test_acc)
output_model(model, "cnn0")
print("Saved model to disk")
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,559 | tkerr97/PyScribe | refs/heads/master | /text_detection/src/camera.py | import cv2
import os
# Camera class used to take picture of document
class Camera:
def __init__(self, camera_number, img_name):
self.camera_number = camera_number
self.img_name = img_name
def capture(self):
captured_img = False
cam = cv2.VideoCapture(self.camera_number)
cv2.namedWindow('Text Detection')
while True:
ret, frame = cam.read()
cv2.imshow("test", frame)
if not ret:
break
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
captured_img = False
break
elif k%256 == 32:
# SPACE pressed
img_dir = os.path.join('../data', self.img_name)
cv2.imwrite(img_dir, frame)
print("{} written!".format(self.img_name))
captured_img = True
break
cam.release()
cv2.destroyAllWindows()
return captured_img | {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,560 | tkerr97/PyScribe | refs/heads/master | /text_detection/src/main.py | from __future__ import division
from __future__ import print_function
import sys
import argparse
import cv2
import editdistance
from DataLoader import DataLoader, Batch
from Model import Model, DecoderType
from SamplePreprocessor import preprocess
from camera import Camera
from prepare_image import increase_contrast
from WordSegmentation import wordSegmentation, prepareImg
import os
import shutil
class FilePaths:
"filenames and paths to data"
fnCharList = '../model/charList.txt'
fnAccuracy = '../model/accuracy.txt'
fnTrain = '../data/'
fnCorpus = '../data/corpus.txt'
def infer(model, fnImg):
"recognize text in image provided by file path"
img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, probability) = model.inferBatch(batch, True)
print('Recognized:', '"' + recognized[0] + '"')
print('Probability:', probability[0])
return recognized
def main():
# optional command line args
parser = argparse.ArgumentParser()
parser.add_argument('--image_name', type=str, default='test_img.png') # make sure this ends in an image file extension
parser.add_argument('--camera_number', type=int, default=0)
args = parser.parse_args()
img_name = args.image_name
camera_number = args.camera_number
decoderType = DecoderType.BestPath
# capture image of document
cam = Camera(camera_number, img_name)
got_img = cam.capture()
if not got_img:
return
# prepare image for word segmentation
processed_img_name = increase_contrast(img_name)
# separate document image into word images
print('Segmenting words of sample %s'%img_name)
# read image, prepare it by resizing it to fixed height and converting it to grayscale
img = prepareImg(cv2.imread('../data/%s'%processed_img_name), 50)
# execute segmentation with given parameters
# -kernelSize: size of filter kernel (odd integer)
# -sigma: standard deviation of Gaussian function used for filter kernel
# -theta: approximated width/height ratio of words, filter function is distorted by this factor
# - minArea: ignore word candidates smaller than specified area
res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)
# write output to 'out/inputFileName' directory
if os.path.exists('../out/%s'%img_name):
shutil.rmtree('../out/%s'%img_name)
os.mkdir('../out/%s'%img_name)
else:
os.mkdir('../out/%s'%img_name)
# iterate over all segmented words
print('Segmented into %d words'%len(res))
for (j, w) in enumerate(res):
(wordBox, wordImg) = w
(x, y, w, h) = wordBox
cv2.imwrite('../out/%s/%d.png'%(img_name, j), wordImg) # save word
cv2.rectangle(img,(x,y),(x+w,y+h),0,1) # draw bounding box in summary image
# output summary image with bounding boxes around words
cv2.imwrite('../out/%s/summary.png'%img_name, img)
# analyze words
text = []
print(open(FilePaths.fnAccuracy).read())
model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore=True, dump=False)
for word in os.listdir(f'../out/{img_name}'):
if word != 'summary.png':
new_word = infer(model, os.path.join('../out/', img_name, word))
text.append(new_word)
print(f'Document Text: {text}')
if __name__ == '__main__':
main()
| {"/knnclassifier/knnclassifier.py": ["/utils.py"], "/svm/main.py": ["/utils.py"], "/main.py": ["/utils.py"], "/cnns/cnn_2.py": ["/utils.py"], "/cnns/handwriting_model.py": ["/utils.py"]} |
77,561 | jenn0727/Tiny_Yolo3 | refs/heads/master | /tiny_yolo.py | import torch
import torch.nn as nn
from yolo_layer import YoloLayer
from util import *
import torch.nn.functional as F
from collections import OrderedDict
from collections import defaultdict
class EmptyModule(nn.Module):
def __init__(self):
super(EmptyModule, self).__init__()
def forward(self, x):
return x
class MaxPoolStride1(nn.Module):
def __init__(self):
super(MaxPoolStride1, self).__init__()
def forward(self, x):
x = F.max_pool2d(F.pad(x, (0,1,0,1), mode='replicate'), 2, stride=1)
return x
class tiny_yolo(nn.Module):
def __init__(self, config):
super(tiny_yolo, self).__init__()
self.config = config
self.loss_names = ["x", "y", "w", "h", "conf", "cls", "recall", "precision"]
self.seen = 0
self.header_info = np.array([0, 0, 0, self.seen, 0])
self.conv_bn = [0, 4, 8, 12, 16, 20, 24, 27, 30, 36, 41]
self.conv = [33,44]
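# Indices into self.cnn: conv_bn lists convs that are followed by a BatchNorm,
# conv lists the two bare detection convs; both are used by save_weights below.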
self.cnn = nn.Sequential(OrderedDict([
# 0 conv 0-2
('conv0', nn.Conv2d(3, 16, 3, 1, 1, bias=False)),
('bn0', nn.BatchNorm2d(16)),
('leaky0', nn.LeakyReLU(0.1, inplace=True)),
# 1 max 3
('max1', nn.MaxPool2d(2, 2)),
# 2 conv 4-6
('conv2', nn.Conv2d(16, 32, 3, 1, 1, bias=False)),
('bn2', nn.BatchNorm2d(32)),
('leaky2', nn.LeakyReLU(0.1, inplace=True)),
# 3 max 7
('pool3', nn.MaxPool2d(2, 2)),
# 4 conv 8-10
('conv4', nn.Conv2d(32, 64, 3, 1, 1, bias=False)),
('bn4', nn.BatchNorm2d(64)),
('leaky4', nn.LeakyReLU(0.1, inplace=True)),
# 5 max 11
('pool5', nn.MaxPool2d(2, 2)),
# 6 conv 12-14
('conv6', nn.Conv2d(64, 128, 3, 1, 1, bias=False)),
('bn6', nn.BatchNorm2d(128)),
('leaky6', nn.LeakyReLU(0.1, inplace=True)),
# 7 max 15
('pool7', nn.MaxPool2d(2, 2)),
# 8 conv 16-18
('conv8', nn.Conv2d(128, 256, 3, 1, 1, bias=False)),
('bn8', nn.BatchNorm2d(256)),
('leaky8', nn.LeakyReLU(0.1, inplace=True)),
# 9 max 19
('pool9', nn.MaxPool2d(2, 2)),
# 10 conv 20-22
('conv10', nn.Conv2d(256, 512, 3, 1, 1, bias=False)),
('bn10', nn.BatchNorm2d(512)),
('leaky10', nn.LeakyReLU(0.1, inplace=True)),
# 11 max 23
('pool11', MaxPoolStride1()),
# 12 conv 24-26
('conv12', nn.Conv2d(512, 1024, 3, 1, 1, bias=False)),
('bn12', nn.BatchNorm2d(1024)),
('leaky12', nn.LeakyReLU(0.1, inplace=True)),
# 13 conv 27-29
('conv13', nn.Conv2d(1024, 256, 1, 1, 0, bias=False)),
('bn13', nn.BatchNorm2d(256)),
('leaky13', nn.LeakyReLU(0.1, inplace=True)),
# 14 conv 30-32
('conv14', nn.Conv2d(256, 512, 3, 1, 1, bias=False)),
('bn14', nn.BatchNorm2d(512)),
('leaky14', nn.LeakyReLU(0.1, inplace=True)),
# 15 conv 33
('conv15', nn.Conv2d(512, 255, kernel_size=1, stride=1, padding=0)),
# 16 yolo 34
('yolo16', YoloLayer([3, 4, 5], self.config)),
# 17 route 35
('route17', EmptyModule()),
# 18 conv 36-38
('conv18', nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0)),
('bn18', nn.BatchNorm2d(128)),
('leaky18', nn.LeakyReLU(0.1, inplace=True)),
# 19 upsample 39
('upsample', nn.Upsample(scale_factor=2)),
# 20 route 40
('route20', EmptyModule()),
# 21 conv 41-43
('conv21', nn.Conv2d(384, 256, 3, 1, 1, bias=False)),
('bn21', nn.BatchNorm2d(256)),
('leaky21', nn.LeakyReLU(0.1, inplace=True)),
# 22 conv 44
('conv22', nn.Conv2d(256, 255, kernel_size=1, stride=1, padding=0)),
# 23 yolo 45
('yolo23', YoloLayer([0, 1, 2], self.config)),
]))
"""
def Conv_BN_Leaky(self, in_channel, out_channel, kernel_size, padding, bias=False):
conv_bn_leaky = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size, padding, bias),
nn.BatchNorm2d(out_channel),
nn.LeakyReLU(0.1, inplace=True)
)
return conv_bn_leaky
"""
    def forward(self, x, targets=None):
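        # Two detection heads: self.cnn[34] (yolo16) consumes the 13x13 map (y1) and
        # self.cnn[45] (yolo23) the upsampled 26x26 map (y2); the route layers are
        # wired manually below instead of through the EmptyModule placeholders.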
self.losses = defaultdict(float)
out_boxes = []
        output = []
for i in range(19):
x = self.cnn[i](x)
x1 = x
# x1:26*26*256
for i in range(19,30):
            x = self.cnn[i](x)
x2 = x
# x2:13*13*256
for i in range(30,34):
x = self.cnn[i](x)
y1 = x
for i in range(36,40):
x2 = self.cnn[i](x2)
# x2:26*26*128
#20 route 40th
x = torch.cat((x2,x1), 1)
# x:26*26*384
for i in range(41,45):
x = self.cnn[i](x)
y2 = x
if self.config.is_train:
x, *losses = self.cnn[34](y1, targets)
for name, loss in zip(self.loss_names, losses):
self.losses[name] += loss
output.append(x)
x, *losses = self.cnn[45](y2, targets)
for name, loss in zip(self.loss_names, losses):
self.losses[name] += loss
output.append(x)
else:
            # inference uses the same YoloLayer modules as the training branch (indices 34 and 45)
            boxes = self.cnn[34](y1, targets)
            out_boxes.append(boxes)
            boxes = self.cnn[45](y2, targets)
out_boxes.append(boxes)
self.losses["recall"] /= 3
self.losses["precision"] /= 3
return sum(output) if self.config.is_train else torch.cat(out_boxes,1)
def load_weights(self, weightfile):
# Open the weights file
fp = open(weightfile, "rb")
header = np.fromfile(fp, dtype=np.int32, count=5) # First five are header values
# Needed to write header when saving weights
self.header_info = header
self.seen = header[3]
buf = np.fromfile(fp, dtype=np.float32) # The rest are weights
fp.close()
start = 0
"""
for i in self.conv_bn[0:-2]:
start = load_conv_bn(buf, start, self.cnn[i], self.cnn[i+1])
print(i)
"""
start = load_conv_bn(buf, start, self.cnn[0], self.cnn[1])
start = load_conv_bn(buf, start, self.cnn[4], self.cnn[5])
start = load_conv_bn(buf, start, self.cnn[8], self.cnn[9])
start = load_conv_bn(buf, start, self.cnn[12], self.cnn[13])
start = load_conv_bn(buf, start, self.cnn[16], self.cnn[17])
start = load_conv_bn(buf, start, self.cnn[20], self.cnn[21])
start = load_conv_bn(buf, start, self.cnn[24], self.cnn[25])
start = load_conv_bn(buf, start, self.cnn[27], self.cnn[28])
start = load_conv_bn(buf, start, self.cnn[30], self.cnn[31])
start = load_conv(buf, start, self.cnn[33])
start = load_conv_bn(buf, start, self.cnn[36], self.cnn[37])
start = load_conv_bn(buf, start, self.cnn[41], self.cnn[42])
start = load_conv(buf, start, self.cnn[44])
def save_weights(self, outfile):
fp = open(outfile, 'wb')
self.header_info[3] = self.seen
self.header_info.tofile(fp)
for i in range(len(self.cnn)):
if i in self.conv_bn:
save_conv_bn(fp, self.cnn[i], self.cnn[i+1])
if i in self.conv:
save_conv(fp, self.cnn[i])
fp.close()
| {"/tiny_yolo.py": ["/yolo_layer.py", "/util.py"], "/trainer.py": ["/tiny_yolo.py", "/util.py"], "/detect.py": ["/util.py", "/tiny_yolo.py", "/config.py", "/trainer.py"], "/yolo_layer.py": ["/util.py"]} |
77,562 | jenn0727/Tiny_Yolo3 | refs/heads/master | /config.py | import argparse
arg_lists = []
parser = argparse.ArgumentParser(description='tiny_yolo')
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
def str2bool(v):
return v.lower() in ('true', '1')
def get_config():
config, unparsed = parser.parse_known_args()
return config, unparsed
# data params
data_arg = add_argument_group('Data Params')
data_arg.add_argument('--batch_size', type=int, default=1, help='# of images in each batch of data')
data_arg.add_argument('--max_batches', type=int, default=40200, help='# max_batches')
data_arg.add_argument('--class_num', type=int, default=80, help='Number of classes')
data_arg.add_argument('--steps', type=list, default=[-1,100,20000,30000], help='steps')
data_arg.add_argument('--momentum', type=float, default=0.9, help='momentum')
data_arg.add_argument('--scales', type=list, default=[.1,10,.1,.1], help='scales')
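# NOTE: type=list only behaves as intended for the defaults above; a value passed
# on the command line would be split into individual characters.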
data_arg.add_argument('--decay', type=float, default=0.0005, help='decay')
# training params
train_arg = add_argument_group('Training Params')
train_arg.add_argument('--is_train', type=str2bool, default=True, help='Whether to train or test the model')
train_arg.add_argument('--epochs', type=int, default=30, help='# of epochs to train for')
train_arg.add_argument('--init_lr', type=float, default=0.001, help='Initial learning rate')
train_arg.add_argument('--train_patience', type=int, default=50, help='Number of epochs to wait before stopping training')
train_arg.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model weights")
train_arg.add_argument(
"--checkpoint_dir", type=str, default='./ckpt/', help="directory where model checkpoints are saved"
)
train_arg.add_argument('--weightfile', type=str, default='yolov3-tiny.weights', help='path of the weight file')
train_arg.add_argument('--train_txt', type=str, default='data/train.txt', help='path of the train image')
train_arg.add_argument('--n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')
# testing params
test_arg = add_argument_group('Testing Params')
test_arg.add_argument('--anchors', type=list, default=[10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319], help='the value of anchors')
test_arg.add_argument('--num_anchors', type=int, default=6, help='Number of anchors')
test_arg.add_argument('--test_txt', type=str, default='data/test.txt', help='path of the test image')
test_arg.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
test_arg.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
test_arg.add_argument("--nms_thres", type=float, default=0.45, help="iou threshold for non-maximum suppression")
# other params
other_arg = add_argument_group('Other Params')
other_arg.add_argument('--use_gpu', type=str2bool, default=False, help='Whether to run on GPU')
| {"/tiny_yolo.py": ["/yolo_layer.py", "/util.py"], "/trainer.py": ["/tiny_yolo.py", "/util.py"], "/detect.py": ["/util.py", "/tiny_yolo.py", "/config.py", "/trainer.py"], "/yolo_layer.py": ["/util.py"]} |
77,563 | jenn0727/Tiny_Yolo3 | refs/heads/master | /util.py | import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
def read_data_cfg(datacfg):
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(datacfg, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '':
continue
key,value = line.split('=')
key = key.strip()
value = value.strip()
options[key] = value
return options
def file_lines(file_path):
count = 0
    file = open(file_path, 'rb')
    # file: a text file opened in binary mode so newlines can be counted per chunk
    while True:
        buffer = file.read(8192*1024)
        if not buffer:
            break
        count += buffer.count(b'\n')  # bytes buffer, so count the byte b'\n'
    file.close()
    return count
    # we do not use the one-liner below because it reads the whole file into
    # memory, which is wasteful for large files:
    # count = len(open(filepath,'rU').readlines())
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def load_conv(buf, start, conv_model):
num_w = conv_model.weight.numel()
num_b = conv_model.bias.numel()
#print(num_w, num_b)
conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b]).view_as(conv_model.bias)); start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight)); start = start + num_w
return start
def save_conv(fp, conv_model):
if conv_model.bias.is_cuda:
convert2cpu(conv_model.bias.data).numpy().tofile(fp)
convert2cpu(conv_model.weight.data).numpy().tofile(fp)
else:
conv_model.bias.data.numpy().tofile(fp)
conv_model.weight.data.numpy().tofile(fp)
def load_conv_bn(buf, start, conv_model, bn_model):
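    # Darknet stores a conv+BN block as: bn bias, bn weight (gamma), running_mean,
    # running_var (num_b floats each), followed by the conv kernel weights.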
num_w = conv_model.weight.numel()
num_b = bn_model.bias.numel()
#print(num_w, num_b)
bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight)); start = start + num_w
return start
def save_conv_bn(fp, conv_model, bn_model):
if bn_model.bias.is_cuda:
convert2cpu(bn_model.bias.data).numpy().tofile(fp)
convert2cpu(bn_model.weight.data).numpy().tofile(fp)
convert2cpu(bn_model.running_mean).numpy().tofile(fp)
convert2cpu(bn_model.running_var).numpy().tofile(fp)
convert2cpu(conv_model.weight.data).numpy().tofile(fp)
else:
bn_model.bias.data.numpy().tofile(fp)
bn_model.weight.data.numpy().tofile(fp)
bn_model.running_mean.numpy().tofile(fp)
bn_model.running_var.numpy().tofile(fp)
conv_model.weight.data.numpy().tofile(fp)
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
# get the corrdinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([0.0], precision, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def bbox_iou_numpy(box1, box2):
"""Computes IoU between bounding boxes.
Parameters
----------
box1 : ndarray
(N, 4) shaped array with bboxes
box2 : ndarray
(M, 4) shaped array with bboxes
Returns
-------
: ndarray
(N, M) shaped array with IoUs
"""
area = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
iw = np.minimum(np.expand_dims(box1[:, 2], axis=1), box2[:, 2]) - np.maximum(
np.expand_dims(box1[:, 0], 1), box2[:, 0]
)
ih = np.minimum(np.expand_dims(box1[:, 3], axis=1), box2[:, 3]) - np.maximum(
np.expand_dims(box1[:, 1], 1), box2[:, 1]
)
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
return intersection / ua
def non_max_suppression(prediction, num_classes, conf_thres, nms_thres):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# From (center x, center y, width, height) to (x1, y1, x2, y2)
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
conf_mask = (image_pred[:, 4] >= conf_thres).squeeze()
image_pred = image_pred[conf_mask]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Get score and class with highest confidence
class_conf, class_pred = torch.max(image_pred[:, 5 : 5 + num_classes], 1, keepdim=True)
# Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
# Iterate through all predicted classes
unique_labels = detections[:, -1].cpu().unique()
if prediction.is_cuda:
unique_labels = unique_labels.cuda()
for c in unique_labels:
# Get the detections with the particular class
detections_class = detections[detections[:, -1] == c]
# Sort the detections by maximum objectness confidence
_, conf_sort_index = torch.sort(detections_class[:, 4], descending=True)
detections_class = detections_class[conf_sort_index]
# Perform non-maximum suppression
max_detections = []
while detections_class.size(0):
# Get detection with highest confidence and save as max detection
max_detections.append(detections_class[0].unsqueeze(0))
# Stop if we're at the last detection
if len(detections_class) == 1:
break
# Get the IOUs for all boxes with lower confidence
ious = bbox_iou(max_detections[-1], detections_class[1:])
# Remove detections with IoU >= NMS threshold
detections_class = detections_class[1:][ious < nms_thres]
max_detections = torch.cat(max_detections).data
# Add max detections to outputs
output[image_i] = (
max_detections if output[image_i] is None else torch.cat((output[image_i], max_detections))
)
return output
def do_detect(model, img, conf_thresh, nms_thresh, cuda=0):
model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
else:
print("unknow image type")
exit(-1)
t1 = time.time()
if cuda:
img = img.cuda()
img = torch.autograd.Variable(img)
t2 = time.time()
detections = model(img)
detections = non_max_suppression(detections, 80, conf_thresh, nms_thresh)
return detections
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip()
class_names.append(line)
return class_names
def plot_boxes(img, boxes, savename, class_names=None):
    colors = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]])
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
image = np.array(img)
# The amount of padding that was added
pad_x = max(image.shape[0] - image.shape[1], 0) * (416 / max(image.shape))
pad_y = max(image.shape[1] - image.shape[0], 0) * (416 / max(image.shape))
# Image height and width after padding is removed
unpad_h = 416 - pad_y
unpad_w = 416 - pad_x
draw = ImageDraw.Draw(img)
box = boxes[0]
if box is not None:
for x1, y1, x2, y2, conf, cls_conf, cls_pred in box:
print('\t Label: %s, Conf: %.5f' % (class_names[int(cls_pred)], cls_conf.item()))
y1 = ((y1 - pad_y // 2) / unpad_h) * image.shape[0]
x1 = ((x1 - pad_x // 2) / unpad_w) * image.shape[1]
y2 = ((y2 - pad_y // 2) / unpad_h) * image.shape[0]
x2 = ((x2 - pad_x // 2) / unpad_w) * image.shape[1]
            if y1 < 0:
                y1 = 0
            if y2 < 0:
                y2 = 0
            if x1 < 0:
                x1 = 0
            if x2 < 0:
                x2 = 0
classes = len(class_names)
offset = int(cls_pred) * 123457 % classes
            # map the class id to a stable pseudo-random color (darknet-style hashing)
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
rgb = (red, green, blue)
if float(cls_conf)>0.7:
draw.text((x1, y1), class_names[int(cls_pred)], fill=rgb)
draw.rectangle([x1, y1, x2, y2], outline=rgb)
if savename:
print("save plot results to %s" % savename)
img.save(savename)
return img
def prepare_dirs(config):
path = config.checkpoint_dir
if not os.path.exists(path):
os.mkdir(path) | {"/tiny_yolo.py": ["/yolo_layer.py", "/util.py"], "/trainer.py": ["/tiny_yolo.py", "/util.py"], "/detect.py": ["/util.py", "/tiny_yolo.py", "/config.py", "/trainer.py"], "/yolo_layer.py": ["/util.py"]} |
77,564 | jenn0727/Tiny_Yolo3 | refs/heads/master | /trainer.py | import torch
import torch.nn as nn
from tiny_yolo import tiny_yolo
from torchvision import datasets, transforms
from torch.autograd import Variable
import time
import torch.optim as optim
from util import *
from dataset import *
import tqdm
class Trainer(object):
def __init__(self, config):
super(Trainer, self).__init__()
self.config = config
self.epochs = self.config.epochs
cuda = torch.cuda.is_available() and self.config.use_gpu is True
self.model = tiny_yolo(self.config)
self.model.load_weights(self.config.weightfile)
#self.model = SE_yolo(self.config, pre_model)
print(self.model)
print('[*] Number of model parameters: {:,}'.format(sum([p.data.nelement() for p in self.model.parameters()])))
if cuda:
self.model = self.model.cuda()
self.optimizer = optim.SGD(self.model.parameters(), lr=self.config.init_lr/self.config.batch_size, momentum=self.config.momentum, dampening=0, weight_decay=self.config.decay*self.config.batch_size)
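        # lr is divided by batch_size and weight_decay multiplied by it, apparently
        # mirroring darknet's per-batch update convention.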
self.Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def train(self):
# loading training data
        t0 = time.time()
train_path = self.config.train_txt
dataloader = torch.utils.data.DataLoader(
ListDataset(train_path), batch_size=self.config.batch_size, shuffle=False, num_workers=self.config.n_cpu
)
best_model_wts = self.model.state_dict()
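        # NOTE: state_dict() returns live references to the model tensors, not a
        # snapshot, so the load_state_dict call after the epoch loop is effectively a no-op.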
self.model.train()
for epoch in range(1,self.epochs+1):
for batch_i, (_, imgs, targets) in enumerate(dataloader):
t2= time.time()
imgs = Variable(imgs.type(self.Tensor))
targets = Variable(targets.type(self.Tensor), requires_grad=False)
self.optimizer.zero_grad()
loss = self.model(imgs, targets)
loss.backward()
self.optimizer.step()
print(
"[Epoch %d/%d, Batch %d/%d] [Losses: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f, recall: %.5f, precision: %.5f]"
% (
epoch,
self.config.epochs,
batch_i,
len(dataloader),
self.model.losses["x"],
self.model.losses["y"],
self.model.losses["w"],
self.model.losses["h"],
self.model.losses["conf"],
self.model.losses["cls"],
loss.item(),
self.model.losses["recall"],
self.model.losses["precision"],
)
)
self.model.seen += imgs.size(0)
if epoch % self.config.checkpoint_interval == 0:
self.model.save_weights("%s/%d.weights" % (self.config.checkpoint_dir, epoch))
#torch.save(self.model, os.path.join(self.config.checkpoint_dir, "epoch_" + str(epoch) + ".pth.tar"))
self.model.load_state_dict(best_model_wts)
def test(self):
test_path = self.config.test_txt
dataset = ListDataset(test_path)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.config.batch_size, shuffle=False,
num_workers=self.config.n_cpu)
self.model.eval()
num_classes = self.config.class_num
all_detections = []
all_annotations = []
for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
imgs = Variable(imgs.type(self.Tensor))
with torch.no_grad():
outputs = self.model(imgs)
outputs = non_max_suppression(outputs, 80, conf_thres=self.config.conf_thres, nms_thres=self.config.nms_thres)
for output, annotations in zip(outputs, targets):
all_detections.append([np.array([]) for _ in range(num_classes)])
if output is not None:
# Get predicted boxes, confidence scores and labels
pred_boxes = output[:, :5].cpu().numpy()
scores = output[:, 4].cpu().numpy()
pred_labels = output[:, -1].cpu().numpy()
# Order by confidence
sort_i = np.argsort(scores)
pred_labels = pred_labels[sort_i]
pred_boxes = pred_boxes[sort_i]
for label in range(num_classes):
all_detections[-1][label] = pred_boxes[pred_labels == label]
all_annotations.append([np.array([]) for _ in range(num_classes)])
if any(annotations[:, -1] > 0):
annotation_labels = annotations[annotations[:, -1] > 0, 0].numpy()
_annotation_boxes = annotations[annotations[:, -1] > 0, 1:]
# Reformat to x1, y1, x2, y2 and rescale to image dimensions
annotation_boxes = np.empty_like(_annotation_boxes)
annotation_boxes[:, 0] = _annotation_boxes[:, 0] - _annotation_boxes[:, 2] / 2
annotation_boxes[:, 1] = _annotation_boxes[:, 1] - _annotation_boxes[:, 3] / 2
annotation_boxes[:, 2] = _annotation_boxes[:, 0] + _annotation_boxes[:, 2] / 2
annotation_boxes[:, 3] = _annotation_boxes[:, 1] + _annotation_boxes[:, 3] / 2
annotation_boxes *= 416
for label in range(num_classes):
all_annotations[-1][label] = annotation_boxes[annotation_labels == label, :]
average_precisions = {}
for label in range(num_classes):
true_positives = []
scores = []
num_annotations = 0
            for i in tqdm.tqdm(range(len(all_annotations)), desc=("Computing AP for class '%s'" % label)):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for *bbox, score in detections:
scores.append(score)
if annotations.shape[0] == 0:
true_positives.append(0)
continue
overlaps = bbox_iou_numpy(np.expand_dims(bbox, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= self.config.iou_thres and assigned_annotation not in detected_annotations:
true_positives.append(1)
detected_annotations.append(assigned_annotation)
else:
true_positives.append(0)
# no annotations -> AP for this class is 0
if num_annotations == 0:
average_precisions[label] = 0
continue
true_positives = np.array(true_positives)
false_positives = np.ones_like(true_positives) - true_positives
# sort by score
indices = np.argsort(-np.array(scores))
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
print("Average Precisions:")
for c, ap in average_precisions.items():
print(" Class '{%s}' - AP: %f" % (c, ap))
mAP = np.mean(list(average_precisions.values()))
print("mAP: {%f}" % (mAP))
| {"/tiny_yolo.py": ["/yolo_layer.py", "/util.py"], "/trainer.py": ["/tiny_yolo.py", "/util.py"], "/detect.py": ["/util.py", "/tiny_yolo.py", "/config.py", "/trainer.py"], "/yolo_layer.py": ["/util.py"]} |
77,565 | jenn0727/Tiny_Yolo3 | refs/heads/master | /detect.py |
from util import *
from tiny_yolo import tiny_yolo
#from SE_yolo import SE_yolo
from config import get_config
from trainer import Trainer
from util import prepare_dirs
def detect(config, weightfile, imgfile):
m = tiny_yolo(config)
#m.print_network()
m.load_weights(weightfile)
print('Loading weights from %s... Done!' % (weightfile))
num_classes = config.class_num
namesfile = 'data/coco.names'
'''
num_classes = 80
if num_classes == 20:
namesfile = ''
elif num_classes == 80:
namesfile = 'data/coco.names'
else:
namesfile = 'data/names'
'''
cuda = torch.cuda.is_available() and config.use_gpu
if cuda:
m.cuda()
img = Image.open(imgfile).convert('RGB')
sized = img.resize((416, 416))
start = time.time()
boxes = do_detect(m, sized, 0.5, 0.5, cuda)
print(boxes)
finish = time.time()
print('%s: Predicted in %f seconds.' % (imgfile, (finish-start)))
class_names = load_class_names(namesfile)
plot_boxes(img, boxes, 'prediction.jpg', class_names)
def main(config):
prepare_dirs(config)
trainer = Trainer(config)
if config.is_train:
trainer.train()
else:
# load a pre-trained model and test
trainer.test()
if __name__ == '__main__':
config, unparsed = get_config()
main(config)
'''
weightfile = 'yolov3-tiny.weights'
imgfile = 'data/13.jpg'
detect(config, weightfile, imgfile)
if len(sys.argv) == 3:
weightfile = sys.argv[1]
imgfile = sys.argv[2]
detect(weightfile, imgfile)
else:
print('Usage: ')
print(' python detect.py cfgfile weightfile imgfile')
''' | {"/tiny_yolo.py": ["/yolo_layer.py", "/util.py"], "/trainer.py": ["/tiny_yolo.py", "/util.py"], "/detect.py": ["/util.py", "/tiny_yolo.py", "/config.py", "/trainer.py"], "/yolo_layer.py": ["/util.py"]} |
77,566 | jenn0727/Tiny_Yolo3 | refs/heads/master | /yolo_layer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torch.autograd import Variable
from util import *
def build_targets(
pred_boxes, pred_conf, pred_cls, target, anchors, num_anchors, num_classes, grid_size, ignore_thres
):
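    # Builds per-anchor regression/classification targets on an nG x nG grid;
    # each target row is (class, cx, cy, w, h) with coordinates normalized to [0, 1].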
nB = target.size(0)
nA = num_anchors
nC = num_classes
nG = grid_size #13
mask = torch.zeros(nB, nA, nG, nG)
conf_mask = torch.ones(nB, nA, nG, nG)
tx = torch.zeros(nB, nA, nG, nG)
ty = torch.zeros(nB, nA, nG, nG)
tw = torch.zeros(nB, nA, nG, nG)
th = torch.zeros(nB, nA, nG, nG)
tconf = torch.ByteTensor(nB, nA, nG, nG).fill_(0)
tcls = torch.ByteTensor(nB, nA, nG, nG, nC).fill_(0)
nGT = 0
nCorrect = 0
for b in range(nB):
for t in range(target.shape[1]):
if target[b, t].sum() == 0:
continue
nGT += 1
# Convert to position relative to box
gx = target[b, t, 1] * nG
gy = target[b, t, 2] * nG
gw = target[b, t, 3] * nG
gh = target[b, t, 4] * nG
# Get grid box indices
gi = int(gx)
gj = int(gy)
# Get shape of gt box
gt_box = torch.FloatTensor(np.array([0, 0, gw, gh])).unsqueeze(0)
# Get shape of anchor box
anchor_shapes = torch.FloatTensor(np.concatenate((np.zeros((len(anchors), 2)), np.array(anchors)), 1))
# Calculate iou between gt and anchor shapes
anch_ious = bbox_iou(gt_box, anchor_shapes)
# Where the overlap is larger than threshold set mask to zero (ignore)
conf_mask[b, anch_ious > ignore_thres, gj, gi] = 0
# Find the best matching anchor box
best_n = np.argmax(anch_ious)
# Get ground truth box
gt_box = torch.FloatTensor(np.array([gx, gy, gw, gh])).unsqueeze(0)
# Get the best prediction
pred_box = pred_boxes[b, best_n, gj, gi].unsqueeze(0)
# Masks
mask[b, best_n, gj, gi] = 1
conf_mask[b, best_n, gj, gi] = 1
# Coordinates
tx[b, best_n, gj, gi] = gx - gi
ty[b, best_n, gj, gi] = gy - gj
# Width and height
tw[b, best_n, gj, gi] = math.log(gw / anchors[best_n][0] + 1e-16)
th[b, best_n, gj, gi] = math.log(gh / anchors[best_n][1] + 1e-16)
# One-hot encoding of label
target_label = int(target[b, t, 0])
tcls[b, best_n, gj, gi, target_label] = 1
tconf[b, best_n, gj, gi] = 1
# Calculate iou between ground truth and best matching prediction
iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)
pred_label = torch.argmax(pred_cls[b, best_n, gj, gi])
score = pred_conf[b, best_n, gj, gi]
if iou > 0.5 and pred_label == target_label and score > 0.5:
nCorrect += 1
return nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls
class YoloLayer(nn.Module):
def __init__(self, anchor_mask, config):
super(YoloLayer, self).__init__()
self.config = config
self.anchors = self.config.anchors
self.anchors = [(self.anchors[i], self.anchors[i + 1]) for i in range(0, len(self.anchors), 2)]
self.anchor = [self.anchors[i] for i in anchor_mask]
self.num_anchors = len(self.anchor)
self.num_classes = self.config.class_num
self.bbox_attrs = 5 + self.num_classes
self.image_dim = 416 ##
self.ignore_thres = 0.7
self.lambda_coord = 1
self.mse_loss = nn.MSELoss(size_average=True) # Coordinate loss
self.bce_loss = nn.BCELoss(size_average=True) # Confidence loss
self.ce_loss = nn.CrossEntropyLoss() # Class loss
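        # NOTE: size_average is deprecated in newer PyTorch; reduction='mean' is the equivalent argument.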
def forward(self, x, targets=None):
nA = self.num_anchors
nB = x.size(0)
nG = x.size(2)
#nH = x.size(2)
stride = self.image_dim / nG
# Tensors for cuda support
FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor
prediction = x.view(nB, nA, self.bbox_attrs, nG, nG).permute(0, 1, 3, 4, 2).contiguous()
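        # prediction shape after the permute: (nB, nA, nG, nG, 5 + num_classes)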
# Get outputs
x = torch.sigmoid(prediction[..., 0]) # Center x
y = torch.sigmoid(prediction[..., 1]) # Center y
w = prediction[..., 2] # Width
h = prediction[..., 3] # Height
pred_conf = torch.sigmoid(prediction[..., 4]) # Conf
pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.
# Calculate offsets for each grid
grid_x = torch.arange(nG).repeat(nG, 1).view([1, 1, nG, nG]).type(FloatTensor)
grid_y = torch.arange(nG).repeat(nG, 1).t().view([1, 1, nG, nG]).type(FloatTensor)
scaled_anchors = FloatTensor([(a_w / stride, a_h / stride) for a_w, a_h in self.anchor])
anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))
anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))
# Add offset and scale with anchors
pred_boxes = FloatTensor(prediction[..., :4].shape)
pred_boxes[..., 0] = x.data + grid_x
pred_boxes[..., 1] = y.data + grid_y
pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
# Training
if targets is not None:
if x.is_cuda:
self.mse_loss = self.mse_loss.cuda()
self.bce_loss = self.bce_loss.cuda()
self.ce_loss = self.ce_loss.cuda()
nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls = build_targets(
pred_boxes=pred_boxes.cpu().data,
pred_conf=pred_conf.cpu().data,
pred_cls=pred_cls.cpu().data,
target=targets.cpu().data,
anchors=scaled_anchors.cpu().data,
num_anchors=nA,
num_classes=self.num_classes,
grid_size=nG,
ignore_thres=self.ignore_thres,
)
nProposals = int((pred_conf > 0.5).sum().item())
recall = float(nCorrect / nGT) if nGT else 1
            if nProposals > 0:
                precision = float(nCorrect / nProposals)
            else:
                precision = 0
# Handle masks
mask = Variable(mask.type(ByteTensor))
conf_mask = Variable(conf_mask.type(ByteTensor))
# Handle target variables
tx = Variable(tx.type(FloatTensor), requires_grad=False)
ty = Variable(ty.type(FloatTensor), requires_grad=False)
tw = Variable(tw.type(FloatTensor), requires_grad=False)
th = Variable(th.type(FloatTensor), requires_grad=False)
tconf = Variable(tconf.type(FloatTensor), requires_grad=False)
tcls = Variable(tcls.type(LongTensor), requires_grad=False)
# Get conf mask where gt and where there is no gt
conf_mask_true = mask
conf_mask_false = conf_mask - mask
# Mask outputs to ignore non-existing objects
loss_x = self.mse_loss(x[mask], tx[mask])
loss_y = self.mse_loss(y[mask], ty[mask])
loss_w = self.mse_loss(w[mask], tw[mask])
loss_h = self.mse_loss(h[mask], th[mask])
loss_conf = self.bce_loss(pred_conf[conf_mask_false], tconf[conf_mask_false]) + self.bce_loss(
pred_conf[conf_mask_true], tconf[conf_mask_true]
)
loss_cls = (1 / nB) * self.ce_loss(pred_cls[mask], torch.argmax(tcls[mask], 1))
loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls
return (
loss,
loss_x.item(),
loss_y.item(),
loss_w.item(),
loss_h.item(),
loss_conf.item(),
loss_cls.item(),
recall,
precision,
)
else:
# If not in training phase return predictions
output = torch.cat(
(
pred_boxes.view(nB, -1, 4) * stride,
pred_conf.view(nB, -1, 1),
pred_cls.view(nB, -1, self.num_classes),
),
-1,
)
return output
| {"/tiny_yolo.py": ["/yolo_layer.py", "/util.py"], "/trainer.py": ["/tiny_yolo.py", "/util.py"], "/detect.py": ["/util.py", "/tiny_yolo.py", "/config.py", "/trainer.py"], "/yolo_layer.py": ["/util.py"]} |
77,571 | dbhoan/Amazon_rating_prediction | refs/heads/master | /evaluation.py | # -*- coding: utf-8 -*-
# Author: Hoan Bui Dang
# Python: 3.6
"""
Home-made tools for model evaluation.
"""
import pandas as pd
import numpy as np
def confusion(predicted,fact):
count_total = len(predicted)
true_pos = sum((predicted == 1) & (fact == 1)).astype(int)
true_neg = sum((predicted == 0) & (fact == 0)).astype(int)
false_pos = sum((predicted == 1) & (fact == 0)).astype(int)
false_neg = sum((predicted == 0) & (fact == 1)).astype(int)
accuracy = (true_pos + true_neg)/count_total
pos_recall = true_pos / (true_pos + false_neg)
pos_precision = true_pos / (true_pos + false_pos)
neg_recall = true_neg / (true_neg + false_pos)
neg_precision = true_neg / (true_neg + false_neg)
f1_score = 2/((1/pos_recall) + (1/pos_precision))
print('Confusion matrix:')
print('%7d %7d' % (true_pos, false_pos))
print('%7d %7d' % (false_neg, true_neg))
print()
print('Accuracy : %.3f' % accuracy)
print('Pos recall : %.3f' % pos_recall)
print('Pos precision : %.3f' % pos_precision)
print('Neg recall : %.3f' % neg_recall)
print('Neg precision : %.3f' % neg_precision)
print('F1 score:', f1_score)
def KS_chart(score, target):
""" aka AR chart """
if any((target!=0) & (target!=1)):
print('Target for gain_chart must contain only 0 and 1.')
return
df = pd.DataFrame({'score': score, 'target':target})
df = df.sort_values(by='score')
L = len(df)
target_count = sum(df.target)
x = np.linspace(0,1,1001)
y = np.array([])
z = np.array([])
for i in x.tolist():
partial = df.iloc[:int(L*i)]
y = np.append(y, sum(partial.target)/target_count)
z = np.append(z, (len(partial)-sum(partial.target))/(L-target_count))
    diff = y - z
    ind_max = np.argmax(diff)
    print('KS: ', np.max(diff))
    # report the score threshold at the population fraction where the KS gap peaks
    print('Score at max:', df['score'].iloc[max(int(L * x[ind_max]) - 1, 0)])
return np.column_stack((x,y,z))
| {"/logistic_regression.py": ["/evaluation.py"]} |
77,572 | dbhoan/Amazon_rating_prediction | refs/heads/master | /logistic_regression.py | # -*- coding: utf-8 -*-
# Author: Hoan Bui Dang
# Python: 3.6
import pandas as pd
import numpy as np
import re
from matplotlib import pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.linear_model import LogisticRegression
from nltk.corpus import stopwords # Import the stop word list
from evaluation import confusion
def clean_review(s):
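    # keep alphabetic characters only, lowercase, and collapse runs of whitespace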
return ' '.join(re.sub('[^a-zA-Z]',' ', s).lower().split())
print('Loading train data... ', end ='')
df = pd.read_csv('data_train.csv')
print('Done.')
# plot histogram of ratings
plt.hist(df.overall, bins = range(0,3))
plt.show()
train = pd.DataFrame()
train['rating'] = df['overall']
train['review'] = df['reviewText']
print('Cleaning train data... ', end = '')
# remove NaN from reviewText
train = train[~train['review'].isnull()]
train = train[~train['rating'].isnull()]
train['review'] = train['review'].apply(clean_review)
print('Done.')
#stopwords.words('English')
vectorizer = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 10000, \
ngram_range = (1,2) )
print('Vectorizing text... ', end = '')
features = vectorizer.fit_transform(train['review'].tolist())
vocab = vectorizer.get_feature_names()
dist = np.sum(features, axis=0)
print('Done.')
print('Training the model... ', end = '')
#classifier = MultinomialNB()
classifier = LogisticRegression()
model = classifier.fit(features,train['rating'])
print('Done.')
print('Loading test data... ', end = '')
df2 = pd.read_csv('data_test.csv')
print('Done.')
print('Processing test data... ', end = '')
test = pd.DataFrame()
test['rating'] = df2['overall']
test['review'] = df2['reviewText']
test = test[~test['review'].isnull()]
test = test[~test['rating'].isnull()]
test['review'] = test['review'].apply(clean_review)
test_features = vectorizer.transform(test['review'].tolist())
print('Done.')
print('Testing model... ', end = '')
predict = model.predict(test_features)
test['predict'] = predict
#test.to_csv('reviews_home_binary_test_predict_LogisticRegression.csv', index=False)
print('Done.')
confusion(predict, test['rating'])
plt.hist(predict, bins = range(0,3))
plt.show() | {"/logistic_regression.py": ["/evaluation.py"]} |
77,578 | marcelosalloum/snippet-api | refs/heads/master | /app/api/errors.py | from app import db
from app.api import bp
from flask import jsonify
from werkzeug.http import HTTP_STATUS_CODES
def error_response(status_code, message=None):
payload = {'error': HTTP_STATUS_CODES.get(status_code, 'Unknown error')}
if message:
payload['message'] = message
payload['status_code'] = status_code
response = jsonify(payload)
response.status_code = status_code
return response
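# bad_request doubles as the registered 400 handler and as a helper that the API
# views call directly with a plain message string.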
@bp.app_errorhandler(400)
def bad_request(message):
return error_response(400, message)
@bp.app_errorhandler(403)
def forbidden_error(error):
return error_response(403, message='This resource can\'t be accessed by your user')
@bp.app_errorhandler(404)
def not_found_error(error):
return error_response(404, message='This resource couldn\'t be found')
@bp.app_errorhandler(500)
def internal_error(error):
db.session.rollback()
return error_response(500, message='There was an internal error. Please contact the administrator')
| {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,579 | marcelosalloum/snippet-api | refs/heads/master | /migrations/versions/63e27b9e088e_password.py | """password
Revision ID: 63e27b9e088e
Revises: 9789e67f0a69
Create Date: 2019-12-20 17:12:25.491783
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '63e27b9e088e'
down_revision = '9789e67f0a69'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('snippet', sa.Column('password_hash', sa.String(length=128), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('snippet', 'password_hash')
# ### end Alembic commands ###
| {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,580 | marcelosalloum/snippet-api | refs/heads/master | /app/models/__init__.py | from flask import Blueprint
bp = Blueprint('models', __name__)
from app.models.snippet import Snippet, SnippetScheme | {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,581 | marcelosalloum/snippet-api | refs/heads/master | /app/api/snippets.py | from app import db
from app.api import bp
from app.api.errors import bad_request, error_response
from app.models import Snippet, SnippetScheme
from datetime import datetime, timedelta
from dateutil.parser import parse
from flask import abort, g, jsonify, request, make_response, url_for
from marshmallow import fields
from app.config import Config
@bp.route('/snippets/<int:id>', methods=['GET'])
def get_snippet(id):
snippet = Snippet.query.get_or_404(id)
now = datetime.utcnow()
if snippet.expires < now:
return error_response(status_code=403, message="This resource has expired and is no longer available")
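    # each successful read pushes the expiry forward (sliding expiration)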
snippet.expires += timedelta(hours=Config.EXPIRATION_INCREASE_HOURS)
db.session.commit()
snippet_schema = SnippetScheme()
return jsonify(snippet_schema.dump(snippet))
@bp.route('/snippets', methods=['POST'])
def create_snippet():
data = request.get_json() or {}
# Validation
if 'name' not in data or 'snippet' not in data or 'expires' not in data:
return bad_request("The fields 'name', 'snippet' and 'expires' are mandatory!")
try:
parse(data['expires'])
    except (ValueError, OverflowError, TypeError):
return bad_request("The 'expires' field couldn't be parsed to a valid datetime. To prevent this, make sure to adopt a widely known datetime format, like ISO 8601 (YYYY-MM-DDTHH:MM:SS.mmmmmm)")
# Object creation
snippet_scheme = SnippetScheme()
snippet = Snippet()
snippet_scheme.load(data, instance=snippet, partial=True)
if 'password' in data:
snippet.set_password(data['password'])
db.session.add(snippet)
db.session.commit()
# Response
response = jsonify(snippet_scheme.dump(snippet))
response.status_code = 201
response.headers['Location'] = url_for('api.get_snippet', id=snippet.id)
return response
@bp.route('/snippets/<int:id>', methods=['PUT'])
def update_snippet(id):
data = request.get_json() or {}
snippet = Snippet.query.get_or_404(id)
if 'password' not in data or not snippet.check_password(data['password']):
abort(403)
snippet_scheme = SnippetScheme()
snippet_scheme.load(data, instance=snippet, partial=True)
db.session.commit()
response = jsonify(snippet_scheme.dump(snippet))
response.status_code = 200
return response
# - OK: POST request to create the snippet: 'name', 'snippet', 'expires'
# - OK: The request to store the snippet should be replied to with a response that
# includes the URL where the snippet can be read.
# - OK: GET
# - OK: Resources would be inaccessible after expired
# - OK: Snippets expiry should be extended when they are accessed.
| {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,582 | marcelosalloum/snippet-api | refs/heads/master | /app/__init__.py | from flask import Flask
from app.config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_migrate import Migrate
# Initialize dependencies
db = SQLAlchemy()
ma = Marshmallow()
migrate = Migrate()
def create_app(config_class=Config):
# Configure app
app = Flask(__name__)
app.config.from_object(config_class)
app.url_map.strict_slashes = False
# models Blueprint
from app.models import bp as models_bp
app.register_blueprint(models_bp)
# api Blueprint
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
# Link dependencies
db.init_app(app)
ma.init_app(app)
migrate.init_app(app, db)
return app
| {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,583 | marcelosalloum/snippet-api | refs/heads/master | /main.py | from app import create_app, db
from app.models import Snippet
app = create_app()
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Snippet': Snippet} | {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,584 | marcelosalloum/snippet-api | refs/heads/master | /app/models/snippet.py | from app import db, ma
from datetime import datetime, timedelta
from marshmallow import EXCLUDE
from werkzeug.security import generate_password_hash, check_password_hash
class Snippet(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
snippet = db.Column(db.String(1000000))
expires = db.Column(db.DateTime)
password_hash = db.Column(db.String(128))
# PASSWORD
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
if self.password_hash is None:
return False
return check_password_hash(self.password_hash, password)
def __repr__(self):
return f'<Snippet:{self.id} {self.name}>'
class SnippetScheme(ma.ModelSchema):
class Meta:
model = Snippet
fields = ('id', 'name', 'snippet', 'expires', '_links')
unknown = EXCLUDE
# Smart hyperlinking
_links = ma.Hyperlinks(
{"self": ma.URLFor("api.get_snippet", id="<id>")}
)
| {"/app/api/errors.py": ["/app/__init__.py"], "/app/models/__init__.py": ["/app/models/snippet.py"], "/app/api/snippets.py": ["/app/__init__.py", "/app/api/errors.py", "/app/models/__init__.py"], "/app/__init__.py": ["/app/models/__init__.py"], "/main.py": ["/app/__init__.py", "/app/models/__init__.py"], "/app/models/snippet.py": ["/app/__init__.py"]} |
77,589 | ichunyeh/HFDiagnosis | refs/heads/master | /test.py | '''
Integration
'''
import os
import numpy as np
from sklearn.model_selection import train_test_split
import keras.backend as K
from keras.models import load_model
from preprocessing import load_data
seed = 1
np.random.seed(seed)
# parameters
# os.chdir('/Users/isabellepolizzi/Desktop/UPC/IDSS/PW3/idss_pw3/')
OUTPUT_DIR = 'results/'
DATA_DIR = 'data/'
DATA_PATH = DATA_DIR + 'processed_cleveland_data.csv'
IMP_M = 'x' # missing data treatment
SCALE_M = 'min_max' # scale x(attributes)
isW = 1
W_TRAINABLE = 0
ANN_PATH = OUTPUT_DIR + 'ANNmodel'+'-IMP'+IMP_M+'-SCALE'+SCALE_M+'-isW'+str(isW)+'-wTrain'+str(W_TRAINABLE)+'.h5'
ANN_AGAIN = 0
isEval = 0
COLS_PATH = DATA_DIR + 'missing_cols' # missing cols
def train():
# load data
X, y = load_data(DATA_PATH, IMP_M, SCALE_M)
# split train/valid/test
print('\nSplit... (65% train, 20% valid, 15% test)')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=seed)
print('Number of training+validation data:', X_train.shape[0])
print('Number of testing data:', X_test.shape[0])
# model
if (not os.path.isfile(ANN_PATH)) | ANN_AGAIN :
print('\nTraining ANN...')
from model import ANN
model = ANN(X_train, y_train, isW, W_TRAINABLE, ANN_PATH, isPlot=OUTPUT_DIR)
else:
print('\nLoading ANN...')
model = load_model(ANN_PATH)
# evaluate model
train_scores = model.evaluate(X_train, y_train, verbose=0)
print("Train %s: %.2f%%" % (model.metrics_names[1], train_scores[1] * 100))
test_scores = model.evaluate(X_test, y_test, verbose=0)
print("Test %s: %.2f%%" % (model.metrics_names[1], test_scores[1] * 100))
# evaluation
if isEval:
print('\nEvaluating...')
from eval import eval
eval(model, X_test, y_test, OUTPUT_DIR)
def predict_HFp(X, needScale=True):
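    # returns the predicted probability of heart failure (class-1 softmax output), expressed as a percentage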
# load model
if (not os.path.isfile(ANN_PATH)) | ANN_AGAIN : train()
model = load_model(ANN_PATH)
if needScale: X = col_scaling(X)
# prediction
np.set_printoptions(precision=2)
pred = model.predict(X)[:, 1] * 100
return pred
def col_scaling(X):
import pickle
# scaler1 : handle cols(-missing_cols)
cols = [i for i in range(X.shape[1])]
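    # NOTE: np.float128 is not available on every platform (e.g. Windows builds of
    # NumPy); np.float64 may be required there.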
missing_cols = np.fromfile(COLS_PATH, dtype=np.float128, sep=' ')
missing_cols = np.reshape(missing_cols, len(missing_cols))
    for i in sorted(missing_cols, reverse=True):
        cols = np.delete(cols, int(i))  # indices read from file are floats; np.delete needs integers
scaler1 = pickle.load(open(DATA_DIR+'scaler1.sav', 'rb'))
X[:, cols] = scaler1.transform(X[:, cols])
# scaler2
scaler2 = pickle.load(open(DATA_DIR+'scaler2.sav', 'rb'))
X_scaled = scaler2.transform(X)
return X_scaled
if __name__ == '__main__':
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
print('Saving on... ' + os.getcwd() + '/'+ OUTPUT_DIR)
# predict original data
# X, y = load_data(DATA_PATH, IMP_M, SCALE_M)
# print(predict_HFp(X, needScale=False))
# predict new data
# new_x = np.array([[67.0,1.0,4.0,160.0,286.0,0.0,2.0,108.0,1.0,1.5,2.0,3.0,3.0]], dtype=float)
# print('\nnew patient:', new_x)
# print(predict_HFp(new_x))
K.clear_session() # delete session from keras backend
| {"/test.py": ["/model.py"]} |
77,590 | ichunyeh/HFDiagnosis | refs/heads/master | /model.py | '''
13-13-10-2
'''
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from faphy import get_weights
W_PATH = 'data/weights'
W_AGAIN = False
def ANN(X_train, y_train, isW, w_trainable, model_path, isPlot=0, isShow=0):
if isW:
# attribute weight (faphy)
if os.path.isfile(W_PATH) & (not W_AGAIN): # exist
print('Loading attribute weights...')
w = np.fromfile(W_PATH, dtype=np.float128, sep=' ')
w = np.reshape(w, len(w))
else: # not exist
print('Computing attribute weights...')
w = get_weights(W_PATH)
# print(w)
# build ANN layer1 weights
W = np.zeros([13, 13])
for i in range(13): W[i, i] = w[i]
# print(W)
b = np.zeros([13])
# create model
model = Sequential()
if isW: model.add(Dense(13, input_dim=13, weights=[W, b], trainable=w_trainable)) # layer1 : 13-13 (attribute weight)
model.add(Dense(10, input_dim=13, activation='sigmoid'))
model.add(Dense(2, activation='softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy']) # TODO: make sure which optimizer is better here
# model.summary()
# fit model
history = model.fit(X_train, y_train, # TODO: set these parameters in test.py
batch_size=50,
epochs=2000,
verbose=isShow,
validation_split=20 / 85) # train(65%)+validation(20%)=train(85%) /test(15%), split train/validation here
if isPlot != 0:
# print(history.history.keys()) # list all data in history
# summarize history for accuracy
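        # NOTE: newer Keras releases key the history with 'accuracy'/'val_accuracy'
        # instead of 'acc'/'val_acc'.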
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.savefig(isPlot + 'acc.png')
# plt.show()
plt.close()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.savefig(isPlot + 'loss.png')
# plt.show()
plt.close()
# save model
model.save(model_path)
return model | {"/test.py": ["/model.py"]} |
77,607 | keitakuki/ShadowSkinning | refs/heads/master | /lib/contour.py | import cv2
def find_contours_and_hierarchy(src):
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
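    # NOTE: the 3-value unpacking below follows the OpenCV 3.x API; OpenCV 4.x
    # findContours returns only (contours, hierarchy).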
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
return contours, hierarchy
def find_human_contour(contours, hierarchy):
max_area = 0
human_contour = None
for i, contour in enumerate(contours):
# remove outer triangle
if hierarchy[0][i][2] > -1:
continue
area = cv2.contourArea(contour)
if max_area < area:
max_area = area
human_contour = contour
return human_contour
def draw_contour(img, contour):
cv2.drawContours(img, [contour], 0, (0, 0, 255), 1)
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,608 | keitakuki/ShadowSkinning | refs/heads/master | /run_skinning.py | import cv2
import matplotlib.pyplot as plt
from tf_pose.common import read_imgfile
from lib.common import draw_circle
from lib.contour import find_contours_and_hierarchy, find_human_contour
from lib.skeleton import SkeletonImplement
from lib.skinning import NearestNeighbourSkinning
if __name__ == '__main__':
src = read_imgfile("./images/shadow.jpg", None, None)
dst = src.copy()
contours, hierarchy = find_contours_and_hierarchy(src)
human_contour = find_human_contour(contours, hierarchy)
skeletonImplement = SkeletonImplement()
humans = skeletonImplement.infer_skeletons(src)
skinning = NearestNeighbourSkinning(src, humans[0], human_contour)
# visualization
for i in [100, 300, 500]:
draw_circle(dst, skinning.contour_vertex_positions[i], (255, 0, 0))
draw_circle(dst, skinning.body_part_positions[skinning.nearest_body_part_indices[i]])
# cv2.imwrite("./images/nearest.png", dst)
plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
plt.show()
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,609 | keitakuki/ShadowSkinning | refs/heads/master | /lib/skinning.py | import sys
import cv2
from tf_pose.common import CocoPart
from lib.common import calculate_squared_distance
class NearestNeighbourSkinning:
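    # Rigid nearest-neighbour skinning: every silhouette vertex is bound to its
    # closest detected skeleton joint with full influence.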
def __init__(self, src, human, human_contour):
self.body_part_positions = []
self.contour_vertex_positions = []
self.triangle_vertex_indices = []
self.nearest_body_part_indices = []
self.influence = []
image_height, image_width = src.shape[:2]
# body_part_positions
for i in range(CocoPart.Background.value):
if i not in human.body_parts.keys():
continue
body_part = human.body_parts[i]
body_part_position = (int(body_part.x * image_width + 0.5), int(body_part.y * image_height + 0.5))
self.body_part_positions.append(body_part_position)
# contour_vertex_positions
subdivision = cv2.Subdiv2D((0, 0, image_width, image_height))
for j in range(len(human_contour)):
contour_point = tuple(human_contour[j][0])
subdivision.insert(contour_point)
self.contour_vertex_positions.append(contour_point)
# triangle_vertex_indices
contour_vertex_indices = dict((coord, index) for index, coord in enumerate(self.contour_vertex_positions))
triangle_list = subdivision.getTriangleList()
for t in triangle_list:
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
triangle_center = (int((t[0] + t[2] + t[4]) / 3), int((t[1] + t[3] + t[5]) / 3))
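            # pointPolygonTest returns +1 inside, 0 on the edge, -1 outside;
            # keep only triangles whose centroid lies strictly inside the silhouette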
if cv2.pointPolygonTest(human_contour, triangle_center, False) < 1:
continue
self.triangle_vertex_indices.append([
contour_vertex_indices[pt1],
contour_vertex_indices[pt2],
contour_vertex_indices[pt3]
])
# nearest_body_part_indices
for i in range(len(self.contour_vertex_positions)):
contour_vertex_position = self.contour_vertex_positions[i]
tmp = sys.maxsize
nearest_body_part_index = None
for j in range(len(self.body_part_positions)):
squared_distance = calculate_squared_distance(contour_vertex_position, self.body_part_positions[j])
if tmp < squared_distance:
continue
tmp = squared_distance
nearest_body_part_index = j
self.nearest_body_part_indices.append(nearest_body_part_index)
# influence
for i in range(len(self.nearest_body_part_indices)):
self.influence.append(1)
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,610 | keitakuki/ShadowSkinning | refs/heads/master | /draw_skeleton.py | import cv2
import matplotlib.pyplot as plt
from tf_pose.common import read_imgfile
from lib.skeleton import SkeletonImplement
if __name__ == '__main__':
src = read_imgfile("./images/shadow.jpg", None, None)
dst = src.copy()
skeletonImplement = SkeletonImplement()
dst = skeletonImplement.draw_skeletons(dst)
plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
plt.show()
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,611 | keitakuki/ShadowSkinning | refs/heads/master | /polygon_division.py | import cv2
from lib.contour import find_contours_and_hierarchy, find_human_contour
from lib.triangulation import SimpleTriangulation
if __name__ == "__main__":
src = cv2.imread("./images/shadow.jpg")
dst = src.copy()
contours, hierarchy = find_contours_and_hierarchy(src)
human_contour = find_human_contour(contours, hierarchy)
triangulation = SimpleTriangulation(src, human_contour)
triangulation.draw_triangles(dst)
# cv2.imwrite("./images/polygon_division.png", dst)
cv2.imshow("mesh_division", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,612 | keitakuki/ShadowSkinning | refs/heads/master | /draw_contour.py | import cv2
from lib.contour import find_contours_and_hierarchy, find_human_contour, draw_contour
if __name__ == "__main__":
src = cv2.imread("./images/shadow.jpg")
dst = src.copy()
contours, hierarchy = find_contours_and_hierarchy(src)
human_contour = find_human_contour(contours, hierarchy)
draw_contour(dst, human_contour)
# cv2.imwrite("./images/contour.png", dst)
cv2.imshow("contour", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,613 | keitakuki/ShadowSkinning | refs/heads/master | /lib/skeleton.py | from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path
class SkeletonImplement:
def __init__(self):
self.estimator = TfPoseEstimator(get_graph_path("mobilenet_thin"), target_size=(368, 368))
def infer_skeletons(self, src):
return self.estimator.inference(src, upsample_size=4.0)
def draw_skeletons(self, img):
humans = self.infer_skeletons(img)
return self.estimator.draw_humans(img, humans, imgcopy=False)
| {"/run_skinning.py": ["/lib/contour.py", "/lib/skeleton.py", "/lib/skinning.py"], "/draw_skeleton.py": ["/lib/skeleton.py"], "/polygon_division.py": ["/lib/contour.py"], "/draw_contour.py": ["/lib/contour.py"]} |
77,614 | jost95/atp-tennis-predictions | refs/heads/master | /pre_processing.py | import os
import time
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from definitions import GEN_PATH
from utilities import helper as h
def process_matches(stats_filepath, proc_match_filepath, t_weights, base_weight, proc_years, t_levels, surfaces):
# Generates a match matrix with certain statistics for each match
print('----- GENERATING PRE-PROCESSED MATCHES -----')
start_time = time.time()
mutual_matches_clay = pd.read_hdf(stats_filepath, key='mm_clay')
mutual_matches_grass = pd.read_hdf(stats_filepath, key='mm_grass')
mutual_matches_hard = pd.read_hdf(stats_filepath, key='mm_hard')
mutual_matches = mutual_matches_clay + mutual_matches_grass + mutual_matches_hard
mutual_score = pd.read_hdf(stats_filepath, key='ms')
cond_stats = pd.read_hdf(stats_filepath, key='cs')
print('Generated statistics loaded')
# Load rankings
rankings = h.load_rankings()
    # Load raw matches and sort by date
print('Loading raw matches...')
raw_matches = h.load_matches(proc_years)
raw_matches.sort_values(by=['tourney_date'], inplace=True, ascending=True)
    # Load last year's matches to calculate recent performance for matches in January
last_year = proc_years['from'] - 1
recent_years = {
'from': last_year,
'to': last_year
}
current_tourney_date = raw_matches.iloc[0].tourney_date
month_offset = 3
date_limit = current_tourney_date - pd.DateOffset(months=month_offset)
print('Loading recent matches...')
recent_matches = h.load_matches(recent_years)
recent_matches = recent_matches.loc[recent_matches.tourney_date >= date_limit]
# Load tournament details
tourneys = pd.read_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'), index_col=0)
data_columns = ['tourney_date', 'rel_total_wins', 'rel_surface_wins', 'mutual_wins', 'mutual_surface_wins',
'mutual_games', 'rank_diff', 'points_grad_diff', 'home_advantage', 'rel_climate_wins',
'rel_recent_wins', 'rel_tourney_games', 'tourney_level', 'player_1', 'player_2', 'surface',
'age_diff', 'outcome']
matches = np.zeros((len(raw_matches), len(data_columns)), dtype=np.int64)
matches = pd.DataFrame(matches, columns=data_columns)
i = 0
no_matches = len(raw_matches)
print('Pre-processing matches...')
# Generate training matrix and update statistics matrices
# Loop unavoidable
for raw_match in raw_matches.itertuples():
match = matches.iloc[i].copy()
winner_id = raw_match.winner_id
loser_id = raw_match.loser_id
tourney_date = raw_match.tourney_date
time_weight = h.get_time_weight(tourney_date)
surface = h.get_surface(raw_match.surface)
location = h.filter_tourney_name(raw_match.tourney_name)
climate = tourneys.loc[tourneys.location == location, 'climate']
if len(climate) > 0:
climate = climate.iloc[0]
else:
# If climate unknown, assume tempered (maybe indoor)
climate = 'tempered'
        # Once the tournament date advances, keep only matches within the last month_offset months
if tourney_date > current_tourney_date:
current_tourney_date = tourney_date
date_limit = current_tourney_date - pd.DateOffset(months=month_offset)
recent_matches = recent_matches.loc[recent_matches.tourney_date >= date_limit]
        # 1. Relative total win differences
rel_total_wins = h.get_relative_total_wins(cond_stats, winner_id, loser_id)
match.rel_total_wins = round(base_weight * rel_total_wins)
# 2. Relative surface win differences
rel_surface_wins = h.get_relative_surface_wins(cond_stats, winner_id, loser_id, surface)
match.rel_surface_wins = round(base_weight * rel_surface_wins)
# 3. Mutual wins
mutual_wins = mutual_matches[winner_id][loser_id] - mutual_matches[loser_id][winner_id]
match.mutual_wins = mutual_wins
# 4. Mutual surface wins
mutual_surface_wins = h.get_mutual_surface_wins(mutual_matches_clay, mutual_matches_grass, mutual_matches_hard,
surface, winner_id, loser_id)
match.mutual_surface_wins = mutual_surface_wins
        # 5. Mutual games
mutual_games = mutual_score[winner_id][loser_id] - mutual_score[loser_id][winner_id]
match.mutual_games = mutual_games
        # 6. Rank diff
rank_diff, points_grad_diff = h.get_rankings(rankings, winner_id, loser_id, tourney_date)
match.rank_diff = rank_diff
match.points_grad_diff = points_grad_diff
        # 7. Home advantage
home_advantage = h.get_home_advantage(raw_match.winner_ioc, raw_match.loser_ioc, tourneys,
raw_match.tourney_name)
match.home_advantage = home_advantage
        # 8. Relative climate win differences
rel_climate_wins = h.get_relative_climate_wins(cond_stats, winner_id, loser_id, climate)
match.rel_climate_wins = round(base_weight * rel_climate_wins)
        # 9. Get recent wins
        rel_recent_wins = h.get_recent_performance(winner_id, loser_id, recent_matches, raw_match.tourney_id)
match.rel_recent_wins = round(base_weight * rel_recent_wins)
        # 10. Get tournament performance in games
tourney_id = raw_match.tourney_id
match_num = raw_match.match_num
rel_tourney_games = h.get_tourney_games(winner_id, loser_id, recent_matches, tourney_id, match_num)
match.rel_tourney_games = rel_tourney_games
        # 11. Set age difference
match.age_diff = raw_match.winner_age - raw_match.loser_age
        # 12. Winner is always winner
match.outcome = 1
# Create a balanced set with equal outcomes
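        # Negating every other row rewrites the difference features from the loser's
        # perspective and flips outcome to -1, so both classes occur equally often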
if i % 2 == 0:
try:
                # An error occurred here once and I am not sure why
match = -match
except TypeError:
print(match)
        # 13. Set non-numeric fields after balancing the set
# Set the date as unix time so the store is more efficient (integer)
match.tourney_date = int(tourney_date.timestamp())
match.player_1 = winner_id
match.player_2 = loser_id
match.tourney_level = t_levels[raw_match.tourney_level]
match.surface = surfaces[surface]
# Update entry
matches.iloc[i] = match
# Add current match to recent matches
# noinspection PyProtectedMember
raw_match_df = pd.DataFrame.from_records([raw_match], columns=raw_match._fields, exclude=['Index'])
        recent_matches = recent_matches.append(raw_match_df)
# Update stats matrices
match_d_weight = round(base_weight * time_weight)
match_dt_weight = round(base_weight * time_weight * t_weights[raw_match.tourney_level])
cond_stats['total_wins'][winner_id] += match_dt_weight
cond_stats['surface_' + surface + '_wins'][winner_id] += match_d_weight
cond_stats['climate_' + climate + '_wins'][winner_id] += match_d_weight
cond_stats['total_losses'][loser_id] += match_dt_weight
cond_stats['surface_' + surface + '_losses'][loser_id] += match_d_weight
cond_stats['climate_' + climate + '_losses'][loser_id] += match_d_weight
# Update mutual stats
mutual_matches[winner_id][loser_id] += match_d_weight
# Extract win on surface
if surface == 'clay':
mutual_matches_clay[winner_id][loser_id] += match_d_weight
elif surface == 'grass':
mutual_matches_grass[winner_id][loser_id] += match_d_weight
else:
mutual_matches_hard[winner_id][loser_id] += match_d_weight
try:
winner_games, loser_games = h.get_score(raw_match.score)
except ValueError:
winner_games = 0
loser_games = 0
mutual_score[winner_id][loser_id] += round(base_weight * time_weight * winner_games)
mutual_score[loser_id][winner_id] += round(base_weight * time_weight * loser_games)
# Update counter
i += 1
h.print_progress(i, no_matches)
print('All', no_matches, 'matches (100%) processed')
cols_not_scale = ['tourney_date', 'home_advantage', 'tourney_level', 'player_1', 'player_2', 'surface', 'outcome']
matches_not_scale = matches.filter(cols_not_scale, axis=1)
matches_scale = matches.drop(cols_not_scale, axis=1)
matches_scale[matches_scale.columns] = StandardScaler().fit_transform(matches_scale[matches_scale.columns])
matches = matches_scale.join(matches_not_scale)
matches.to_hdf(proc_match_filepath, key='matches', mode='w')
print('Pre-processed H5 matches saved')
end_time = time.time()
time_diff = round(end_time - start_time)
print('----- PRE-PROCESS COMPLETED, EXEC TIME:', time_diff, 'SECONDS ----- \n')
| {"/pre_processing.py": ["/definitions.py"], "/utilities/extract_tourney.py": ["/definitions.py"], "/stats.py": ["/definitions.py"], "/main.py": ["/definitions.py", "/stats.py", "/pre_processing.py"], "/utilities/helper.py": ["/definitions.py"]} |
77,615 | jost95/atp-tennis-predictions | refs/heads/master | /definitions.py | import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
GEN_PATH = os.path.join(ROOT_DIR, 'input/generated/')
RAW_PATH = os.path.join(ROOT_DIR, 'input/raw/')
ODDS_PATH = os.path.join(ROOT_DIR, 'input/odds/')
| {"/pre_processing.py": ["/definitions.py"], "/utilities/extract_tourney.py": ["/definitions.py"], "/stats.py": ["/definitions.py"], "/main.py": ["/definitions.py", "/stats.py", "/pre_processing.py"], "/utilities/helper.py": ["/definitions.py"]} |
77,616 | jost95/atp-tennis-predictions | refs/heads/master | /utilities/extract_tourney.py | # This script should only be run once to generate country codes
import json
import os
import pandas as pd
import numpy as np
import requests
import pycountry
from definitions import GEN_PATH, ROOT_DIR
from utilities import helper as h
years = {'from': 2010, 'to': 2019}
matches = h.load_matches(years)
def extract_country_name():
# Get unique names
unique_tourneys = np.unique(matches.tourney_name.to_numpy())
# Extract name of location
locations = np.vectorize(h.filter_tourney_name)(unique_tourneys)
# Get unique locations
unique_loc = np.unique(locations)
# Remove empty strings
unique_loc = np.array(list(filter(None, unique_loc))).tolist()
# Generate empty arrays
countries = []
# Create new web session
search_session = requests.Session()
# Fetch country name from location
# noinspection PyTypeChecker
for i in range(len(unique_loc)):
if i % 50 == 0:
print(i)
countries.append(h.fetch_country(unique_loc[i], search_session))
unique_loc = pd.DataFrame(unique_loc, columns=['location'])
countries = pd.DataFrame(countries, columns=['country_name'])
tourney_info = pd.concat([countries, unique_loc], sort=False, axis=1)
tourney_info.to_csv(os.path.join(GEN_PATH, 'tourneys_raw.csv'))
def manual_country_fix():
tourney_info = pd.read_csv(os.path.join(GEN_PATH, 'tourneys_raw.csv'), index_col=0)
with open(os.path.join(ROOT_DIR, 'utilities/location_swaps.json')) as f:
swaps = json.load(f)
from_swaps = []
to_swaps = []
for swap in swaps:
for name in swap['from']:
from_swaps.append(name)
to_swaps.append(swap['to'])
tourney_info.country_name.replace(from_swaps, to_swaps, inplace=True)
tourney_info.sort_values(by=['country_name', 'location'], inplace=True, ascending=True)
tourney_info.reset_index(drop=True, inplace=True)
tourney_info.to_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'))
def get_country_code():
tourney_info = pd.read_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'), index_col=0)
# Add new country code column
country_code = pd.DataFrame(np.zeros_like(tourney_info.country_name), columns=['country_code'])
tourney_info = pd.concat([tourney_info, country_code], sort=False, axis=1)
unique_countries = np.unique(tourney_info.country_name)
unique_ccs = []
for country in unique_countries:
try:
unique_ccs.append(pycountry.countries.get(name=country).alpha_3)
except AttributeError:
unique_ccs.append(country)
for i in range(len(unique_ccs)):
tourney_info.loc[tourney_info.country_name == unique_countries[i], 'country_code'] = unique_ccs[i]
tourney_info.to_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'))
def get_climate():
tourney_info = pd.read_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'), index_col=0)
# Add new climate column
climate = pd.DataFrame(tourney_info.country_name.tolist(), columns=['climate'])
tourney_info = pd.concat([tourney_info, climate], sort=False, axis=1)
with open(os.path.join(ROOT_DIR, 'utilities/country_climate.json')) as f:
unique_climates = json.load(f)
to_climates = []
from_countries = []
for climate in unique_climates:
for country in climate['countries']:
from_countries.append(country)
to_climates.append(climate['climate'])
tourney_info.climate.replace(from_countries, to_climates, inplace=True)
tourney_info.to_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'))
# 1. Geo-location search for country name (takes time)
# extract_country_name()
# 2 Fix wrongly formatted countries and sort by country
manual_country_fix()
# 3. Lookup country code
get_country_code()
# 4. Lookup climate
get_climate()
| {"/pre_processing.py": ["/definitions.py"], "/utilities/extract_tourney.py": ["/definitions.py"], "/stats.py": ["/definitions.py"], "/main.py": ["/definitions.py", "/stats.py", "/pre_processing.py"], "/utilities/helper.py": ["/definitions.py"]} |
77,617 | jost95/atp-tennis-predictions | refs/heads/master | /stats.py | import os
import time
import pandas as pd
import numpy as np
from definitions import GEN_PATH
from utilities import helper as h
def generate_match_statistics(filepath, t_weights, base_weight, stats_years, proc_years):
# Generates match statistics matrices for a certain time period
print('----- GENERATING MATCH STATISTICS -----')
start_time = time.time()
# Load players
player_ids = h.extract_player_ids(proc_years)
no_players = len(player_ids)
# Load matches to generate statistics
matches = h.load_matches(stats_years, player_ids)
no_matches = len(matches)
# Create mutual stats matrices
    base_matrix = np.zeros((no_players, no_players), dtype=np.int64)
    # DataFrame wraps a 2-D ndarray without copying by default, so each matrix needs
    # its own copy; otherwise all four would share (and double-count in) one buffer
    mutual_matches_clay = pd.DataFrame(base_matrix.copy(), player_ids, player_ids)
    mutual_matches_grass = pd.DataFrame(base_matrix.copy(), player_ids, player_ids)
    mutual_matches_hard = pd.DataFrame(base_matrix.copy(), player_ids, player_ids)
    mutual_score = pd.DataFrame(base_matrix.copy(), player_ids, player_ids)
    # Create general performance matrix
cond_cat = ['total_wins', 'total_losses', 'surface_clay_wins', 'surface_clay_losses', 'surface_grass_wins',
'surface_grass_losses', 'surface_hard_wins', 'surface_hard_losses', 'climate_tropical_dry_wins',
'climate_tropical_dry_losses', 'climate_tempered_wins', 'climate_tempered_losses']
cond_stats = np.zeros((no_players, len(cond_cat)), dtype=np.int64)
cond_stats = pd.DataFrame(cond_stats, player_ids, cond_cat)
# Load tournament details
tourneys = pd.read_csv(os.path.join(GEN_PATH, 'tourneys_fixed.csv'), index_col=0)
# Counter for timing purposes
i = 0
print('Generating match statistics...')
# Loop is unavoidable...
for match in matches.itertuples():
winner_id = match.winner_id
loser_id = match.loser_id
time_weight = h.get_time_weight(match.tourney_date)
surface = h.get_surface(match.surface)
location = h.filter_tourney_name(match.tourney_name)
climate = tourneys.loc[tourneys.location == location, 'climate']
if len(climate) > 0:
climate = climate.iloc[0]
else:
# If climate unknown, assume tempered (maybe indoor)
climate = 'tempered'
# Calculate match weights
match_d_weight = round(base_weight * time_weight)
match_dt_weight = round(base_weight * time_weight * t_weights[match.tourney_level])
# Check flags
winner_in_ids = False
loser_in_ids = False
# Winner stats
if winner_id in player_ids:
cond_stats['total_wins'][winner_id] += match_dt_weight
cond_stats['surface_' + surface + '_wins'][winner_id] += match_d_weight
cond_stats['climate_' + climate + '_wins'][winner_id] += match_d_weight
winner_in_ids = True
# Loser stats
if loser_id in player_ids:
cond_stats['total_losses'][loser_id] += match_dt_weight
cond_stats['surface_' + surface + '_losses'][loser_id] += match_d_weight
cond_stats['climate_' + climate + '_losses'][loser_id] += match_d_weight
loser_in_ids = True
# Mutual statistics
if winner_in_ids and loser_in_ids:
# Extract win on surface
if surface == 'clay':
mutual_matches_clay[winner_id][loser_id] += match_d_weight
elif surface == 'grass':
mutual_matches_grass[winner_id][loser_id] += match_d_weight
else:
mutual_matches_hard[winner_id][loser_id] += match_d_weight
try:
winner_games, loser_games = h.get_score(match.score)
except ValueError:
winner_games = 0
loser_games = 0
mutual_score[winner_id][loser_id] += round(base_weight * time_weight * winner_games)
mutual_score[loser_id][winner_id] += round(base_weight * time_weight * loser_games)
# Update counter
i += 1
h.print_progress(i, no_matches)
print('All', no_matches, 'matches (100%) processed')
    # Persist to disk to avoid rerunning this script for every training phase
mutual_matches_clay.to_hdf(filepath, key='mm_clay', mode='w')
mutual_matches_grass.to_hdf(filepath, key='mm_grass')
mutual_matches_hard.to_hdf(filepath, key='mm_hard')
mutual_score.to_hdf(filepath, key='ms')
cond_stats.to_hdf(filepath, key='cs')
print('H5 statistics file saved')
end_time = time.time()
time_diff = round(end_time - start_time)
print('----- MATCH STATISTICS COMPLETED, EXEC TIME:', time_diff, 'SECONDS ----- \n')
| {"/pre_processing.py": ["/definitions.py"], "/utilities/extract_tourney.py": ["/definitions.py"], "/stats.py": ["/definitions.py"], "/main.py": ["/definitions.py", "/stats.py", "/pre_processing.py"], "/utilities/helper.py": ["/definitions.py"]} |
77,618 | jost95/atp-tennis-predictions | refs/heads/master | /main.py | import json
import os
from definitions import GEN_PATH, ROOT_DIR
from stats import generate_match_statistics
from pre_processing import process_matches
# Read configuration file
with open(os.path.join(ROOT_DIR, 'config.json')) as f:
config = json.load(f)
stats_filepath = os.path.join(GEN_PATH, config['stats_filename'])
proc_match_filepath = os.path.join(GEN_PATH, config['proc_match_filename'])
base_weight = config['base_weight']
t_weights = config['tourney_weights']
t_levels = config['tourney_levels']
surfaces = config['surfaces']
stats_years = config['stats_year']
proc_years = config['proc_year']
# GENERATE STATISTICS
# - create new statistical data to be used for training
if config['generate_stats']:
generate_match_statistics(stats_filepath, t_weights, base_weight, stats_years, proc_years)
# FEATURE ENGINEERING
# - generate new features to be evaluated
if config['generate_training']:
process_matches(stats_filepath, proc_match_filepath, t_weights, base_weight, proc_years, t_levels, surfaces)
| {"/pre_processing.py": ["/definitions.py"], "/utilities/extract_tourney.py": ["/definitions.py"], "/stats.py": ["/definitions.py"], "/main.py": ["/definitions.py", "/stats.py", "/pre_processing.py"], "/utilities/helper.py": ["/definitions.py"]} |
77,619 | jost95/atp-tennis-predictions | refs/heads/master | /utilities/helper.py | # Helper functions
import datetime as dt
import pandas as pd
import numpy as np
import os
import re
from definitions import RAW_PATH
# Timing logger for dataframe operations
def logger(f):
def wrapper(df, *args, **kwargs):
start = dt.datetime.now()
result = f(df, *args, **kwargs)
end = dt.datetime.now()
if isinstance(result, tuple):
shape = ''
for r in result:
shape += str(r.shape)
else:
shape = result.shape
print(f"{f.__name__} took={end - start} shape={shape}")
return result
return wrapper
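# Example usage (hypothetical): decorating a DataFrame-returning helper logs its
# runtime and result shape, e.g.
#   @logger
#   def load_matches(years, player_ids=None): ...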
def get_home_advantage(winner_ioc, loser_ioc, tourneys, tourney_name):
location = filter_tourney_name(tourney_name)
country_code = tourneys.loc[tourneys.location == location, 'country_code']
if len(country_code) == 0:
return 0
else:
country_code = country_code.iloc[0]
if winner_ioc == country_code and loser_ioc == country_code:
return 0
elif winner_ioc == country_code:
return 1
elif loser_ioc == country_code:
return -1
else:
return 0
def filter_tourney_name(words):
# This is not the most elegant solution but it works
s = ' '.join(
w for w in words.split() if (len(w) > 3 or (len(w) == 3 and w[len(w) - 1].isalpha()) or w[len(w) - 1] == '.'))
# Make lowercase
s = s.lower()
# Remove anything but letters and space
s = re.sub(r'[^a-z\s]', u'', s, flags=re.UNICODE)
# Replace all davis cup
if 'davis cup' in s:
s = ''
return s
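# e.g. filter_tourney_name('Paris Masters 1') -> 'paris masters'; any Davis Cup tie
# collapses to '' so it never matches a tournament location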
def fetch_country(location, session):
url = "https://nominatim.openstreetmap.org/search"
params = {
"q": location,
"format": "json",
"addressdetails": 1,
"accept-language": "en-US"
}
results = session.get(url=url, params=params).json()
# If no search results found (e.g. 'atp challenger tour finals')
if len(results) == 0:
country = location
else:
country = results[0]['address']['country']
return country
def extract_player_ids(years):
# Extract active players in a specific year range
matches = load_matches(years)
winner_ids = matches.winner_id.to_numpy()
loser_ids = matches.loser_id.to_numpy()
# Filter out unique players
players = np.unique(np.append(winner_ids, loser_ids))
print('Players loaded, number of players:', len(players))
return players
def load_matches(years, player_ids=None):
# Load matches in a specific year range
# If specified, sorts out matches where no players are in player_ids
matches = []
for year in range(years['from'], years['to'] + 1):
matches.append(
pd.read_csv(os.path.join(RAW_PATH, 'atp_matches_futures_' + str(year) + '.csv'),
parse_dates=['tourney_date']))
matches.append(
pd.read_csv(os.path.join(RAW_PATH, 'atp_matches_qual_chall_' + str(year) + '.csv'),
parse_dates=['tourney_date']))
matches.append(
pd.read_csv(os.path.join(RAW_PATH, 'atp_matches_' + str(year) + '.csv'), parse_dates=['tourney_date']))
matches = pd.concat(matches, sort=False)
if player_ids is not None:
# Remove not wanted matches
matches = matches[matches['winner_id'].isin(player_ids) | matches['loser_id'].isin(player_ids)]
# Drop not relevant columns
matches = matches.filter(
['tourney_name', 'winner_id', 'winner_ioc', 'loser_id', 'loser_ioc', 'tourney_date', 'tourney_level',
'surface', 'score', 'match_num', 'tourney_id', 'winner_age', 'loser_age'])
    # Sort by date (oldest match first)
matches.sort_values(by=['tourney_date', 'match_num'], inplace=True, ascending=True)
print('Matches loaded, number of matches:', len(matches))
return matches
def get_time_weight(current_date):
# Return an exponential weighted time decay, base year is 2019
time_delta = (dt.date(2019, 1, 1) - current_date.date()).days
return np.exp(-time_delta / (365 * 5))
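# e.g. a match on 2018-01-01 lies 365 days before the base date, giving weight
# exp(-365 / (365 * 5)) = exp(-0.2) ~ 0.82; a five-year-old match gets exp(-1) ~ 0.37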
def get_surface(surface):
    # Falls back to 'hard' when the surface is missing, unknown, or carpet
surface = str(surface).lower()
return 'hard' if surface == 'nan' or surface == 'none' or surface == 'carpet' else surface
def get_score(score):
games_w = 0
games_l = 0
try:
for m in re.finditer('-', score):
i = m.start()
games_w += int(score[i - 1])
games_l += int(score[i + 1])
except ValueError:
print(score)
return games_w, games_l
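# e.g. get_score('6-4 3-6 7-5') -> (16, 15); only one character is read on each side
# of '-', so double-digit game counts such as '10-8' would be parsed incorrectly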
def print_progress(i, no_matches):
# Prints the process
if i % 1000 == 0:
print(i, 'matches (' + str(round(i / no_matches * 100, 2)) + '%) processed')
def load_rankings():
# Loads player rankings and sorts them in ascending order
rankings_10s = pd.read_csv(os.path.join(RAW_PATH, 'atp_rankings_10s.csv'), parse_dates=['ranking_date'])
rankings_current = pd.read_csv(os.path.join(RAW_PATH, 'atp_rankings_current.csv'), parse_dates=['ranking_date'])
rankings = pd.concat([rankings_10s, rankings_current], sort=False)
# Sort by date (oldest ranking first)
rankings.sort_values(by=['ranking_date'], inplace=True, ascending=True)
print('Rankings loaded')
return rankings
def get_tourney_games(winner_id, loser_id, recent_matches, tourney_id, match_num):
    # Get performance IN the CURRENT tournament as the gap in average per-match game margins
diff_games_winner = 0
no_matches_winner = 0
diff_games_loser = 0
no_matches_loser = 0
for match in recent_matches.itertuples():
if match.tourney_id == tourney_id and match.match_num < match_num:
try:
winner_games, loser_games = get_score(match.score)
except ValueError:
winner_games = 0
loser_games = 0
diff_score = winner_games - loser_games
# Note that I am accounting for round robin match types here, e.g. ATP finals
if match.winner_id == winner_id:
diff_games_winner += diff_score
no_matches_winner += 1
elif match.loser_id == winner_id:
diff_games_winner -= diff_score
no_matches_winner += 1
if match.winner_id == loser_id:
diff_games_loser += diff_score
no_matches_loser += 1
elif match.loser_id == loser_id:
diff_games_loser -= diff_score
no_matches_loser += 1
avg_diff_winner = 0
avg_diff_loser = 0
if no_matches_winner > 0:
avg_diff_winner = diff_games_winner / no_matches_winner
if no_matches_loser > 0:
avg_diff_loser = diff_games_loser / no_matches_loser
return avg_diff_winner - avg_diff_loser
def get_recent_performance(winner_id, loser_id, recent_matches, tourney_id):
    # Extract recent performance as the difference in relative wins over recent matches BEFORE this tournament
recent_wins_winner = 0
recent_played_winner = 0
recent_wins_loser = 0
recent_played_loser = 0
for match in recent_matches.itertuples():
if match.tourney_id != tourney_id:
if match.winner_id == winner_id:
recent_wins_winner += 1
recent_played_winner += 1
elif match.winner_id == loser_id:
recent_wins_loser += 1
recent_played_loser += 1
if match.loser_id == winner_id:
recent_played_winner += 1
elif match.loser_id == loser_id:
recent_played_loser += 1
if recent_played_winner == 0:
rel_wins_winner = 0
else:
rel_wins_winner = recent_wins_winner / recent_played_winner
if recent_played_loser == 0:
rel_wins_loser = 0
else:
rel_wins_loser = recent_wins_loser / recent_played_loser
return rel_wins_winner - rel_wins_loser
def get_relative_climate_wins(cond_stats, winner_id, loser_id, climate):
# For each player, calculates the ratio won matches in the current climate and
# then takes the difference between them
climate_wins_winner = cond_stats['climate_' + climate + '_wins'][winner_id]
climate_losses_winner = cond_stats['climate_' + climate + '_losses'][winner_id]
climate_played_winner = climate_wins_winner + climate_losses_winner
climate_wins_loser = cond_stats['climate_' + climate + '_wins'][loser_id]
climate_losses_loser = cond_stats['climate_' + climate + '_losses'][loser_id]
climate_played_loser = climate_wins_loser + climate_losses_loser
if climate_played_winner == 0:
rel_climate_wins_winner = 0
else:
rel_climate_wins_winner = float(climate_wins_winner) / climate_played_winner
if climate_played_loser == 0:
rel_climate_wins_loser = 0
else:
rel_climate_wins_loser = float(climate_wins_loser) / climate_played_loser
return rel_climate_wins_winner - rel_climate_wins_loser
def get_relative_surface_wins(cond_stats, winner_id, loser_id, surface):
# For each player, calculates the ratio won matches on the surface and
# then takes the difference between them
surface_wins_winner = cond_stats['surface_' + surface + '_wins'][winner_id]
surface_losses_winner = cond_stats['surface_' + surface + '_losses'][winner_id]
surface_played_winner = surface_wins_winner + surface_losses_winner
surface_wins_loser = cond_stats['surface_' + surface + '_wins'][loser_id]
surface_losses_loser = cond_stats['surface_' + surface + '_losses'][loser_id]
surface_played_loser = surface_wins_loser + surface_losses_loser
if surface_played_winner == 0:
rel_surface_wins_winner = 0
else:
rel_surface_wins_winner = float(surface_wins_winner) / surface_played_winner
if surface_played_loser == 0:
rel_surface_wins_loser = 0
else:
rel_surface_wins_loser = float(surface_wins_loser) / surface_played_loser
return rel_surface_wins_winner - rel_surface_wins_loser
def get_relative_total_wins(cond_stats, winner_id, loser_id):
# For each player, calculates the ratio won matches in total and
# then takes the difference between them
total_wins_winner = cond_stats['total_wins'][winner_id]
total_losses_winner = cond_stats['total_losses'][winner_id]
total_played_winner = total_wins_winner + total_losses_winner
total_wins_loser = cond_stats['total_wins'][loser_id]
total_losses_loser = cond_stats['total_losses'][loser_id]
total_played_loser = total_wins_loser + total_losses_loser
if total_played_winner == 0:
rel_total_wins_winner = 0
else:
rel_total_wins_winner = float(total_wins_winner) / total_played_winner
if total_played_loser == 0:
rel_total_wins_loser = 0
else:
rel_total_wins_loser = float(total_wins_loser) / total_played_loser
return rel_total_wins_winner - rel_total_wins_loser
def get_mutual_surface_wins(mm_clay, mm_grass, mm_hard, surface, winner_id, loser_id):
# Calculates the difference in wins between opponents on specified surface
if surface == 'clay':
return mm_clay[winner_id][loser_id] - mm_clay[loser_id][winner_id]
elif surface == 'grass':
return mm_grass[winner_id][loser_id] - mm_grass[loser_id][winner_id]
else:
return mm_hard[winner_id][loser_id] - mm_hard[loser_id][winner_id]
def get_rankings(rankings, winner_id, loser_id, tourney_date):
    # Get the current rank difference and the one-year ranking-points gradient
    # .copy() so the inplace set_index below does not trigger SettingWithCopyWarning
    winner_rankings = rankings.loc[rankings['player'] == winner_id].copy()
    loser_rankings = rankings.loc[rankings['player'] == loser_id].copy()
highest_numbered_ranking = np.max(rankings['rank'])
# Set date as ranking index, delete possible duplicates due to overlapping lists
winner_rankings.set_index('ranking_date', inplace=True)
winner_rankings = winner_rankings.loc[~winner_rankings.index.duplicated(keep='first')]
loser_rankings.set_index('ranking_date', inplace=True)
loser_rankings = loser_rankings.loc[~loser_rankings.index.duplicated(keep='first')]
# It is not certain that all players have a ranking right now
try:
winner_current_rank = winner_rankings.iloc[winner_rankings.index.get_loc(tourney_date, method='pad')]
winner_current_points = winner_current_rank['points']
winner_current_rank = winner_current_rank['rank']
except KeyError:
winner_current_rank = highest_numbered_ranking + 1
winner_current_points = 0
try:
loser_current_rank = loser_rankings.iloc[loser_rankings.index.get_loc(tourney_date, method='pad')]
loser_current_points = loser_current_rank['points']
loser_current_rank = loser_current_rank['rank']
except KeyError:
loser_current_rank = highest_numbered_ranking + 1
loser_current_points = 0
rank_diff = winner_current_rank - loser_current_rank
last_year_date = tourney_date - pd.DateOffset(years=1)
# It is not certain that all players had a ranking one year ago
try:
winner_old_rank = winner_rankings.iloc[winner_rankings.index.get_loc(last_year_date, method='pad')]
winner_old_points = winner_old_rank['points']
except KeyError:
winner_old_points = 0
try:
        loser_old_rank = loser_rankings.iloc[loser_rankings.index.get_loc(last_year_date, method='pad')]
loser_old_points = loser_old_rank['points']
except KeyError:
loser_old_points = 0
winner_points_grad = winner_current_points - winner_old_points
loser_points_grad = loser_current_points - loser_old_points
points_grad_diff = winner_points_grad - loser_points_grad
return rank_diff, points_grad_diff
| {"/pre_processing.py": ["/definitions.py"], "/utilities/extract_tourney.py": ["/definitions.py"], "/stats.py": ["/definitions.py"], "/main.py": ["/definitions.py", "/stats.py", "/pre_processing.py"], "/utilities/helper.py": ["/definitions.py"]} |
77,620 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /visualization/2nd_order_correlation.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
from sklearn.cluster.bicluster import SpectralCoclustering
import math, os, gensim, scipy.sparse
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
def sigmoid(x):
return 1 / (1 + math.exp(-x))
ZERO = 0.0000000000000000000001
def gen_co_occurrence_matrix(transaction_file_path, schema, similarity=False, model=None, smoothing=False):
utility_matrix = dict()
item_set = set()
user_set = set()
with open(transaction_file_path) as f:
for line in f:
data = line.strip().split(',')
user_id = data[schema['user_id_col']]
item_id = data[schema['item_id_col']]
quantity = data[schema['quantity_col']]
item_set.add(item_id)
user_set.add(user_id)
            if user_id in utility_matrix:
utility_matrix[user_id].append(item_id)
else:
utility_matrix[user_id] = [item_id]
# build mapping
item_id_to_index_mapping = {x:i for i,x in enumerate(item_set)}
print(len(item_id_to_index_mapping))
counter = 0
if similarity:
item_set = list(item_set)
cooccurrence_matrix = np.zeros(shape=(len(item_set), len(item_set)))
counter = 0
for i in range(len(item_set)):
for j in range(len(item_set)):
counter += 1
# print('%3f' % (counter / len(item_set) / len(item_set)))
if i == j:
cooccurrence_matrix[i, j] = 0
continue
try:
                    sim = model.similarity(item_set[i], item_set[j])
                    cooccurrence_matrix[i, j] = sim
except KeyError:
cooccurrence_matrix[i, j] = 0
else: # co-occurrence (distance) matrix
cooccurrence_matrix = scipy.sparse.lil_matrix((len(item_set), len(item_set)))
for user in utility_matrix.keys():
counter += 1
# print('\r%.3f' % (counter/len(list(utility_matrix.keys()))), len(utility_matrix[user]), end='')
for i in range(len(utility_matrix[user])):
this_item = utility_matrix[user][i]
others = utility_matrix[user][:]
del others[i]
for other_item in others:
# addition
x = item_id_to_index_mapping[this_item]
y = item_id_to_index_mapping[other_item]
if not smoothing:
cooccurrence_matrix[x, y] += 1
# print(cooccurrence_matrix[x,y])
                    elif smoothing:
                        # smoothing is not implemented yet; fail loudly
                        raise NotImplementedError('co-occurrence smoothing is not implemented')
# print(cooccurrence_matrix.maxprint)
cooccurrence_matrix = cooccurrence_matrix.toarray()
return cooccurrence_matrix
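# Usage sketch (hypothetical path; 'w2v' stands for a loaded gensim word2vec model,
# as in the __main__ block below):
#   schema = {'user_id_col': 0, 'item_id_col': 1, 'quantity_col': 2}
#   counts = gen_co_occurrence_matrix('transactions.csv', schema)
#   sims = gen_co_occurrence_matrix('transactions.csv', schema, similarity=True, model=w2v)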
if __name__ == "__main__":
cur_dir = os.path.dirname(__file__)
project = ['online_shopping', 'artificial', 'microsoft', 'music','kaggle', 'store', 'bank_competition'][1]
window_size = 30
dimension = 50
model_name = ['labelled', 'unlabelled', 'uniform_10000_1000'][2]
file_path = os.path.join(cur_dir, '..', project, model_name)
model_path = os.path.join(cur_dir, '..', project, 'models', model_name+str(window_size)+str(dimension)+'.txt')
embedding = [False, True][1]
rank = [False, True][1]
smooth = [False, True][0]
regenerate_model = [False, True][0]
binary = [False, True][0]
offset = [1][0]
color_scheme = [plt.cm.Blues, plt.cm.Spectral_r][1]
param_dict = {
"user_id_col" : 0,
'item_id_col' : 1,
'quantity_col': 2,
'plot_title': 'item popularity'
}
try:
# for debugging
# raise FileNotFoundError()
if embedding:
co_matrix_file_path = os.path.join(cur_dir, '..', project, '_'.join(['cooccurrent_matrix_embedding', str(window_size), str(dimension)]))
if regenerate_model:
os.remove(co_matrix_file_path)
matrix_file = open(co_matrix_file_path, 'rb')
else:
co_matrix_file_path = os.path.join(cur_dir, '..', project, '_'.join(['cooccurrent_matrix']))
if regenerate_model:
os.remove(co_matrix_file_path)
matrix_file = open(co_matrix_file_path, 'rb')
co_matrix = np.loadtxt(matrix_file)
except FileNotFoundError:
print('building similarity matrix....', end='')
if embedding:
word2vec_encoder = gensim.models.Word2Vec.load_word2vec_format(model_path, binary=False)
co_matrix = gen_co_occurrence_matrix(file_path, param_dict, similarity=embedding, model=word2vec_encoder, smoothing=smooth)
matrix_file_path = os.path.join(cur_dir, '..', project, '_'.join(['cooccurrent_matrix_embedding', str(window_size), str(dimension)]))
else:
co_matrix = gen_co_occurrence_matrix(file_path, param_dict, smoothing=smooth)
matrix_file_path = os.path.join(cur_dir, '..', project, '_'.join(['cooccurrent_matrix']))
matrix_file = open(matrix_file_path, 'wb')
np.savetxt(matrix_file, co_matrix)
print('done')
copy = co_matrix
stats = []
options = range(0, 200, 10)
options = [x/100 for x in options]
print(options)
if embedding:
# spectrum analysis
for option_index in range(len(options)):
co_matrix = copy.copy()
if binary: # binary
                threshold = options[option_index]
                co_matrix[co_matrix >= threshold] = threshold
                co_matrix[co_matrix < threshold] = ZERO
                num_max = len(np.where(co_matrix == threshold)[0])
                # fraction of matrix entries clamped to the threshold
                stats.append(num_max / (co_matrix.shape[0] * co_matrix.shape[1]))
title_stats = ' threshold: '+str(threshold)
else:
if option_index == len(options) - 1:
continue
size = 1
low_bound, upper_bound = options[option_index] , \
options[option_index+size]
co_matrix += offset
print('min value, max value:',co_matrix.min(), co_matrix.max())
print('lower_bound, upper_bound:', low_bound, upper_bound)
co_matrix[co_matrix == 0] = ZERO
num_between = len(co_matrix[(low_bound < co_matrix) & (co_matrix < upper_bound)])
print('num between', num_between)
stats.append(num_between)
title_stats = ' range: ('+str(low_bound)+','+str(upper_bound)+')'
model = SpectralCoclustering(n_clusters=5, )# random_state=5, )
model.fit(co_matrix)
fit_data = co_matrix[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
    co_matrix = copy.copy()
    # initialise min_value up front; the 'if not embedding' branch below is the only
    # other place it is assigned, so the embedding path would otherwise hit a NameError
    min_value = 0
    max_value = co_matrix.max()
if not embedding:
        vectorized_sigmoid = np.vectorize(sigmoid, otypes=[float])
        co_matrix = vectorized_sigmoid(co_matrix)
co_matrix[co_matrix == 0] = ZERO
max_value = co_matrix.max()
min_value = co_matrix.min()
print(min_value, max_value)
model = SpectralCoclustering(n_clusters=50, )# random_state=5, )
model.fit(co_matrix)
np.set_printoptions(threshold=np.nan)
fit_data = co_matrix[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
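    # sorting rows and columns by their co-cluster labels gathers co-clustered items
    # into contiguous blocks, so dense blocks show up along the diagonal of the plot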
# model = SpectralBiclustering()
# clustered plot
# cax = plt.imshow(co_matrix, cmap=color_scheme)
if not rank:
cax = sns.plt.matshow(co_matrix, cmap=color_scheme)
else:
cax = sns.plt.matshow(fit_data, cmap=color_scheme)
if min_value > 0:
min_value = 0
plt.colorbar(cax, ticks=[min_value, max_value])
# plt.imshow(fit_data, cmap=color_scheme)
plt.show()
if embedding:
# y = [int(x) for x in stats]
y = [x for x in stats]
print(y)
# y.reverse()
x = [float(i)-offset for i in options[1:]]
sns.barplot(x, y)
sns.plt.show()
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,621 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /visualization/xls_to_csv.py | import xlrd
import csv
def csv_from_excel(xls, tab_name, output_path):
wb = xlrd.open_workbook(xls)
sh = wb.sheet_by_name(tab_name)
    your_csv_file = open(output_path, 'w', newline='')
wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
for rownum in range(sh.nrows):
wr.writerow(sh.row_values(rownum))
your_csv_file.close()
if __name__ == "__main__":
    input_file = 'data/Online Retail.xlsx'
output_file = 'data/online_retails.csv'
tab_name = 'Online Retail'
csv_from_excel(input_file, tab_name, output_file) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,622 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /density_analysis/create_subclusters.py | import csv, os
from utils.read_transaction import gen_utility_matrix
cur_dir = os.path.join(os.path.dirname(__file__))
if __name__ == "__main__":
# l1 = ['1' ,'2', '3']
# my_dict = dict(zip(l1, range(len(l1))))
# inv_map = {v: k for k, v in my_dict.items()}
# print(my_dict)
# print(inv_map)
file_path = os.path.join(cur_dir, 'toy_transaction')
gen_utility_matrix(file_path) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,623 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /visualization/1st_order_correlation.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
import csv, os
import seaborn as sns
import matplotlib.pyplot as plt
import math
import collections
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def read_csv(file_path, index_dict,ignore_null=True, binary_utility=True, sort=False):
user_id_col = index_dict['user_id_col']
item_id_col = index_dict['item_id_col']
quantity_col = index_dict['quantity_col']
ignore = 1
user_stats = {}
item_stats = {}
item_distribution = []
with open(file_path) as csv_file:
        spamreader = csv.reader(csv_file, delimiter=',', quotechar='\"')
        for row in spamreader:
if ignore > 0:
ignore -= 1
continue
# item distribution
user_id, item_id, quantity = row[user_id_col], row[item_id_col], int(row[quantity_col])
if ignore_null and user_id == '':
continue
if binary_utility:
quantity = 1
item_distribution.append(item_id)
# utility matrix
if user_id != "":
                if user_id in user_stats:
user_stats[user_id] += quantity
else:
user_stats[user_id] = quantity
            if item_id in item_stats:
item_stats[item_id] += quantity
else:
item_stats[item_id] = quantity
avg_user_activity = sum(user_stats.values())/len(user_stats)
avg_item_utility = sum(item_stats.values())/len(item_stats)
# avg_user_activity = sum(user_stats.values())
# avg_item_utility = sum(item_stats.values())
print("average item utility",avg_item_utility,'number items', len(item_stats))
print('average user utility', avg_user_activity,'number users', len(user_stats))
print('item density',avg_item_utility/len(item_stats))
print('user density',avg_user_activity/len(user_stats))
print('(item utility / user utility) ratio', avg_item_utility/avg_user_activity)
avg_user_activity = 'average user utility:'+str(avg_user_activity)
avg_item_utility = 'average item utility: ' + str(avg_item_utility)
label = ' \n'.join([avg_user_activity, avg_item_utility])
    # convert the item distribution to per-item counts
    counts = collections.Counter(item_distribution)
    if sort:
        # sorted by descending frequency
        frequency_list = counts.most_common(len(counts))
    else:
        # unsorted
        frequency_list = counts.items()
frequency_list = [x[1] for x in frequency_list]
return user_stats, item_stats, frequency_list, label
def plot_utility(transactions, label, log_y=False, log_x=False):
if log_y:
# nice mass representation
values = [math.acosh(x) for x in transactions if x > 0]
# values = [math.log10(x[1]) for x in transactions if x[1] > 0]
# future
# values = [math.asinh(x) for x in transactions if x > 0]
else:
values = [x for x in transactions if x > 0]
# ax = sns.countplot(values)
sns.plt.plot(values, label=label)
# ax = sns.distplot(values, label=label, norm_hist=False, bins=200)
# sns.distplot(values, label=label, rug=True, hist=False)
if __name__ == "__main__":
def config_plot(plot_path, title, show=False):
plt.xlim(0)
plt.autoscale()
# plt.ylim(0)
plt.legend()
plt.xlabel('item')
plt.ylabel('utility')
if show:
sns.plt.show()
sns.plt.title(title)
sns.plt.savefig(plot_path)
sns.plt.cla()
dash_board = [['gift', 'movielens100k'][0]]
# dash_board = ['gift', 'music', 'microsoft']
show_plot = True
sort = True
if 'gift' in dash_board:
param_dict = {
"user_id_col" : 0,
'item_id_col' : 1,
'quantity_col': 2,
'plot_title': 'item utility'
}
file_path = os.path.join('..', 'online_shopping', 'transaction_unlabelled')
user, item, item_dist, label = read_csv(file_path, param_dict, ignore_null=True, sort=sort)
plot_utility(item_dist, label, log_y=False)
figure_path = os.path.join('plots', 'gift_item_popularity')
config_plot(figure_path, param_dict['plot_title'],show=show_plot)
elif 'movielens100k' in dash_board:
param_dict = {
"user_id_col" : 0,
'item_id_col' : 1,
'quantity_col': 2,
'plot_title': 'item utility'
}
file_path = os.path.join('..','movielens100k', 'transaction_unlabelled')
user, item, item_dist, label = read_csv(file_path, param_dict, ignore_null=True, sort=sort)
plot_utility(item_dist, label, log_y=False)
        figure_path = os.path.join('plots', 'movielens100k_item_popularity')
config_plot(figure_path, param_dict['plot_title'],show=show_plot)
if 'artificial' in dash_board:
param_dict = {
"user_id_col" : 0,
'item_id_col' : 1,
'quantity_col': 2,
'plot_title': 'item popularity'
}
file_path = os.path.join('..', 'artificial', 'uniform_6000_300')
file_path = os.path.join('..', 'artificial', 'exponential_33000_285')
user, item, item_dist, label = read_csv(file_path, param_dict, ignore_null=True, sort=sort)
plot_utility(item_dist, label, log_y=False)
figure_path = os.path.join('plots', 'artificial_item_popularity')
config_plot(figure_path, param_dict['plot_title'],show=show_plot) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,624 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /temp.py | from sklearn.metrics.pairwise import cosine_similarity
x = [[0.1,0.9]]
y = [[0.3,0.5] ]
ret =cosine_similarity(x, y)
print(ret) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
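# prints roughly [[0.9091]]: dot(x, y) = 0.48 and |x||y| = sqrt(0.82) * sqrt(0.34) ~ 0.528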
77,625 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /earth_mover_distance_user_user_CF.py | # https://github.com/garydoranjr/pyemd
# from emd import emd
from pyemd import emd
from numpy import array
import numpy as np
import gensim, os
# env settings
cur_dir = os.path.dirname(__file__)
project = 'movielens100k'
# project = 'artificial'
# artificial data are generated by simulator module
labelled_data = ['short', 'long'][0]
unlabelled_data = ['short', 'long'][0]
test_file_name = ['te_short', 'te_long'][0]
train_file_name = ['tr_short', 'tr_long'][1]
test_file_path = os.path.join(cur_dir, project, test_file_name)
train_file_path = os.path.join(cur_dir, project, train_file_name)
control_1 = 0
scalar = [0.5, 1, 1.5, 2, 2.5, 3][3] # 0.5 for sg, 2 for cbow
dimension = [40, 70][0]
top_n = 1
num_item_in_query = 1
num_bins = 5
transaction_folder = os.path.join(cur_dir, project)
# parameters
sample = [True, False][0]
gen_data = [True, False][1]
cbow = [True, False][0]
train = [True, False][control_1]
hs = [0, 1][0]
# alpha = False # 1 -> short term 0 -> long term False -> without utility True -> with utility
alpha = 1 # 1 -> short term 0 -> long term False -> without utility True -> with utility
if train:
epoch = 1
else:
epoch = 50
'''
movie lens sg
10% remain 0.6
'''
# short term model
model_window_size = 5
model_input_file_name = 'tr_' + unlabelled_data
model_name = unlabelled_data + str(model_window_size) + str(dimension) + ('cbow' if cbow else 'sg') + str(
scalar).replace('.', '')
model_input_file_path = os.path.join(cur_dir, project, model_input_file_name)
model_path = os.path.join(cur_dir, project, 'models', model_name + '.txt')
# impl
X = np.random.rand(10,50)
for _ in range(19573):
Y = np.random.rand(10,50)
# emd(X,Y)
# print emd(X, Y)
embedding = gensim.models.Word2Vec.load_word2vec_format(model_path, binary=False)
vocab = embedding.vocab
vocab_list = list(vocab.keys())
vocab_idx = range(len(vocab_list))
name2idx = dict(zip(vocab_list, vocab_idx))
idx2name = dict(zip(vocab_idx, vocab_list))
train_set_path = train_file_path
train = (line.strip().split(' ') for line in open(train_set_path, 'r'))
# labels are used in K-nearest neighbor to calculate similar users
# each label is a n-dim histogram
labels = []
for receipt in train:
this_receipt = [0] * len(name2idx)
for item in receipt:
try:
this_receipt[name2idx[item]] += 1
except KeyError:
continue
labels.append(np.array(this_receipt))
# build the ground-distance matrix for EMD: cosine_similarity is a similarity score,
# while pyemd expects a cost/distance, so convert to cosine distance (1 - similarity)
vertices = [None] * len(vocab_list)
for i in range(len(vocab_list)):
    vertices[i] = embedding[idx2name[i]]
from sklearn.metrics.pairwise import cosine_similarity
distance_matrix = np.array(1.0 - cosine_similarity(vertices, vertices)).astype(np.float64)
# build test set
test = (line.strip().split(' ') for line in open(test_file_path, 'r'))
test_set = []
for receipt in test:
this_receipt = [0] * len(name2idx)
for item in receipt:
try:
this_receipt[name2idx[item]] += 1
except KeyError:
continue
test_set.append(np.array(this_receipt))
labels = np.array(labels).astype(np.float64)
test_set = np.array(test_set).astype(np.float64)
similarity_vector = []
test_sample = test_set[3]
non_zero = np.nonzero(test_sample)[0]
remain = non_zero[:int(len(non_zero)/2)]
removed = non_zero[int(len(non_zero)/2):]
print('removed', removed)
print('remain', remain)
test_sample[removed] = 0
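# hold-out evaluation: half of this user's items remain as the query histogram; the
# removed half should ideally reappear among the items aggregated from the neighbourhood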
# 1350
# exit()
for label in labels[:]:
ret = emd(test_sample, label, distance_matrix)
similarity_vector.append(ret)
similarity_vector = np.array(similarity_vector)
neighbourhood = np.argsort(similarity_vector)[:30]
# print(neighbourhood)
top_similar = similarity_vector[neighbourhood]
import math
normalised_sum = sum([math.exp(x) for x in top_similar])
aggregation = labels[neighbourhood][0]
for user in labels[neighbourhood][1:]:
# weighted average of reversed softmax
aggregation += (1 - emd(test_sample, user, distance_matrix)/normalised_sum) * user
# aggregation += user
print(np.argsort(aggregation)[-5:])
# compute neighborhood U using EMD
# aggregate (weighted average) the items that all users in neighborhood U purchased
# user the top n from ranked items for recommendation | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,626 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /data_processing/get_columns_from_csv.py | import csv
def get_columns_movie_lens(csv_path, output_path, cols, delimiter=',', quotechar='"'):
output_stream = open(output_path, 'w')
with open(csv_path, 'r') as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
writer = csv.writer(output_stream, delimiter=',', quotechar=quotechar)
for rows in reader:
if int(rows[2]) >= 4:
row = [rows[i] for i in cols]
row.insert(2, 1)
else:
continue
writer.writerow(row)
output_stream.close()
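# e.g. with cols=[0, 1, 3] (as in the commented MovieLens block below), a raw line
# 'user<TAB>item<TAB>5<TAB>timestamp' becomes 'user,item,1,timestamp': ratings >= 4
# are kept as binary positive feedback and lower-rated rows are dropped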
def get_columns(csv_paths, output_path, cols, delimiter=',', quotechar='"'):
output_stream = open(output_path, 'w')
for csv_path in csv_paths:
with open(csv_path, 'r') as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
writer = csv.writer(output_stream, delimiter=',', quotechar=quotechar)
for rows in reader:
row = [rows[i].strip() for i in cols]
writer.writerow(row)
output_stream.close()
if __name__ == "__main__":
import os
project = 'movie_lens_100k'
project = 'tafeng'
project = 'gift_receipt'
cur_dir = os.path.dirname(__file__)
# gift_store_indices
input_path = [os.path.join(cur_dir, project, 'raw')]
output_path = os.path.join(cur_dir, project, 'cleaned')
columns = [0, 2, 4, 5]
get_columns(input_path, output_path, columns)
# movie lens
# input_path = [os.path.join(cur_dir, project, 'raw')]
# output_path = os.path.join(cur_dir, project, 'cleaned')
# columns = [0, 1, 3]
# get_columns_movie_lens([input_path], output_path, columns, delimiter='\t')
# tafeng
# input_path = [os.path.join(cur_dir, project, 'D'+x) for x in ['01', '02', '11', '12']]
# output_path = os.path.join(cur_dir, project, 'cleaned')
# columns = [1, 5, 6, 0]
# get_columns(input_path, output_path, columns, delimiter=';')
# gift_store_indices
# input_path = [os.path.join(cur_dir, project, 'raw')]
# output_path = os.path.join(cur_dir, project, 'cleaned')
# columns = [6, 2, 4, 5]
# get_columns(input_path, output_path, columns)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,627 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /stacked_auto_encoder.py | from keras.layers import Input, Dense, regularizers
from keras.models import Model
import keras
from math import sqrt, log
from utils.read_transaction import gen_utility_matrix, split_utility_matrix
import numpy as np
import tensorflow as tf
def StackedAutoEncoder(dimension, encoding_dim, x_train, y_train, x_test, y_test, activation_1, activation_2, loss_function, epoch=30, factor=4, bias=False):
# this is the size of our encoded representations
input_matrix = Input(shape=(dimension,))
encoded = Dense(128*factor, activation=activation_1, activity_regularizer=regularizers.l2(10e-4),bias=bias)(input_matrix)
encoded = Dense(64*factor, activation=activation_1,bias=bias)(encoded)
encoded = Dense(encoding_dim, activation=activation_1,bias=bias)(encoded)
decoded = Dense(64*factor, activation=activation_1,bias=bias)(encoded)
decoded = Dense(128*factor, activation=activation_1, bias=bias)(decoded)
# decoded = Dense(dimension, activation=activation_2, bias=bias)(decoded)
decoded = Dense(dimension, activation=activation_2, activity_regularizer=regularizers.l2(10e-4), bias=bias)(decoded)
autoencoder = Model(input=input_matrix, output=decoded)
# encoder = Model(input=input_matrix, output=encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input_1 = Input(shape=(encoding_dim,))
encoded_input_2 = Input(shape=(64*factor,))
encoded_input_3 = Input(shape=(128*factor,))
encoder_layer_1 = autoencoder.layers[-6]
encoder_layer_2 = autoencoder.layers[-5]
encoder_layer_3 = autoencoder.layers[-4]
decoder_layer_1 = autoencoder.layers[-3]
decoder_layer_2 = autoencoder.layers[-2]
decoder_layer_3 = autoencoder.layers[-1]
encoder_1 = Model(input=input_matrix, output=encoder_layer_1(input_matrix))
encoder_2 = Model(input=encoded_input_3, output=encoder_layer_2(encoded_input_3))
encoder_3 = Model(input=encoded_input_2, output=encoder_layer_3(encoded_input_2))
# create the decoder model
decoder_1 = Model(input = encoded_input_1, output = decoder_layer_1(encoded_input_1))
decoder_2 = Model(input = encoded_input_2, output = decoder_layer_2(encoded_input_2))
decoder_3 = Model(input = encoded_input_3, output = decoder_layer_3(encoded_input_3))
# optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
# optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
# optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
optimizer = keras.optimizers.Adadelta()
autoencoder.compile(optimizer=optimizer, loss=loss_function)
autoencoder.fit(x_train, y_train,
nb_epoch= epoch,
batch_size=40,
shuffle=True,
validation_data=(x_test, y_test))
# return encoder, [decoder_1, decoder_2, decoder_3]
return [encoder_1, encoder_2, encoder_3], [decoder_1, decoder_2, decoder_3]
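# The stack is: input -> 128*factor -> 64*factor -> encoding_dim -> 64*factor ->
# 128*factor -> input; the per-stage encoder/decoder models above re-expose the
# trained layers so activations can be chained stage by stage in visualize_result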
def visualize_result(encoder, decoder, test, input_dim, embedded_dim):
# print(dir(encoder[0]))
# print(encoder[0].get_weights())
# exit()
# encode and decode some digits
# note that we take them from the *test* set
dim_x = int(sqrt(input_dim))
print(input_dim, dim_x)
# encoded_imgs = encoder.predict(test)
encoded_imgs = encoder[0].predict(test)
encoded_imgs = encoder[1].predict(encoded_imgs)
encoded_imgs = encoder[2].predict(encoded_imgs)
from sklearn.metrics.pairwise import cosine_similarity
this = encoded_imgs[0]
for other in encoded_imgs[1:]:
print(cosine_similarity(this.reshape(1, -1), other.reshape(1, -1)))
decoded_imgs = decoder[0].predict(encoded_imgs)
decoded_imgs = decoder[1].predict(decoded_imgs)
decoded_imgs = decoder[2].predict(decoded_imgs)
print(test.shape)
print(decoded_imgs.shape)
print(type(decoded_imgs), type(decoded_imgs[0]))
# exit()
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(30, 4))
avg = []
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
# plt.imshow(test[i].reshape(dim_x, dim_x))
indices = test[i].argsort()
original_matrix = test[i][indices[::-1]]
original_matrix = np.resize(original_matrix, (dim_x, dim_x))
plt.imshow(original_matrix)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(encoded_imgs[i].reshape(int(sqrt(embedded_dim)), int(sqrt(embedded_dim))))
# plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, i + 1 + 2 * n)
# threshold the data
# threshold = np.average(decoded_imgs[i]) + np.std(decoded_imgs[i])
# threshold = 0.9
# decoded_imgs[i][decoded_imgs[i]>threshold] = 1
# decoded_imgs[i][decoded_imgs[i]<=threshold] = 0
avg.append(np.average(decoded_imgs[i]))
# print(test[i])
# print(decoded_imgs[i], max(decoded_imgs[i]))
# exit()
# plt.imshow(decoded_imgs[i].reshape(dim_x, dim_x))
decoded_matrix = decoded_imgs[i][indices[::-1]]
decoded_matrix = np.resize(decoded_matrix, (dim_x, dim_x))
plt.imshow(decoded_matrix)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
print(avg)
plt.show()
def Noising(x_train, x_test, noise_factor):
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
# todo @charles the clip -1 to 1 or 0 to 1??
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
return x_train_noisy, x_test_noisy
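# Illustrative denoising setup (uses only names defined in this file): the stacked
# autoencoder above is trained on (noisy input, clean target) pairs, e.g.
#   tr_noisy, te_noisy = Noising(x_train, x_test, noise_factor=0.5)
#   # ...then StackedAutoEncoder(dim, embedded_dim, tr_noisy, x_train, te_noisy, x_test, ...)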
def Corrupt(x_train, corrupt_factor):
    # incomplete in the original; assumed intent (masking noise): zero each entry independently with probability corrupt_factor
    x_train_corrupt = x_train * (np.random.random(x_train.shape) >= corrupt_factor)
    return x_train_corrupt
if __name__ == "__main__":
import os
cur_dir = os.path.dirname(__file__)
transaction_file = os.path.join(cur_dir, 'toy_transaction')
# online_shopping_transaction_file = os.path.join(cur_dir, 'online_shopping', 'unlabelled')
online_shopping_transaction_file = os.path.join(cur_dir, 'movielens100k', 'labeled100k')
online_shopping_transaction_file = os.path.join(cur_dir, 'movielens100k', 'unlabeled_weekly')
# online_shopping_transaction_file = os.path.join(cur_dir, 'movielens100k', 'unlabeled')
    np.set_printoptions(threshold=np.inf)
utility_data = gen_utility_matrix(online_shopping_transaction_file)
utility_matrix, idx2user, idx2item = utility_data['utility_matrix'], utility_data['idx2user'], utility_data[
'idx2item']
print(utility_matrix.shape)
x_train, x_test = split_utility_matrix(utility_matrix, 0.9)
# from keras.datasets import mnist
# import numpy as np
# (x_train, _), (x_test, _) = mnist.load_data()
# x_train = x_train.astype('float32') / 255.
# x_test = x_test.astype('float32') / 255.
# x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
# x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
train_noisy, test_noisy = Noising(x_train, x_test, noise_factor=0.9)
num_samples, dimension = x_train.shape
embedded_dimension = 64
print('training set', x_train.shape)
print('testing set', x_test.shape)
# encoder, decoder = StackedAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'linear', 'linear', 'binary_crossentropy', epoch=20)
# kullback_leibler_divergence
encoder, decoder = StackedAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'linear', 'sigmoid', 'binary_crossentropy', epoch=50, factor=1, bias=True)
# visualize_result(encoder, decoder, x_test, dimension, embedded_dimension)
visualize_result(encoder, decoder, x_train, dimension, embedded_dimension) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,628 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /simulator.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
from scipy import sparse
from abc import ABCMeta, abstractmethod
import random
import math
def sample_gen(n, forbid):
state = dict()
track = dict()
for (i, o) in enumerate(forbid):
x = track.get(o, o)
t = state.get(n-i-1, n-i-1)
state[x] = t
track[t] = x
state.pop(n-i-1, None)
track.pop(o, None)
del track
for remaining in range(n-len(forbid), 0, -1):
i = random.randrange(remaining)
yield state.get(i, i)
state[i] = state.get(remaining - 1, remaining - 1)
state.pop(remaining - 1, None)
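# sample_gen is an incremental Fisher-Yates shuffle over 0..n-1 with the `forbid`
# values swapped out of range first, so it yields n - len(forbid) distinct samples
# without materialising the full population. Illustrative:
#   list(sample_gen(5, [2]))  ->  a random permutation of [0, 1, 3, 4]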
def project(l1, low, high):
OldMax = max(l1)
OldMin = min(l1)
NewMax = high
NewMin = low
OldRange = (OldMax - OldMin)
NewRange = (NewMax - NewMin)
if OldRange == 0:
NewValue = [NewMin for _ in l1]
else:
NewValue = [(((x - OldMin) * NewRange) / OldRange) + NewMin for x in l1]
return NewValue
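# project is a linear min-max rescale into [low, high]. Worked example:
#   project([2, 4, 6], 0, 1) == [0.0, 0.5, 1.0]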
class RandomGenerator(object):
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def rand_float(self, scalar):
pass
@abstractmethod
def draw_sample(self, bound):
pass
class Exponential(RandomGenerator):
def __init__(self):
RandomGenerator.__init__(self)
def rand_float(self, scalar=1):
return random.random() * scalar
def draw_sample(self, lambd):
return random.expovariate(lambd)
class ErdosRenyi(RandomGenerator):
def __init__(self):
RandomGenerator.__init__(self)
def rand_float(self, scalar=1):
return random.random() * scalar
    def draw_sample(self, bound, replacement=False):
        if replacement:
            # uniform draw with replacement over [0, bound); randint's upper bound is inclusive
            return random.randint(0, bound - 1)
        else:
            # single draw from the without-replacement generator
            return next(sample_gen(bound, []))
class Simulator(object):
def __init__(self, dist):
self.dist = dist
pass
def draw_sample(self, size):
return self.dist.draw_sample(size)
def flip(self, weight):
return 1 if self.dist.rand_float(1) > weight else 0
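    # flip(1 - density) returns 1 with probability ~density (a Bernoulli draw);
    # gen_transactions below uses it to decide whether a (user, item) cell is filled.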
def gen_transactions(self, n_u, n_i, density, output_path=None):
# generate the rectangular
# and randomize the rectangular
num_samples = int(n_u * n_i * density)
samples = []
if isinstance(self.dist, Exponential):
lambd = 3
for _ in range(num_samples):
samples.append(self.draw_sample(lambd))
samples = project(samples, 0, 1)
samples = [int(x * (n_i-1)) for x in samples]
print(min(samples),max(samples))
elif isinstance(self.dist, ErdosRenyi):
for _ in range(num_samples):
                samples.append(int(self.dist.rand_float() * n_i))
else:
raise Exception('Unknown distribution:', type(self.dist))
utility_matrix = sparse.lil_matrix((n_u, n_i), dtype=int)
print('to be filled:', num_samples)
counter = 0
for u in range(n_u):
item_set = set()
            endurance = n_i
for _ in range(n_i):
if len(samples) <= 0:
break
if self.flip(1-density):
pointer = 0
while samples[pointer] in item_set:
                        endurance -= 1
                        if endurance < 0:
                            break
pointer = random.randint(0, len(samples)-1)
# print(samples)
                    if endurance < 0:
                        break
counter += 1
item_set.add(samples[pointer])
utility_matrix[u, samples.pop(pointer)] = 1
print('filled in:', counter)
if output_path is not None:
output_stream = open(output_path, 'w')
users,items = utility_matrix.nonzero()
item_count = {}
user_count = {}
for user_id,item_id in zip(users,items):
if item_id in item_count.keys():
item_count[item_id] += 1
else:
item_count[item_id] = 1
if user_id in user_count.keys():
user_count[user_id] += 1
else:
user_count[user_id] = 1
transaction = [user_id,item_id,utility_matrix[user_id,item_id]]
transaction = [str(x) for x in transaction]
output_stream.write(",".join(transaction)+'\n')
output_stream.close()
return utility_matrix
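# Illustrative usage and expected fill (names from this file only):
#   Simulator(Exponential()).gen_transactions(100, 50, 0.05) returns a 100x50
#   lil_matrix with at most int(100 * 50 * 0.05) = 250 nonzero cells (fewer when
#   the endurance counter gives up on a dense row).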
if __name__ == "__main__":
import os
cur_dir = os.path.dirname(__file__)
project_root = os.path.join(cur_dir, '..')
output_folder = os.path.join(project_root, 'charles', 'artificial')
# e_r = ErdosRenyi()
e_r = Exponential()
if isinstance(e_r, ErdosRenyi):
model_name = 'uniform'
elif isinstance(e_r, Exponential):
model_name = 'exponential'
else:
raise Exception('Unknown model:', type(e_r))
sim = Simulator(e_r)
records = []
# for _ in range(10000):
num_users = 500
num_items = 200
density = 0.03
print('number users:', num_users, 'num_items:', num_items)
output_file_path = os.path.join(output_folder, '_'.join([model_name, str(num_users), str(num_items)])) #, str(density).replace('.','_')
print(output_file_path)
transactions = sim.gen_transactions(num_users,num_items, density, output_file_path)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,629 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /data_processing/microsoft/gen_clean.py | import csv, os
def gen_data_from_raw(input_path, output_path):
output_stream = open(output_path, 'w')
writer = csv.writer(output_stream, delimiter=',', quotechar='"')
with open(input_path, 'r') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
user_id = None
for row in reader:
if row[0] == 'C':
user_id = row[1]
elif row[0] == 'V':
item_id = row[1]
writer.writerow([user_id, item_id, 1, 0])
output_stream.close()
if __name__ == "__main__":
cur_dir = os.path.dirname(__file__)
file_name = os.path.join(cur_dir, 'anonymous-msweb.data')
out_file = os.path.join(cur_dir, 'cleaned')
gen_data_from_raw(file_name, out_file) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,630 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /utils/read_transaction.py | import csv
from scipy.sparse import lil_matrix
import numpy as np
def read_transaction(transaction_file_path):
    with open(transaction_file_path, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
print(', '.join(row))
def gen_utility_matrix(transaction_file, rating=False):
user_set = set()
item_set = set()
transactions = []
with open(transaction_file, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
user_id, item_id, quantity = row[0], row[1], row[2]
if user_id == '':
continue
user_set.add(user_id)
item_set.add(item_id)
transactions.append((user_id, item_id, quantity))
print('num users:', len(user_set), 'num items:', len(item_set))
# build user-idx and item-idx mapping for utility matrix
user2idx = dict(zip(user_set, range(len(user_set))))
idx2user = dict(zip(range(len(user_set)), user_set))
item2idx = dict(zip(item_set, range(len(item_set))))
idx2item = dict(zip(range(len(item_set)), item_set))
utility_matrix = lil_matrix((len(user_set), len(item_set)), dtype=np.int8)
for transaction in transactions:
user_id, item_id, quantity = transaction
if rating:
if quantity == '4':
quantity = 1
elif quantity == '4.5':
quantity = 2
elif quantity == '5':
quantity = 3
else:
quantity = int(quantity)
# quantity = int(quantity)
utility_matrix[user2idx[user_id], item2idx[item_id]] += quantity
utility_package = {
'utility_matrix': utility_matrix.toarray(),
'idx2user': idx2user,
'user2idx': user2idx,
'item2idx': item2idx,
'idx2item': idx2item
}
return utility_package
def split_utility_matrix(utility_matrix, ratio):
num_row, num_col = utility_matrix.shape
train = num_row * ratio
return np.split(utility_matrix, [int(train)])
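# Worked example: split_utility_matrix keeps the first ratio fraction of rows for
# training, e.g. a (10, 4) matrix with ratio=0.9 splits into (9, 4) and (1, 4):
#   train, test = split_utility_matrix(np.zeros((10, 4)), 0.9)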
if __name__ == "__main__":
import os
# printing setting
    np.set_printoptions(threshold=np.inf)
cur_dir = os.path.dirname(__file__)
transaction_file = os.path.join(cur_dir, 'toy_transaction')
online_shopping_transaction_file = os.path.join(cur_dir, '..', 'online_shopping', 'labelled')
utility_data = gen_utility_matrix(online_shopping_transaction_file)
utility_matrix, idx2user, idx2item = utility_data['utility_matrix'], utility_data['idx2user'], utility_data['idx2item']
print(utility_matrix.shape)
print(type(utility_matrix))
train, test = split_utility_matrix(utility_matrix, 0.9)
print(train[0], len(train))
print(len(test))
# print(utility_matrix[0], idx2user[0])
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,631 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /skip_gram.py | from keras.layers import Input, Dense, regularizers
from keras.models import Model
import numpy as np
import random, gensim
def gen_data_set(data_path, vocab, embedding, ratio=0.9, sample_rate=2.0*10e-2):
item2idx = dict(zip(vocab, range(len(vocab))))
idx2item = dict(zip(range(len(vocab)), vocab))
train_inputs = []
train_labels = []
validation_inputs = []
validation_labels = []
# coding
data = open(data_path, 'r')
for receipt in data:
receipt = receipt.strip().split(' ')
for i in range(len(receipt)):
if random.random() <= sample_rate:
x_input = receipt[i]
if x_input not in vocab:
continue
remain = receipt.copy()
remain.pop(i)
labels = np.zeros(len(vocab))
history = []
for x in remain:
try:
history.append(item2idx[x])
except KeyError as e:
continue
labels[history] = 1
if random.random() < ratio:
train_inputs.append(embedding[x_input].tolist())
train_labels.append(labels.tolist())
else:
validation_inputs.append(embedding[x_input].tolist())
validation_labels.append(labels.tolist())
return (train_inputs, train_labels), (validation_inputs, validation_labels)
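# Sketch of the label encoding above, assuming a toy vocab list ['a', 'b', 'c']:
# for the receipt "a b c" with x_input = 'b', the input is embedding['b'] and the
# multi-hot target over the remaining items in the receipt is labels == [1, 0, 1].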
def AutoEncoder(encoding_dim, x_train, y_train, x_validation, y_validation, activation_1, activation_2, loss_function, epoch=30, bias=True):
# this is the size of our encoded representations
# dimension = 784
# this is our input placeholder
input_dimension = len(x_train[0])
output_dimension = len(y_train[0])
input_matrix = Input(shape=(input_dimension,))
# "encoded" is the encoded representation of the input
print('activation:', activation_1, activation_2)
print(loss_function)
# encoded = Dense(encoding_dim, activity_regularizer=regularizers.l2(10e-4), activation=activation_1, bias=bias)(input_matrix)
encoded = Dense(encoding_dim, activation=activation_1, bias=bias)(input_matrix)
decoded = Dense(output_dimension, activation=activation_2, bias=bias)(encoded)
autoencoder = Model(input=input_matrix, output=decoded)
encoder = Model(input=input_matrix, output=encoded)
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
# create the decoder model
autoencoder.compile(optimizer='adadelta', loss=loss_function)
autoencoder.fit(x_train, y_train,
nb_epoch=epoch,
batch_size=256,
shuffle=True,
validation_data=(x_validation, y_validation))
return encoder, decoder
if __name__ == "__main__":
    comments = '''
    change the sampling strategy: choose nearby words in the context window and try to train a model based on the sampled words.
    '''
print(comments)
import os
    np.set_printoptions(threshold=np.inf)
project_name = 'movielens100k'
# project_name = 'online_shopping'
# project_name = 'tafeng'
cur_dir = os.path.dirname(__file__)
epoch = 17
data_file_name = ['tr_short'][0]
test_file_name = ['te_short'][0]
model_name = ['short530sg', 'short550sg', 'short570sg', 'short5100sg'][1]+'.txt'
# model_name = ['short510cbow', 'short530cbow', 'short550cbow', 'short570cbow', 'short5100cbow'][0]+'.txt'
embedded_dimension = 50
top_n = 1
activation_functions = ['linear', 'sigmoid', 'relu', 'softmax']
objective_functions = ['binary_crossentropy', 'kullback_leibler_divergence', 'mae', 'mse', 'mean_absolute_percentage_error'][0]
activation_function_1 = activation_functions[1]
activation_function_2 = activation_functions[0]
print(project_name, activation_function_1, activation_function_2, objective_functions, model_name, 'epoch', epoch, 'top', top_n)
online_shopping_transaction_file = os.path.join(cur_dir, project_name, data_file_name)
online_shopping_model_path = os.path.join(cur_dir, project_name, 'models', model_name)
embedding = gensim.models.Word2Vec.load_word2vec_format(online_shopping_model_path, binary=False)
vocab = embedding.vocab
(train_inputs, train_labels), (validation_inputs, validation_labels) = gen_data_set(online_shopping_transaction_file, vocab, embedding, ratio=0.9, sample_rate=10e-1)
prev = []
for x in train_labels:
print(np.where(np.array(x) != 0)[0])
exit()
item2idx = dict(zip(vocab, range(len(vocab))))
idx2item = dict(zip(range(len(vocab)), vocab))
encoder, decoder = AutoEncoder(embedded_dimension, train_inputs, train_labels, validation_inputs, validation_labels, activation_function_1, activation_function_2, objective_functions, epoch=epoch, bias=True)
# evaluate the precision
with open(os.path.join(cur_dir, project_name, test_file_name), 'r') as f:
hit = 0.0
counter = 0
recall = 0.0
for line in f:
items = line.strip().split(' ')
if len(items) < 2:
continue
# pick 1 random item as query in each receipt
pick = random.randrange(0, len(items))
hidden = [x for i,x in enumerate(items) if i!=pick]
try:
query = embedding[items[pick]]
encoded_query = encoder.predict(np.array([query]))
print(encoded_query)
recommendation = decoder.predict(encoded_query)
# ret = idx2item[np.argmax(recommendation[0])]
result = np.ndarray.argsort(recommendation[0])[-top_n:][::-1]
# print([recommendation[0][x] for x in result])
ret = [idx2item[x] for x in result]
print(np.argmax(recommendation[0]), items[pick])
print(query, ret)
# print(idx2item[pick], ret, set(ret).intersection(hidden))
hit += len(set(ret).intersection(hidden))
counter += top_n
recall += len(hidden)
# if ret in hidden:
# precision += 1
except KeyError:
continue
        precision = hit / counter  # counter accumulates top_n per query, so this is already precision@top_n
        print(precision)
        print(hit / recall)  # recall: hits over the total number of hidden items
# encoder, decoder = OneLayerAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'relu', 'sigmoid', 'binary_crossentropy', epoch=50)
# encoder, decoder = OneLayerAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'relu', 'sigmoid', 'kullback_leibler_divergence', epoch=50)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,632 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /Word2VecExample.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
import datetime
import os
import numpy
from Word2Vec import TestAlpha
from utils.helper import GenTrainAndTestSet, Train
cur_dir = os.path.dirname(__file__)
project_root = os.path.join(cur_dir, '..')
if __name__ == "__main__":
# best configuration for gift store data is window_size 5 dimension 30 or 50
window_size = 10
dimension = 50
# seed = 0 # the random is useless here because the collection.count() has its own random
# random.seed(seed)
# https://archive.ics.uci.edu/ml/datasets/Online+Retail
project = 'online_shopping'
# project = 'movielens100k'
# https://archive.ics.uci.edu/ml/datasets/Anonymous+Microsoft+Web+Data
# project = 'microsoft'
# project = 'kaggle'
# http://labrosa.ee.columbia.edu/projects/musicsim/aotm.html
# project = 'music'
# Belgium retail market dataset
# http://recsyswiki.com/wiki/Grocery_shopping_datasets
# project = 'store'
# project = 'artificial'
# artificial data are generated by simulator module
file_list = ['transaction_labelled', 'transaction_unlabelled', 'uniform_10000_1000', 'uniform_10000_10000'][1]
file_list = [file_list]
for source_file_name in file_list:
# source_file_name = 'uniform_33000_285'
model_prefix = source_file_name
input_file_name = 'tr_'+model_prefix
test_file_name = 'te_'+source_file_name
scalar = 0.1
sample = True
print('project:',project)
model_name = model_prefix+str(window_size)+str(dimension)
transaction_folder = os.path.join(cur_dir, project)
input_file_path = os.path.join(cur_dir, project, input_file_name)
model_path = os.path.join(cur_dir, project, 'models', model_name+'.txt')
test_file_path = os.path.join(cur_dir, project, test_file_name)
print('scalar:',scalar)
results = [list() for _ in range(9)] #list of list
epoch = 1
benchmarking_result = [list() for _ in range(epoch)]
for _ in range(epoch):
GenTrainAndTestSet(transaction_folder, source_file_name, verbose=True, ratio=0.9, is_first_col_index=False)
Train(input_file_path, model_path, scalar=scalar,window=window_size, dim=dimension, sample=sample, use_gensim=True, workers=8)
print('{0}\r'.format(str(_ * 100 / epoch)[:4] + '%'), end='')
for ratio in range(1, 10):
start = datetime.datetime.now()
result = TestAlpha(model_path, test_file_path, ratio=ratio/10, topn=1, shuffle=True)
end = datetime.datetime.now()
benchmarking_result[_].append(end-start)
# exit()
results[ratio-1].append(result)
# print(result)
# print(results)
results = [ (numpy.average(x), numpy.std(x)) for x in results]
benchmarking_result = [sum(x, datetime.timedelta()) / epoch for x in benchmarking_result]
print('lower bound',',','upper bound')
for result in benchmarking_result:
print(result)
for result in results:
avg, std = result
# print(avg-std,',',avg+std)
print(avg)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,633 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /utils/helper.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
import os, random, datetime, csv
from Word2Vec import Embedding
def ConvertReceiptsToTransaction(file, output_file):
output_stream = open(output_file, 'w')
counter = 0
with open(file, 'r') as f:
for line in f:
counter +=1
play_list = line.strip().split(' ')
user_id = counter
for item in play_list:
output_stream.write(', '.join([str(user_id), item, '1'])+'\n')
output_stream.close()
# ConvertReceiptsToTransaction('store/retail.dat', 'store/unlabelled')
def ConvertMusicListToTransaction(file, output_file):
output_stream = open(output_file, 'w')
with open(file, 'r') as f:
for line in f:
line_buffer = line.strip().split(' ')
user_id = line_buffer[0]
play_list = line_buffer[1:]
play_list = [x for x in play_list if ':' not in x]
for song in play_list:
output_stream.write(', '.join([user_id, song, '1'])+'\n')
output_stream.close()
# ConvertMusicListToTransaction('music/music_list.txt', 'music/labelled')
def Encode(input_code):
output = []
input_code = input_code.replace(' ', '')
for char in input_code:
if char.isalpha():
output.append(str(ord(char)))
else:
output.append(char)
output = ''.join(output)
# use this temp encoding for testing
# output = ''.join([str(ord(x)%9) for x in input_code])
return output
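# Worked example: Encode strips spaces and maps alphabetic characters to their
# ordinal codes so item ids become purely numeric tokens:
#   Encode('AB 1') == '65661'   # ord('A') == 65, ord('B') == 66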
def GenTrainAndTest(folder_path, file_name, ratio=0.9, verbose=False, resolution='daily', time_stamp_format='%Y%m%d', ignore_first_row=False, delimiter=',', quotechar='"'):
ret = {}
transaction_file_path = os.path.join(folder_path, file_name)
print(transaction_file_path)
with open(transaction_file_path, 'r') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
if ignore_first_row:
next(reader)
for line in reader:
user_id, item_id, quantity, time_stamp = line
if user_id == '' or float(quantity)<0:
continue
if time_stamp_format is not None:
time_stamp = datetime.datetime.strptime(time_stamp, time_stamp_format)
elif time_stamp_format is None:
time_stamp = datetime.datetime.fromtimestamp(int(time_stamp))
if resolution == 'daily':
time_stamp = time_stamp.strftime('%Y%m%d')
elif resolution == 'weekly':
# time_stamp.isocalendar() returns a 3-tuple, (ISO year, ISO week number, ISO weekday)
week_number = time_stamp.isocalendar()[1]
time_stamp = time_stamp.strftime('%Y')+str(week_number)
elif resolution == 'monthly':
time_stamp = time_stamp.strftime('%Y%m')
elif resolution == 'yearly':
time_stamp = time_stamp.strftime('%Y')
else:
raise Exception('unknown resolution option:', resolution, 'supported resolution options: daily, weekly, monthly, yearly')
item_id = Encode(item_id)
if user_id in ret.keys():
ret[user_id].append((item_id, time_stamp, quantity))
else:
ret[user_id] = [(item_id, time_stamp, quantity)]
long_train_receipts = []
short_train_receipts = []
test_long_receipts = []
test_short_receipts = []
transaction_labelled = os.path.join(folder_path, 'transaction_labelled')
transaction_unlabelled = os.path.join(folder_path, 'transaction_unlabelled')
transaction_train_unlabelled = os.path.join(folder_path, 'transaction_train_unlabelled')
transaction_train_labelled = os.path.join(folder_path, 'transaction_train_labelled')
transaction_test_unlabelled = os.path.join(folder_path, 'transaction_test_unlabelled')
transaction_test_labelled = os.path.join(folder_path, 'transaction_test_labelled')
transaction_labelled_writer = open(transaction_labelled, 'w')
transaction_unlabelled_writer = open(transaction_unlabelled, 'w')
transaction_train_unlabelled_writer = open(transaction_train_unlabelled, 'w')
transaction_train_labelled_writer = open(transaction_train_labelled, 'w')
transaction_test_unlabelled_writer = open(transaction_test_unlabelled, 'w')
transaction_test_labelled_writer = open(transaction_test_labelled, 'w')
transaction_labelled_writer_csv = csv.writer(transaction_labelled_writer, delimiter=delimiter, quotechar=quotechar)
transaction_unlabelled_writer_csv = csv.writer(transaction_unlabelled_writer, delimiter=delimiter,
quotechar=quotechar)
transaction_train_unlabelled_writer_csv = csv.writer(transaction_train_unlabelled_writer, delimiter=delimiter,
quotechar=quotechar)
transaction_train_labelled_writer_csv = csv.writer(transaction_train_labelled_writer, delimiter=delimiter,
quotechar=quotechar)
transaction_test_unlabelled_writer_csv = csv.writer(transaction_test_unlabelled_writer, delimiter=delimiter,
quotechar=quotechar)
transaction_test_labelled_writer_csv = csv.writer(transaction_test_labelled_writer, delimiter=delimiter,
quotechar=quotechar)
for user_id in ret.keys():
# decompose the long term into short term
short_ret = {}
for item in ret[user_id]:
item_id, ts, quantity = item
tmp_uid = user_id + ts
if tmp_uid in short_ret.keys():
short_ret[tmp_uid].append((item_id, ts, quantity))
else:
short_ret[tmp_uid] = [(item_id, ts, quantity)]
# write to labelled and unlabelled file
transaction_labelled_writer_csv.writerow([user_id, item_id, quantity])
transaction_unlabelled_writer_csv.writerow([tmp_uid, item_id, quantity])
if random.random() < ratio:
# append the item to the long_term_profile
for tmp_uid in short_ret.keys():
short_receipt = []
for x in short_ret[tmp_uid]:
item_id, ts, quantity = x
transaction_train_unlabelled_writer_csv.writerow([tmp_uid, item_id, quantity])
short_receipt.append(item_id)
short_train_receipts.append(short_receipt)
long_receipt = []
for x in ret[user_id]:
item_id, ts, quantity = x
long_receipt.append(item_id)
transaction_train_labelled_writer_csv.writerow([user_id, item_id, quantity])
long_train_receipts.append(long_receipt)
else:
# append the item to the long_term_profile
for tmp_uid in short_ret.keys():
short_receipt = []
for x in short_ret[tmp_uid]:
item_id, ts, quantity = x
transaction_test_unlabelled_writer_csv.writerow([tmp_uid, item_id, quantity])
short_receipt.append(item_id)
test_short_receipts.append(short_receipt)
long_receipt = []
for x in ret[user_id]:
item_id, ts, quantity = x
long_receipt.append(item_id)
transaction_test_labelled_writer_csv.writerow([user_id, item_id, quantity])
test_long_receipts.append(long_receipt)
total = .0
count = 0
for receipt in long_train_receipts:
total += len(receipt)
count += 1
print('Average long term receipt length:', total / count)
print('Number receipts:', len(ret))
total = .0
count = 0
for receipt in short_train_receipts:
total += len(receipt)
count += 1
print('Average short term receipt length:', total / count)
print('Number receipts:', len(ret))
transaction_labelled_writer.close()
transaction_unlabelled_writer.close()
transaction_train_unlabelled_writer.close()
transaction_train_labelled_writer.close()
transaction_test_unlabelled_writer.close()
transaction_test_labelled_writer.close()
with open(os.path.join(folder_path, 'tr_long'), 'w') as f:
for receipt in long_train_receipts:
f.write(' '.join(receipt)+'\n')
with open(os.path.join(folder_path, 'tr_short'), 'w') as f:
for receipt in short_train_receipts:
f.write(' '.join(receipt)+'\n')
print('total long term train receipts:', len(long_train_receipts))
print('total short term train receipts:', len(short_train_receipts))
with open(os.path.join(folder_path, 'te_long'), 'w') as f:
for receipt in test_long_receipts:
f.write(' '.join(receipt)+'\n')
with open(os.path.join(folder_path, 'te_short'), 'w') as f:
for receipt in test_short_receipts:
f.write(' '.join(receipt)+'\n')
print('total long term test receipts:', len(test_long_receipts))
print('total short term test receipts:', len(test_short_receipts))
def GenTrainAndTestSet(folder_path, file_name, ratio=0.9, verbose=False, is_first_col_index=False):
ret = {}
transaction_file_path = os.path.join(folder_path, file_name)
print(transaction_file_path)
with open(transaction_file_path, 'r') as f:
for line in f:
if is_first_col_index:
user_id, item_id, quantity = line.strip().split(',')[1:]
else:
user_id, item_id, quantity = line.strip().split(',')
item_id = Encode(item_id)
if user_id in ret.keys():
ret[user_id].append(item_id)
else:
ret[user_id] = [item_id]
if verbose:
total = .0
count = 0
for key in ret:
total += len(ret[key])
count += 1
print('Average receipt length:', total/count)
print('Number receipts:', len(ret))
train_receipts = []
test_receipts = []
for user in ret.keys():
if random.random() < ratio:
train_receipts.append(ret[user])
else:
test_receipts.append(ret[user])
with open(os.path.join(folder_path, 'tr_'+file_name), 'w') as f:
for receipt in train_receipts:
f.write(' '.join(receipt)+'\n')
print('total train receipts:', len(train_receipts))
with open(os.path.join(folder_path, 'te_'+file_name), 'w') as f:
for receipt in test_receipts:
f.write(' '.join(receipt)+'\n')
print('total test receipts:', len(test_receipts))
# write to disk
def Train(input_file_path, output_file_path, window=3, dim=70, cbow=True,
sample=True, scalar=1, workers=8, threshold=300, use_gensim=True, hs=0, model_name=''):
print('Training file:', input_file_path)
Embedding(input_file_path, output_file_path, window=window, dim=dim, cbow=cbow,
sample=sample, scalar=scalar, workers=workers, threshold=threshold, use_gensim=use_gensim, hs=hs, model_name=model_name)
print()
if __name__ == "__main__":
cur_dir = os.path.dirname(__file__)
    root = cur_dir
    input_file_folder = os.path.join(root, 'online_shopping')
    input_file_path = 'cleaned'  # file name inside input_file_folder
GenTrainAndTest(input_file_folder, input_file_path, ratio=0.9, verbose=True, resolution='daily', time_stamp_format='%m/%d/%Y %H:%M', ignore_first_row=True) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,634 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /Word2VecHybridExample.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
import datetime
import os
import numpy
from Word2Vec import HybridModel, iiCF, uuCF
from utils.helper import Train, GenTrainAndTest
cur_dir = os.path.dirname(__file__)
project_root = os.path.join(cur_dir, '..')
if __name__ == "__main__":
    print('''
    Try user-user embedding: embed users into an embedded space.
    After embedding, use user-user collaborative filtering on the embedded space.
    1. Find the most similar users
    2. Weighted average or linear regression of the k most similar users.
    3. Use the regenerated values for recommendation
    ''')
# best configuration for gift store data is window_size 5 dimension 30 or 50
# seed = 0 # the random is useless here because the collection.count() has its own random
# random.seed(seed)
# https://archive.ics.uci.edu/ml/datasets/Online+Retail
project = 'online_shopping'
# project = 'gift_receipt'
# project = 'microsoft'
# project = 'tafeng'
project = 'movielens100k'
# project = 'artificial'
# artificial data are generated by simulator module
labelled_data = ['short', 'long'][0]
unlabelled_data = ['short', 'long'][0]
test_file_name = ['te_short', 'te_long'][0]
test_file_path = os.path.join(cur_dir, project, test_file_name)
control_1 = 0
scalar = [0.5, 1, 1.5, 2, 2.5, 3][3] # 0.5 for sg, 2 for cbow
dimension = [40, 70][0]
top_n = 1
num_item_in_query = 1
num_bins = 5
transaction_folder = os.path.join(cur_dir, project)
# parameters
sample = [True, False][0]
gen_data = [True, False][1]
cbow = [True, False][0]
train = [True, False][control_1]
hs = [0,1][0]
# alpha = False # 1 -> short term 0 -> long term False -> without utility True -> with utility
alpha = 1 # 1 -> short term 0 -> long term False -> without utility True -> with utility
if train:
epoch = 1
else:
epoch = 50
'''
movie lens sg
10% remain 0.6
'''
# long term model
long_window_size = 10
long_term_input_file_name = 'tr_'+labelled_data
long_term_model_name = labelled_data+str(long_window_size)+str(dimension)+('cbow' if cbow else 'sg')+str(scalar).replace('.','')
transaction_folder = os.path.join(cur_dir, project)
long_term_input_file_path = os.path.join(cur_dir, project, long_term_input_file_name)
long_term_model_path = os.path.join(cur_dir, project, 'models', long_term_model_name+'.txt')
# short term model
short_window_size = 5
short_term_input_file_name = 'tr_'+unlabelled_data
short_term_model_name = unlabelled_data+str(short_window_size)+str(dimension)+('cbow' if cbow else 'sg')+str(scalar).replace('.','')
short_term_input_file_path = os.path.join(cur_dir, project, short_term_input_file_name)
short_term_model_path = os.path.join(cur_dir, project, 'models', short_term_model_name+'.txt')
print('project:', project, 'cbow' if cbow else 'sg', 'epochs:', epoch, 'short term model', short_term_model_name, 'test file', test_file_name, 'scalar', scalar)
print('long term window', long_window_size, 'short term window', short_window_size, 'embedded dimension', dimension)
benchmarking_result = [list() for _ in range(epoch)]
print('alpha:', alpha, 'query_length', num_item_in_query, 'topn', top_n)
results = [list() for _ in range(1, num_bins)] #list of list
input_file_path = 'cleaned'
if gen_data:
if ('movielens' in project) or ('microsoft' in project):
# movie lens
GenTrainAndTest(transaction_folder, input_file_path, ratio=0.9, resolution='weekly', time_stamp_format=None, ignore_first_row=False)
elif 'tafeng' in project:
GenTrainAndTest(transaction_folder, input_file_path, ratio=0.9, resolution='daily', time_stamp_format='%Y-%m-%d %H:%M:%S', ignore_first_row=True, delimiter=',')
else:
# others
GenTrainAndTest(transaction_folder, input_file_path, ratio=0.9, resolution='daily', time_stamp_format='%m/%d/%Y %H:%M', ignore_first_row=True)
for _ in range(epoch):
if train:
Train(short_term_input_file_path, short_term_model_path, scalar=scalar,window=short_window_size, dim=dimension, sample=sample, use_gensim=True, workers=8, cbow=cbow, hs=hs, model_name=short_term_model_name)
# Train(long_term_input_file_path, long_term_model_path, scalar=scalar,window=long_window_size, dim=dimension, sample=sample, use_gensim=True, workers=8, cbow=cbow, hs=hs)
print('{0}\r'.format(str(_ * 100 / epoch)[:4] + '%'), end='')
for bin_num in range(1, num_bins):
start = datetime.datetime.now()
result = uuCF(short_term_model_path, test_file_path, ratio=bin_num/num_bins, topn=top_n, alpha=alpha, shuffle=True, divider=num_item_in_query, remove_already_purchased=True)
# result = HybridModel(short_term_model_path, short_term_input_file_path, test_file_path, ratio=bin_num/num_bins, topn=top_n, alpha=alpha, shuffle=True, divider=num_item_in_query, remove_already_purchased=True)
# result = iiCF(short_term_model_path, test_file_path, ratio=bin_num/num_bins, topn=top_n, with_utility=alpha, shuffle=True, divider=num_item_in_query)
end = datetime.datetime.now()
benchmarking_result[_].append(end-start)
results[bin_num - 1].append(result)
results = [ (numpy.average(x), numpy.std(x)) for x in results]
benchmarking_result = [sum(x, datetime.timedelta()) / epoch for x in benchmarking_result]
for i in range(len(results)):
avg, std = results[i]
print((i+1)/num_bins, avg)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,635 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /visualization/microsoft_browsing_dataset_preprocessing.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
def convert_to_csv(file_path, output_path):
transactions = {}
current_user = ''
out_stream = open(output_path, 'w')
with open(file_path) as f:
for line in f:
data = line.split(',')
if data[0] == 'C':
current_user = data[1].replace('\"', '')
transactions[current_user] = []
elif data[0] == 'V':
item_id = data[1]
transactions[current_user].append(item_id)
out_stream.write(','.join([current_user, item_id, '1\n']))
out_stream.close()
return transactions
if __name__ == "__main__":
output_path = 'data/microsoft_browsing.csv'
input_path = 'data/microsoft_browsing_data.dat'
convert_to_csv(input_path, output_path) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,636 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /auto_encoder_example.py | from keras.layers import Input, Dense
from keras.models import Model
dimension = 784
# this is the size of our encoded representations
encoding_dim = 36  # 36 floats -> compression factor of ~21.8, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(dimension,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(dimension, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input=input_img, output=decoded)
# this model maps an input to its encoded representation
encoder = Model(input=input_img, output=encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
if __name__ == "__main__":
import os
import numpy as np
    from utils.read_transaction import gen_utility_matrix, split_utility_matrix
    from one_layer_autoencoder import Noising, visualize_result, visualize_result_sorted
cur_dir = os.path.dirname(__file__)
transaction_file = os.path.join(cur_dir, 'toy_transaction')
# online_shopping_transaction_file = os.path.join(cur_dir, 'online_shopping', 'unlabelled')
online_shopping_transaction_file = os.path.join(cur_dir, 'movielens100k', 'unlabelled_daily')
    np.set_printoptions(threshold=np.inf)
utility_data = gen_utility_matrix(online_shopping_transaction_file)
utility_matrix, idx2user, idx2item = utility_data['utility_matrix'], utility_data['idx2user'], utility_data[
'idx2item']
print(utility_matrix.shape)
x_train, x_test = split_utility_matrix(utility_matrix, 0.9)
train_noisy, test_noisy = Noising(x_train, x_test)
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
autoencoder.fit(x_train, x_train,
nb_epoch=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
visualize_result_sorted(encoder, decoder, x_test, dimension, encoding_dim)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,637 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /Word2Vec.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
import gensim, random, os, datetime, collections
def GetNeighbors(vertices, model, topn=5):
ret_neighbors = []
if len(vertices) == 0:
return ret_neighbors
while True:
try:
ret_neighbors = model.most_similar(vertices, topn=topn*len(vertices))
# ret_neighbors = [x[0] for x in ret_neighbors]
break
except KeyError as e:
for i in range(len(vertices)):
if vertices[i] in str(e):
vertices.remove(vertices[i])
break
if len(vertices) == 0:
return ret_neighbors
continue
return ret_neighbors
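# GetNeighbors retries most_similar after dropping any query token that raises a
# KeyError (out of vocabulary); topn scales with the surviving query length.
# Illustrative, assuming 'junk' is not in the model's vocab:
#   GetNeighbors(['i1', 'junk'], model)  ->  model.most_similar(['i1'], topn=5)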
def iiCF(model_path, test_file_path, ratio=0.9, topn=3, with_utility=False, shuffle=True, divider=None,
iterate_all=False, n_neighbours=5):
# load models
model = gensim.models.Word2Vec.load_word2vec_format(model_path, binary=False)
precision = 0.0
counter = 0.0
hidden_ratio_divider = divider
# load test file
with open(test_file_path, 'r') as f:
for line in f:
items = line.strip().split(' ')
# merge all items, convert user profile into user utility vector
items = collections.Counter(items)
items = dict(items)
utility = list(items.values())
items = list(items.keys())
indices = list(range(len(items)))
# shuffle the test receipt
if shuffle:
random.shuffle(indices)
# divide the test receipt by hidden ratio
if hidden_ratio_divider is None:
divider = int(float(len(indices)) * ratio)
else:
divider = hidden_ratio_divider
# query is remaining user profile
query = indices[:divider]
# removed is set for prediction
removed = indices[divider:]
removed = [items[x] for x in removed]
# ignore current test if either removed or query is empty
if len(removed) == 0 or len(query) == 0:
continue
# from this point, the test case is a valid test case, increment the counter by 1
counter += 1
neighbors = []
if iterate_all:
pass
else:
for item_index in query:
try:
ret = model.similar_by_word(items[item_index], topn=n_neighbours)
except KeyError:
continue
for each in ret:
# todo @charles, predict ratings for all items with items purchased by this user and sort the aggregation
# todo @charles, implement the full item collaborative filtering
if with_utility:
neighbors.append((each[0], each[1] * utility[item_index]))
else:
neighbors.append((each[0], each[1] ))
# CHECK POINT
# merge the short term and long term result
result = {}
for each in neighbors:
# each[0] is item id, each[1] is the similarity score
if each[0] not in result.keys():
result[each[0]] = each[1]
else:
result[each[0]] += each[1]
if len(result) == 0:
continue
# sort the items base on the aggregated similarity
sorted_x = sorted(result.items(), key=lambda x:x[1], reverse=True)
top_items = [x[0] for x in sorted_x]
top_items = top_items[:topn]
# convert the precision to top_n precision
current_query_precision = float(len(set(top_items).intersection(set(removed)))) / float(topn)
precision += current_query_precision
# calculate the average precision
try:
precision = (precision / counter)
except ZeroDivisionError:
# in case all test cases contain only 1 item
precision = 0
return precision
def uuCF(short_mem_model_path, test_file_path, ratio=0.9, topn=3, alpha=0.5, shuffle=True, divider=None,
remove_already_purchased=True, n_neighbours=5):
# load models
short_mem_model = gensim.models.Word2Vec.load_word2vec_format(short_mem_model_path, binary=False)
precision = 0.0
counter = 0.0
hidden_ratio_divider = divider
# load test file
with open(test_file_path, 'r') as f:
for line in f:
items = line.strip().split(' ')
# merge all items, convert user profile into user utility vector
if remove_already_purchased:
items = list(set(items))
# shuffle the test receipt
if shuffle:
random.shuffle(items)
# divide the test receipt by hidden ratio
if hidden_ratio_divider is None:
divider = int(float(len(items)) * ratio)
else:
divider = hidden_ratio_divider
# query is remaining user profile
query = items[:divider]
# removed is set for prediction
removed = items[divider:]
# ignore current test if either removed or query is empty
if len(removed) == 0 or len(query) == 0:
continue
# from this point, the test case is a valid test case, increment the counter by 1
counter += 1
neighbors = []
# get neighbours from short term memory model
short_term_model_query = query
recommendations = GetNeighbors(short_term_model_query, short_mem_model, topn=n_neighbours)
for each in recommendations:
neighbors.append((each[0], each[1] * alpha))
# merge the short term and long term result
result = {}
for each in neighbors:
# each[0] is item id, each[1] is the similarity score
if each[0] not in result.keys():
result[each[0]] = each[1]
else:
result[each[0]] += each[1]
if len(result) == 0:
continue
            # sort the items based on the aggregated similarity
sorted_x = sorted(result.items(), key=lambda x:x[1], reverse=True)
top_items = [x[0] for x in sorted_x]
top_items = top_items[:topn]
# convert the precision to top_n precision
current_query_precision = float(len(set(top_items).intersection(set(removed)))) / float(topn)
precision += current_query_precision
# calculate the average precision
try:
precision = (precision / counter)
except ZeroDivisionError:
# in case all test cases contain only 1 item
precision = 0
return precision
def HybridModel(short_mem_model_path, long_mem_model_path, test_file_path, ratio=0.9, topn=3, alpha=0.5, shuffle=True, divider=None, remove_already_purchased=False):
# load models
short_mem_model = gensim.models.Word2Vec.load_word2vec_format(short_mem_model_path, binary=False)
long_mem_model = gensim.models.Word2Vec.load_word2vec_format(long_mem_model_path, binary=False)
precision = 0.0
counter = 0.0
hidden_ratio_divider = divider
# load test file
with open(test_file_path, 'r') as f:
for line in f:
items = line.strip().split(' ')
# merge all items, convert user profile into user utility vector
if remove_already_purchased:
items = list(set(items))
# shuffle the test receipt
if shuffle:
random.shuffle(items)
# divide the test receipt by hidden ratio
if hidden_ratio_divider is None:
divider = int(float(len(items)) * ratio)
else:
divider = hidden_ratio_divider
# query is remaining user profile
query = items[:divider]
# removed is set for prediction
removed = items[divider:]
# ignore current test if either removed or query is empty
if len(removed) == 0 or len(query) == 0:
continue
# from this point, the test case is a valid test case, increment the counter by 1
counter += 1
neighbors = []
# get neighbours from short term memory model
long_term_model_query = query
short_term_model_query = query
recommendations = GetNeighbors(short_term_model_query, short_mem_model, topn=topn)
for each in recommendations:
neighbors.append((each[0], each[1] * alpha))
# get neighbours from long term memory model
recommendations = GetNeighbors(long_term_model_query, long_mem_model, topn=topn)
for each in recommendations:
neighbors.append((each[0], each[1] * (1 - alpha)))
# merge the short term and long term result
result = {}
for each in neighbors:
# each[0] is item id, each[1] is the similarity score
if each[0] not in result.keys():
result[each[0]] = each[1]
else:
result[each[0]] += each[1]
if len(result) == 0:
continue
            # sort the items based on the aggregated similarity
sorted_x = sorted(result.items(), key=lambda x:x[1], reverse=True)
top_items = [x[0] for x in sorted_x]
top_items = top_items[:topn]
# convert the precision to top_n precision
current_query_precision = float(len(set(top_items).intersection(set(removed)))) / float(topn)
precision += current_query_precision
# calculate the average precision
try:
precision = (precision / counter)
except ZeroDivisionError:
# in case all test cases contain only 1 item
precision = 0
return precision
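# In HybridModel the per-item blend is score(item) = alpha * sim_short + (1 - alpha) * sim_long,
# summed over the query items, so alpha = 1 reduces to the short-term model and
# alpha = 0 to the long-term model (matching the alpha comments in Word2VecHybridExample).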
def TestAlpha(model_path, test_file_path, ratio=0.9, topn=3, pick=None, shuffle=True):
word2vec_encoder = gensim.models.Word2Vec.load_word2vec_format(model_path, binary=False)
# method 1 try all words and compute score of matched items
total_score = 0.0
counter = 0.0
with open(test_file_path, 'r') as f:
for line in f:
items = line.strip().split(' ')
if shuffle:
random.shuffle(items)
# try 10 , 3
# print counter
divider = int(float(len(items)) * ratio)
remains = items[:divider]
removed = items[divider:]
if len(removed) == 0 or len(remains) == 0:
continue
counter += 1
neighbors = []
# print(len(remains))
for each in remains:
try:
answer = word2vec_encoder.similar_by_word(each, topn=5)
for pair in answer:
neighbors.append((pair[0], pair[1]))
except KeyError:
continue
# remains = GetNeighbors(remains, word2vec_encoder, topn=5)
# for each in remains:
# neighbors.append((each[0], each[1]))
# print(remains)
result = {}
for each in neighbors:
if each[0] not in result.keys():
result[each[0]] = each[1]
else:
result[each[0]] += each[1]
# second item in tuple is similarity
# order by second column
# result = Counter(neighbors)
if len(result) == 0:
continue
sorted_x = sorted(result.items(), key=lambda x:x[1], reverse=True)
if topn == 1 and pick is not None:
if pick >= len(sorted_x):
top_items = [x[0] for x in [sorted_x[-1]]]
else:
top_items = [x[0] for x in [sorted_x[pick]]]
elif topn >= 1:
# top_items = [x[0] for x in sorted_x]
top_items = [x[0] for x in sorted_x[:topn]]
else:
raise Exception('Recommendation has to be >',topn)
total_score += float(len(set(top_items).intersection(set(removed)))) / float(topn)
try:
precision = (total_score / counter)
except ZeroDivisionError:
precision = 0
return precision
def Enrich(input_path, window, scalar, threshold=100, cbow=True):
enriched_file_path = input_path+'{0}_{1}_{2}.enrich'.format(str(window), str(scalar).replace('.', ''), 'c' if cbow else 's')
enrich_f = open(enriched_file_path, 'w')
with open(input_path, 'r') as raw_file:
for line in raw_file:
items = line.strip().split(' ')
diff = len(items) - window
if scalar is None:
# too expensive
for _ in range(diff**2):
random.shuffle(items)
enrich_f.write(' '.join(items)+'\n')
else:
if threshold > diff > 0:
for _ in range(int(diff * scalar)):
random.shuffle(items)
enrich_f.write(' '.join(items)+'\n')
else:
enrich_f.write(' '.join(items)+'\n')
return enriched_file_path
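# Enrich oversamples receipts longer than the context window: with window=5 and
# scalar=1, an 8-item receipt (diff == 3 < threshold) is written as 3 independently
# shuffled copies, so items far apart can still co-occur inside the window.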
class MySentences(object):
def __init__(self, fname):
self.fname = fname
def __iter__(self):
for line in open(os.path.join(self.fname)):
yield line.split()
def Embedding(input_path, output_path, window, dim, cbow=True, binary=False, scalar=1, sample=True, workers=8, threshold=100, use_gensim=False, hs=0, model_name=''):
if sample:
print('Random sampling training samples...', end='', flush=True)
input_path = Enrich(input_path, window, scalar, threshold=threshold, cbow=cbow)
print('done.', flush=True)
command = 'word2vec -train {0} -output {1} -debug 2 -size {2} -window {3} -sample 1e-4 -negative 5 -hs 0 -binary {5} -cbow {4} -threads {6}'.format(input_path, output_path, dim, window, '1' if cbow else '0', '1' if binary else '0', str(workers))
if use_gensim:
print('Building model using Gensim library...', end='', flush=True)
sentences = MySentences(input_path) # a memory-friendly iterator
model = gensim.models.Word2Vec(sentences, size=dim, window=window,
workers=workers, sg=(0 if cbow else 1),
hs=hs)
model.save_word2vec_format(output_path, binary=binary)
print('done', flush=True)
else:
print(command)
start = datetime.datetime.now()
os.system(command)
end = datetime.datetime.now()
print(end-start)
if __name__ == "__main__":
cur_dir = os.path.dirname(__file__)
root = os.path.join(cur_dir, '..')
input_file_path = os.path.join(root, 'data', 'online_shopping', 'train')
output_file_path = os.path.join(root, 'models', 'embedding', 'online_shopping.txt')
Embedding(input_file_path, output_file_path, 15, 50) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,638 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /one_layer_autoencoder.py | from keras.layers import Input, Dense, regularizers
from keras.models import Model
from math import sqrt, log
from utils.read_transaction import gen_utility_matrix, split_utility_matrix
import numpy as np
import tensorflow as tf
def OneLayerAutoEncoder(dimension, encoding_dim, x_train, y_train, x_test, y_test, activation_1, activation_2, loss_function, epoch=30):
# this is the size of our encoded representations
# dimension = 784
# this is our input placeholder
input_matrix = Input(shape=(dimension,))
# "encoded" is the encoded representation of the input
print('activation:', activation_1, activation_2)
encoded = Dense(encoding_dim, activity_regularizer=regularizers.l2(10e-5), activation=activation_1, bias=True)(input_matrix)
# encoded = Dense(encoding_dim, activation=activation_1, bias=True)(input_matrix)
decoded = Dense(dimension, activation=activation_2, bias=True)(encoded)
autoencoder = Model(input=input_matrix, output=decoded)
encoder = Model(input=input_matrix, output=encoded)
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
# create the decoder model
print(loss_function)
autoencoder.compile(optimizer='adadelta', loss=loss_function)
autoencoder.fit(x_train, y_train,
nb_epoch=epoch,
batch_size=256,
shuffle=True,
validation_data=(x_test, y_test))
return encoder, decoder
def visualize_result(encoder, decoder, test, input_dim, embedded_dim):
# encode and decode some digits
# note that we take them from the *test* set
dim_x = int(sqrt(input_dim))
print(input_dim, dim_x)
encoded_imgs = encoder.predict(test)
decoded_imgs = decoder.predict(encoded_imgs)
# from sklearn.metrics.pairwise import cosine_similarity
# this = encoded_imgs[0]
# for other in encoded_imgs[1:]:
# print(cosine_similarity(this.reshape(1, -1), other.reshape(1, -1)))
print(test.shape)
print(decoded_imgs.shape)
print(type(decoded_imgs), type(decoded_imgs[0]))
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(30, 4))
avg = []
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
# plt.imshow(test[i].reshape(dim_x, dim_x))
plt.imshow(test[i].reshape(dim_x, dim_x))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(encoded_imgs[i].reshape(int(sqrt(embedded_dim)), int(sqrt(embedded_dim))))
# plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, i + 1 + 2 * n)
plt.imshow(decoded_imgs[i].reshape(dim_x, dim_x))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
print(avg)
plt.show()
def visualize_result_sorted(encoder, decoder, test, input_dim, embedded_dim):
# encode and decode some digits
# note that we take them from the *test* set
dim_x = int(sqrt(input_dim))
print(input_dim, dim_x)
encoded_imgs = encoder.predict(test)
decoded_imgs = decoder.predict(encoded_imgs)
# from sklearn.metrics.pairwise import cosine_similarity
# this = encoded_imgs[0]
# for other in encoded_imgs[1:]:
# print(cosine_similarity(this.reshape(1, -1), other.reshape(1, -1)))
print(test.shape)
print(decoded_imgs.shape)
print(type(decoded_imgs), type(decoded_imgs[0]))
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(30, 4))
avg = []
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
# plt.imshow(test[i].reshape(dim_x, dim_x))
indices = test[i].argsort()
original_matrix = test[i][indices[::-1]]
original_matrix = np.resize(original_matrix, (dim_x, dim_x))
plt.imshow(original_matrix)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(encoded_imgs[i].reshape(int(sqrt(embedded_dim)), int(sqrt(embedded_dim))))
# plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, i + 1 + 2 * n)
# threshold the data
# threshold = np.average(decoded_imgs[i]) + np.std(decoded_imgs[i])
# threshold = 0.9
# decoded_imgs[i][decoded_imgs[i]>threshold] = 1
# decoded_imgs[i][decoded_imgs[i]<=threshold] = 0
avg.append(np.average(decoded_imgs[i]))
# print(test[i])
# print(decoded_imgs[i], max(decoded_imgs[i]))
# exit()
# plt.imshow(decoded_imgs[i].reshape(dim_x, dim_x))
decoded_matrix = decoded_imgs[i][indices[::-1]]
decoded_matrix = np.resize(decoded_matrix, (dim_x, dim_x))
plt.imshow(decoded_matrix)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
print(avg)
plt.show()
def Noising(x_train, x_test):
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
return x_train_noisy, x_test_noisy
if __name__ == "__main__":
import os
    np.set_printoptions(threshold=np.inf)
# cur_dir = os.path.dirname(__file__)
# transaction_file = os.path.join(cur_dir, 'toy_transaction')
# # online_shopping_transaction_file = os.path.join(cur_dir, 'online_shopping', 'unlabelled')
# # online_shopping_transaction_file = os.path.join(cur_dir, 'movielens100k', 'unlabelled_daily')
# # utility_data = gen_utility_matrix(online_shopping_transaction_file)
#
# online_shopping_transaction_file = os.path.join(cur_dir, 'movielens10m', 'labelled')
# utility_data = gen_utility_matrix(online_shopping_transaction_file, rating=True)
#
#
# utility_matrix, idx2user, idx2item = utility_data['utility_matrix'], utility_data['idx2user'], utility_data[
# 'idx2item']
# x_train, x_test = split_utility_matrix(utility_matrix, 0.9)
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
x_train = x_train[:1500]
x_test = x_test[:300]
train_noisy, test_noisy = Noising(x_train, x_test)
embedded_dimension = 100
# print(utility_matrix.shape)
num_samples, dimension = x_train.shape
print('training set', x_train.shape)
print('testing set', x_test.shape)
encoder, decoder = OneLayerAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'linear', 'linear', 'binary_crossentropy', epoch=10)
# encoder, decoder = OneLayerAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'relu', 'sigmoid', 'binary_crossentropy', epoch=50)
# encoder, decoder = OneLayerAutoEncoder(dimension, embedded_dimension, train_noisy, x_train, test_noisy, x_test, 'relu', 'sigmoid', 'kullback_leibler_divergence', epoch=50)
visualize_result_sorted(encoder, decoder, x_test, dimension, embedded_dimension)
| {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,639 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /density_analysis/DensityAnalysis.py | import matplotlib.pyplot as plt
import numpy as np
def prototype():
# create some randomly distributed data:
data = np.random.randn(10000)
print(data)
# sort the data:
data_sorted = np.sort(data)
# calculate the proportional values of samples (empirical CDF positions in [0, 1])
p = np.arange(len(data)) / (len(data) - 1)
# plot the sorted data:
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.plot(p, data_sorted)
ax1.set_xlabel('$p$')
ax1.set_ylabel('$x$')
ax2 = fig.add_subplot(122)
ax2.plot(data_sorted, p)
ax2.set_xlabel('$x$')
ax2.set_ylabel('$p$')
plt.show()
if __name__ == "__main__":
data = np.random.randn(10000)
print(data)
data_sorted = np.sort(data)
# calculate the proportional values of samples (empirical CDF positions in [0, 1])
p = np.arange(len(data)) / (len(data) - 1)
print(p) | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,640 | minghao2016/EmbeddedCollaborativeFiltering | refs/heads/master | /plot_dist.py | """
Rec-sys Embedding Rec-sys prototyping
__author__:
charles@qileap
"""
import random
import numpy as np
def sample_gen(n, forbid):
state = dict()
track = dict()
for (i, o) in enumerate(forbid):
x = track.get(o, o)
t = state.get(n-i-1, n-i-1)
state[x] = t
track[t] = x
state.pop(n-i-1, None)
track.pop(o, None)
del track
for remaining in range(n-len(forbid), 0, -1):
i = random.randrange(remaining)
yield state.get(i, i)
state[i] = state.get(remaining - 1, remaining - 1)
state.pop(remaining - 1, None)
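# Usage sketch (hypothetical, not exercised elsewhere in this file):
# sample_gen(n, forbid) lazily yields a uniformly random permutation of
# range(n) with the values in `forbid` removed, e.g.
# list(sample_gen(5, [0])) is a random ordering of [1, 2, 3, 4].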
import seaborn as sns
import matplotlib.pyplot as plt
# numerically stable softmax:
def softmax(x):
"""Compute softmax values for each set of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0) # subtracting the max avoids overflow
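# Quick property check (hypothetical): the output is a probability vector
# and is invariant under shifting all scores by a constant:
# s = softmax(np.array([1.0, 2.0, 3.0]))
# assert np.isclose(s.sum(), 1.0)
# assert np.allclose(s, softmax(np.array([101.0, 102.0, 103.0])))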
result = []
for i in range(1000):
# result.append(random.gauss(5, 10))
result.append(random.expovariate(4))
# result.append(random.random())
# sns.distplot(result)
def project(l1, low, high):
old_min = min(l1)
old_max = max(l1)
old_range = old_max - old_min
new_range = high - low
return [(((x - old_min) * new_range) / old_range) + low for x in l1]
projected = softmax(result)
projected = project(result, 0, 1) # note: this immediately overwrites the softmax projection above
print(max(projected), min(projected))
# print(project)
projected = sorted(projected,reverse=True)
print(projected)
projected = [int(x * 400) for x in projected]
print(projected)
# plt.plot(projected)
sns.distplot(projected)
plt.show() # seaborn no longer exposes sns.plt; use matplotlib's pyplot directly
exit() | {"/stacked_auto_encoder.py": ["/utils/read_transaction.py"], "/Word2VecExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/utils/helper.py": ["/Word2Vec.py"], "/Word2VecHybridExample.py": ["/Word2Vec.py", "/utils/helper.py"], "/one_layer_autoencoder.py": ["/utils/read_transaction.py"]} |
77,647 | paolodavid/Face-Mask-Detector-TF-OpenCV | refs/heads/main | /app/main/routes.py | from base64 import b64encode
from io import BytesIO
import cv2
import numpy as np
from PIL import Image
from flask import render_template, Response, flash
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed
from werkzeug.exceptions import abort
from wtforms import FileField, SubmitField
from app.main import main_bp
from app.main.camera import Camera
# from source.test_new_images import detect_mask_in_image
from source.video_detector import detect_mask_in_frame
@main_bp.route("/")
def home_page():
return render_template("home_page.html")
def gen(camera):
while True:
frame = camera.get_frame()
frame_processed = detect_mask_in_frame(frame)
frame_processed = cv2.imencode('.jpg', frame_processed)[1].tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame_processed + b'\r\n')
@main_bp.route('/video_feed')
def video_feed():
return Response(gen(
Camera()
),
mimetype='multipart/x-mixed-replace; boundary=frame')
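# Template usage note: an MJPEG stream like the one above is typically
# consumed by an <img> tag pointing at this endpoint, e.g. (hypothetical
# template snippet; the endpoint name depends on the blueprint registration):
# <img src="{{ url_for('main.video_feed') }}">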
def allowed_file(filename):
ext = filename.split(".")[-1].lower()
return ext in ("jpg", "jpeg", "png")
@main_bp.route("/image-mask-detector", methods=["GET", "POST"])
def image_mask_detection():
return render_template("image_detector.html",
form=PhotoMaskForm())
# form
class PhotoMaskForm(FlaskForm):
image = FileField('Choose image:',
validators=[
FileAllowed(['jpg', 'jpeg', 'png'], 'The allowed extensions are: .jpg, .jpeg and .png')])
submit = SubmitField('Detect mask')
| {"/app/main/routes.py": ["/source/video_detector.py"]} |
77,648 | paolodavid/Face-Mask-Detector-TF-OpenCV | refs/heads/main | /source/video_detector.py | import time
import numpy as np
import cv2
import imutils
from imutils.video import VideoStream
from tensorflow import keras
from tensorflow.python.keras.applications.mobilenet_v2 import preprocess_input
from source.utils import preprocess_face_frame, decode_prediction, write_bb, load_cascade_detector
model = keras.models.load_model('models/mask_mobilenet.h5')
face_detector = load_cascade_detector()
def video_mask_detector():
video = VideoStream(src=0).start()
time.sleep(1.0)
while True:
# Capture frame-by-frame; the frame must be read before it can be processed
frame = video.read()
frame = detect_mask_in_frame(frame)
# display the resulting frame
cv2.imshow("Mask detector", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# cleanup
cv2.destroyAllWindows()
video.stop()
def detect_mask_in_frame(frame):
frame = imutils.resize(frame, width=650)
#frame = cv2.resize(frame, (500, 355))
# convert an image from one color space to another
# (to grayscale)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(40, 40),
flags=cv2.CASCADE_SCALE_IMAGE,
)
faces_dict = {"faces_list": [],
"faces_rect": []
}
for rect in faces:
(x, y, w, h) = rect
face_frame = frame[y:y + h, x:x + w]
# preprocess image
face_frame_prepared = preprocess_face_frame(face_frame)
faces_dict["faces_list"].append(face_frame_prepared)
faces_dict["faces_rect"].append(rect)
if faces_dict["faces_list"]:
faces_preprocessed = preprocess_input(np.array(faces_dict["faces_list"]))
preds = model.predict(faces_preprocessed)
for i, pred in enumerate(preds):
mask_or_not, confidence = decode_prediction(pred)
write_bb(mask_or_not, confidence, faces_dict["faces_rect"][i], frame)
return frame
if __name__ == '__main__':
video_mask_detector()
| {"/app/main/routes.py": ["/source/video_detector.py"]} |
77,649 | eduardoparaja/black-belt-modules | refs/heads/master | /Data/triangles.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 20:46:57 2018
@author: edup_
"""
triangle_definitions = [{"base": 10, "height":2},{"base": 4, "height":3},{"base": 15, "height":12},{"base": 1, "height":8},
{"base": 9, "height": 6}, {"base": 8, "height":6}, {"base":24, "height":12}, {"base":7, "height":43}, {"base":12, "height":15}]
| {"/main.py": ["/Data/__init__.py"]} |
77,650 | eduardoparaja/black-belt-modules | refs/heads/master | /Data/__init__.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 20:08:31 2018
@author: edup_
"""
| {"/main.py": ["/Data/__init__.py"]} |
77,651 | eduardoparaja/black-belt-modules | refs/heads/master | /main.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 20:52:24 2018
@author: edup_
"""
from Utils import functions
from Data import triangles
for dictionary in triangles.triangle_definitions:
print(functions.area_triangle(dictionary["base"], dictionary["height"])) | {"/main.py": ["/Data/__init__.py"]} |
77,652 | eduardoparaja/black-belt-modules | refs/heads/master | /Utils/Sin título 6.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 21:05:31 2018
@author: edup_
"""
import sys
print(sys.path)
| {"/main.py": ["/Data/__init__.py"]} |
77,653 | eduardoparaja/black-belt-modules | refs/heads/master | /Utils/functions.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 20:08:33 2018
@author: edup_
"""
def area_triangle(base, height):
return base*height/2 | {"/main.py": ["/Data/__init__.py"]} |
77,662 | gellyfisher/masterproef | refs/heads/master | /prices.py | import math
import numpy
def choleskyDecomposition(matrix):
ret=numpy.zeros(matrix.shape,dtype='float64')
for i in range(0,matrix.shape[0]):
for j in range(0,i):
ret[i,j]=(matrix[i,j]-sum([ret[i,k]*ret[j,k] for k in range(0,j)]))/ret[j,j]
ret[i,i]=math.sqrt(matrix[i,i]-sum([ret[i,k]*ret[i,k] for k in range(0,i)]))
return ret
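# Sanity-check sketch (hypothetical helper, not part of the original API):
# a Cholesky factor G of a symmetric positive definite A must satisfy
# G.dot(G.T) == A and should agree with numpy's built-in routine.
def _cholesky_sanity_check():
    A = numpy.array([[4.0, 2.0], [2.0, 3.0]])
    G = choleskyDecomposition(A)
    assert numpy.allclose(G.dot(G.T), A)
    assert numpy.allclose(G, numpy.linalg.cholesky(A))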
class Probability:
def __init__(self,correlations,initials,drifts,volatilities,constant=True):
self.correlations = correlations
self.G = choleskyDecomposition(correlations)
self.motions=[]
self.prices=[]
self.antithetic = False # tracks whether we must draw fresh samples or can simply reuse -self.data
for i in range(correlations.shape[0]):
self.motions.append(Motion(self))
self.prices.append(Price(initials[i],drifts[i],volatilities[i],self.motions[i]))
self.constant=constant
self.getCorrelationMatrix={} # used to make getCorrelation lookups faster
for i in range(len(self.prices)):
for j in range(len(self.prices)):
self.getCorrelationMatrix[(self.prices[i],self.prices[j])] = self.correlations[i,j]
def getCorrelation(self,price1,price2):
return self.getCorrelationMatrix[(price1,price2)]
# return self.correlations[self.prices.index(price1),self.prices.index(price2)]
# samples the prices themselves
def samplePrices(self,T):
if (self.constant):
self.sampleMotions(T)
for price in self.prices:
price.value=price.initial * math.exp(
price.volatility*price.motion.getSampled()
+(price.drift-0.5*price.volatility**2)*T
)
else:
intervals=100 # number of intervals over which the non-constant sigma is evaluated
for price in self.prices:
price.value=price.initial
t=0
for i in range(intervals):
self.sampleMotions(T/intervals,False)
for price in self.prices:
price.value=price.value * math.exp(
price.volatility(t)*price.motion.getSampled()
+(price.drift-0.5*price.volatility(t)**2)*(T/intervals)
)
t+=T/intervals
return [price.value for price in self.prices]
# samples the Brownian motions
def sampleMotions(self,t,antithetic=True):
n=len(self.motions)
if (self.antithetic and antithetic):
self.data = -self.data
self.antithetic=False
else:
self.data = numpy.random.randn(n,1)*math.sqrt(t)
self.antithetic = True
correlatedData = self.G.dot(self.data)
for i in range(0,n):
self.motions[i].setSampled(correlatedData[i,0])
def simulate(self,contract,r,T,amount=100000):
total=0
total2=0
for _ in range(amount):
self.samplePrices(T)
temp = math.exp(-r*T) * contract.payoff()
total += temp
total2 += temp**2
simulated=total/amount
variance=total2/amount-simulated**2
error=math.sqrt(variance/amount)
return (simulated,error)
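# Usage sketch (hypothetical values; `some_contract` must expose payoff()):
# prob = Probability(numpy.eye(2), [100, 100], [0.05, 0.05], [0.2, 0.3])
# value, stderr = prob.simulate(some_contract, 0.05, 1.0, amount=200000)
# simulate returns the discounted average payoff and its standard error.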
# implements the functionality of a Brownian motion
class Motion:
def __init__(self,prob):
self.prob = prob
def setSampled(self,value):
self.sampled=value
def getSampled(self):
return self.sampled
# implements the functionality of a geometric Brownian motion
class Price:
def __init__(self, initial, drift, volatility, motion):
self.initial = initial
self.drift = drift
self.volatility = volatility if callable(volatility) else abs(volatility) # we assume a positive volatility; a callable (non-constant) volatility is kept as-is, since abs() of a function would raise
self.motion = motion
self.value = initial
def partialDerivative(self,t,price):
if price==self:
return 1
else:
return 0
def approximate(self,t):
if (t>0):
return self.initial * math.exp(self.drift*t)
else:
return self.initial
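# Note: approximate(t) returns the expected value E[S_t] = S_0 * exp(drift*t)
# of the geometric Brownian motion rather than a sampled path value; e.g.
# with initial=100 and drift=0.05, approximate(1) is 100*exp(0.05) ~ 105.13.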
| {"/tests.py": ["/prices.py", "/basket.py", "/exchange.py", "/option.py", "/rainbow.py"], "/rainbow.py": ["/prices.py", "/basket.py"], "/basket.py": ["/prices.py", "/option.py"], "/option.py": ["/prices.py"], "/results.py": ["/prices.py", "/basket.py", "/rainbow.py"]} |
77,663 | gellyfisher/masterproef | refs/heads/master | /tests.py | from prices import *
from basket import *
from exchange import *
from option import *
from rainbow import *
def testCholesky():
correlations = numpy.matrix('1 0.8 0.6 0.2; 0.8 1 0.55 0.65; 0.6 0.55 1 0.57; 0.2 0.65 0.57 1',dtype='float64')
ret=choleskyDecomposition(correlations)
assert(numpy.isclose(ret.dot(ret.transpose()),correlations,1e-09).all()) # no exact equality due to rounding errors
n=19
temp=numpy.random.rand(n,n)
correlations=0.5*(temp+temp.transpose()) # make the matrix symmetric
numpy.fill_diagonal(correlations,n) # make the matrix diagonally dominant, hence positive definite
ret=choleskyDecomposition(correlations)
assert(numpy.isclose(ret.dot(ret.transpose()),correlations,1e-09).all()) # no exact equality due to rounding errors
def testCorrelated():
n = 4
amount = 100000 # number of iterations
correlations = numpy.matrix('1 0.8 0.6 0.2; 0.8 1 0.55 0.65; 0.6 0.55 1 0.57; 0.2 0.65 0.57 1',dtype='float64')
G = choleskyDecomposition(correlations)
for n1 in range(0,n):
for n2 in range(n1,n):
total=0 # estimate the correlation between W_n1 and W_n2
for _ in range(0,amount):
data = numpy.random.randn(n,1)
correlatedData = G.dot(data)
total+=correlatedData[n1,0]*correlatedData[n2,0]
# the relative tolerance cannot be too tight because the sampled expectation is never exact
assert(math.isclose(total/amount,correlations[n1,n2],rel_tol=0.05))
def testCallOption():
correlations = numpy.matrix('1',dtype='float64')
volatilities = [0.30]
drifts = [0.04]
initials = [75]
prob=Probability(correlations,initials,drifts,volatilities)
T=1
call=Option(60,prob.prices[0],T)
amount=100000
simulated,error = prob.simulate(call,drifts[0],T,amount)
assert(math.isclose(simulated,call.approximate(0),rel_tol=0.01))
def testCEOs():
correlations = numpy.matrix('1 0.5; 0.5 1',dtype='float64')
amount=1000000
diffs=[]
volitiltiesArray= [[0.20]*2,[0.50]*2]
driftsArray = [[0.04]*2,[0]*2]
initialsArray = [[100,80],[100,90],[100,100]]
TArray = [0.08,0.5]
KArray = [60,80,100,120]
for volatilities in volitiltiesArray:
for drifts in driftsArray:
for initials in initialsArray:
for K in KArray:
for T in TArray:
prob=Probability(correlations,initials,drifts,volatilities)
call1=Option(K,prob.prices[0],T)
call2=Option(K,prob.prices[1],T)
ceo=CEO(prob,call1,call2)
simulated,error = prob.simulate(ceo,drifts[0],T,amount)
approx=ceo.approximate(0)
print(volatilities,drifts,initials,K,T)
print(simulated)
print(approx)
print(approx-simulated)
diffs.append(approx-simulated)
print()
print()
def testCEO():
correlations = numpy.matrix('1 0.5; 0.5 1',dtype='float64')
amount=1000000
volatilities = [0.20]*2
drifts = [0.04]*2
initials = [100,80]
T = 0.08
K = 60
prob=Probability(correlations,initials,drifts,volatilities)
call1=Option(K,prob.prices[0],T)
call2=Option(K,prob.prices[1],T)
ceo=CEO(prob,call1,call2)
simulated,error = prob.simulate(ceo,drifts[0],T,amount)
approx=ceo.approximate(0)
print(volatilities,drifts,initials,K,T)
print(simulated)
print(approx)
print(ceo.approximate2(0))
print(ceo.approximate3(0))
print()
def testCEO2():
correlations = numpy.matrix('1 0.5; 0.5 1',dtype='float64')
amount=100000
volatilities = [0.50]*2
drifts = [0.05]*2
initials = [100,100]
T = 1
K = 50
prob=Probability(correlations,initials,drifts,volatilities)
call1=Option(K,prob.prices[0],T)
call2=Option(-K,prob.prices[1],T)
ceo1=CEO(prob,call1,call2)
call4=Option(-K,prob.prices[1],T,-1)
call3=Option(K,prob.prices[0],T,-1)
ceo2=CEO(prob,call4,call3)
basket=Basket(prob,2*K,prob.prices,T,[1,-1])
print(ceo1.approximate(0)+ceo2.approximate(0))
print(basket.approximate(0))
totalCeo = 0
totalBasket = 0
for _ in range(amount):
prob.samplePrices(T)
totalCeo+=math.exp(-drifts[0]*T)*(ceo1.payoff()+ceo2.payoff())
totalBasket+=math.exp(-drifts[0]*T)*basket.payoff()
print(totalCeo/amount)
print(totalBasket/amount)
print()
def testExchange():
correlations = numpy.matrix('1 0.5; 0.5 1',dtype='float64')
volatilities = [0.30,0.25]
drifts = [0.04]*2
initials = [75,65]
prob=Probability(correlations,initials,drifts,volatilities)
T=1
xchg=Exchange(prob,prob.prices[0],prob.prices[1],T)
amount=2000000
simulated,error = prob.simulate(xchg,drifts[0],T,amount)
print(simulated)
print(xchg.approximate(0))
def testExchangeAsBasket():
N=2
correlations = numpy.ones((N,N))*0.5+numpy.diag([0.5]*N)
volatilities = [0.2] * N
drifts = [0.05] * N
initials = [100] *N
prob=Probability(correlations,initials,drifts,volatilities)
T=1
exchg = Exchange(prob,prob.prices[0],prob.prices[1],T)
basket=Basket(prob,0,prob.prices,T,[1,-1],method="integral") #exchange optie
amount=100000
print(exchg.approximate(0),*prob.simulate(exchg,drifts[0],T,amount))
print(basket.approximate(0),*prob.simulate(basket,drifts[0],T,amount))
def nonConstantTest():
correlations = numpy.matrix('1',dtype='float64')
volatilities = [lambda t: math.sqrt(0.18*t)]
drifts = [0.04]
initials = [75]
prob=Probability(correlations,initials,drifts,volatilities,False)
T=1
call=Option(60,prob.prices[0],T)
amount=100000
simulated,error = prob.simulate(call,drifts[0],T,amount)
# assert(math.isclose(simulated,call.approximate(0),rel_tol=0.01))
print(simulated,call.approximate(0))
def testBasket():
N=3
correlations = numpy.ones((N,N))*0.5+numpy.diag([0.5]*N)
volatilities = [0.2] * N
drifts = [0.05] * N
initials = [100] *N
prob=Probability(correlations,initials,drifts,volatilities)
T=1
K=100
basket=Basket(prob,K,prob.prices,T,[1,1,-1],1e20) # basket spread option (with nu=1e20)
print(basket.approximate(0))
def testProductLogNormals():
# rho12=0.5
# rho13=0.4
# rho23=0.2
rho=0.5
correlations = numpy.ones((2,2))*rho+numpy.diag([1-rho]*2)
volatilities = [0.2,0.4]
drifts = [0.05] * 2
initials = [100] * 2
prob=Probability(correlations,initials,drifts,volatilities)
r=drifts[0]
T=1
sigma1=volatilities[0]
sigma2=volatilities[1]
amount=200000
total=0
for _ in range(amount):
prob.samplePrices(T)
total+=prob.prices[0].value*prob.prices[1].value
print("simulated",total/amount)
mu = (2*r -0.5*sigma1**2 - 0.5*sigma2**2)*T
sigma = math.sqrt((sigma1**2 + sigma2**2 + 2 * rho * sigma1 * sigma2)*T)
print("expectation",prob.prices[0].initial*prob.prices[1].initial * math.exp(mu+0.5*sigma**2))
print("naief",prob.prices[0].approximate(T)*prob.prices[1].approximate(T))
if __name__=="__main__":
numpy.random.seed(3141592653)
# testCholesky()
# testCorrelated()
# testCallOption()
# testExchange()
# testCEO2()
# testCEOs()
testExchangeAsBasket()
# nonConstantTest()
# testBasket()
# testProductLogNormals() | {"/tests.py": ["/prices.py", "/basket.py", "/exchange.py", "/option.py", "/rainbow.py"], "/rainbow.py": ["/prices.py", "/basket.py"], "/basket.py": ["/prices.py", "/option.py"], "/option.py": ["/prices.py"], "/results.py": ["/prices.py", "/basket.py", "/rainbow.py"]} |
77,664 | gellyfisher/masterproef | refs/heads/master | /rainbow.py | import math
import numpy
from scipy.stats import norm
import scipy.integrate as integrate
from prices import *
from basket import *
class Rainbow:
def __init__(self,prob,strike,prices,maturity,nu=1,method="calibrate",xAmount=100,productmethod=False):
self.prob = prob
self.strike=strike
self.prices = prices
self.maturity = maturity
self.nu = nu
self.method = method
self.xAmount = xAmount
self.productmethod = productmethod
self.split()
self.dplusminCache = {}
self.partialCache = {}
self.approxCache = {}
self.volatilitiesCache = {}
self.correlationsCache = {}
def payoff(self,approx=False):
if not approx:
temp = max([self.prices[i].value for i in range(len(self.prices))]) - self.strike
else:
temp = max([self.prices[i].approximate(self.maturity) for i in range(len(self.prices))]) - self.strike
return max(0,temp)
def split(self):
N=len(self.prices)
prices1 = self.prices[:N//2]
prices2 = self.prices[N//2:];
if (N>=4):
self.F1=Rainbow(self.prob,0,prices1,self.maturity,self.nu,self.method,xAmount=self.xAmount,productmethod=self.productmethod)
self.F2=Rainbow(self.prob,0,prices2,self.maturity,self.nu,self.method,xAmount=self.xAmount,productmethod=self.productmethod)
elif (N==3):
self.F1=prices1[0]
self.F2=Rainbow(self.prob,0,prices2,self.maturity,self.nu,self.method,xAmount=self.xAmount,productmethod=self.productmethod)
else:
self.F1=prices1[0]
self.F2=prices2[0]
def getVolatilities(self,t,numeric=True):
if ((t,numeric) in self.volatilitiesCache):
return self.volatilitiesCache[(t,numeric)]
elif self.method=="calibrate" or not numeric:
N = len(self.prices)
r = self.prices[0].drift
tau = self.maturity-t
sigmat = [0, 0]
m = [0,N//2,N]
F=[self.F1,self.F2]
for i in range(2):
for j in range(m[i],m[i+1]):
for k in range(m[i],m[i+1]):
sigmaj = self.prices[j].volatility
sigmak = self.prices[k].volatility
rhokj = self.prob.getCorrelation(self.prices[j],self.prices[k])
pdvFj = F[i].partialDerivative(t,self.prices[j])
pdvFk = F[i].partialDerivative(t,self.prices[k])
if not self.productmethod:
Sj = self.prices[j].approximate(t)
Sk = self.prices[k].approximate(t)
sigmat[i] += pdvFj * pdvFk * rhokj * Sk * Sj * sigmaj * sigmak
else:
prod = self.prob.prices[0].initial * self.prob.prices[1].initial * math.exp((2*r + sigmaj*sigmak*rhokj)*t)
sigmat[i] += pdvFj * pdvFk * rhokj * prod * sigmaj * sigmak
F1t=self.F1.approximate(t)
F2t=self.F2.approximate(t)
sigmat1 = 0 if F1t==0 else self.nu * math.sqrt(sigmat[0]) / F1t
sigmat2 = 0 if F2t==0 else self.nu * math.sqrt(sigmat[1]) / F2t
elif (t==self.maturity):
sigmat1,sigmat2=self.getVolatilities(t,False)
else:
istart = int(round(t*self.xAmount/self.maturity))
xArray=[(i*self.maturity)/self.xAmount for i in range(istart,self.xAmount)]
# a rounding error can make (i*self.maturity)/self.xAmount differ from self.maturity, which causes problems, so we append it separately
xArray.append(self.maturity)
sigmat1=math.sqrt(integrate.simps([self.getVolatilities(x,False)[0]**2 for x in xArray],xArray)/(self.maturity-t))
sigmat2=math.sqrt(integrate.simps([self.getVolatilities(x,False)[1]**2 for x in xArray],xArray)/(self.maturity-t))
self.volatilitiesCache[(t,numeric)]=(sigmat1,sigmat2)
return (sigmat1,sigmat2)
def getCorrelations(self,t,numeric=True):
if ((t,numeric) in self.correlationsCache):
return self.correlationsCache[(t,numeric)]
elif self.method=="calibrate" or not numeric:
sigmat1,sigmat2 = self.getVolatilities(t)
N=len(self.prices)
r=self.prices[0].drift
delta12 = 0
for j in range(0,N//2):
for k in range(N//2,N):
sigmaj = self.prices[j].volatility
sigmak = self.prices[k].volatility
rhokj = self.prob.getCorrelation(self.prices[j],self.prices[k])
pdvFj = self.F1.partialDerivative(t,self.prices[j])
pdvFk = self.F2.partialDerivative(t,self.prices[k])
if not self.productmethod:
Sj = self.prices[j].approximate(t)
Sk = self.prices[k].approximate(t)
delta12 += pdvFj * pdvFk * rhokj * Sk * Sj * sigmaj * sigmak
else:
prod = self.prob.prices[0].initial * self.prob.prices[1].initial * math.exp((2*r + sigmaj*sigmak*rhokj)*t)
delta12 += pdvFj * pdvFk * rhokj * prod * sigmaj * sigmak
F1t=self.F1.approximate(t)
F2t=self.F2.approximate(t)
if (sigmat1==0 or sigmat2==0):
delta12=0
gammax = sigmat1+sigmat2
else:
delta12 = self.nu**2 *delta12/(sigmat1*sigmat2*F1t*F2t)
gammax = math.sqrt(sigmat1**2+sigmat2**2 - 2*sigmat1*sigmat2*delta12)
elif (t==self.maturity):
delta12,gammax=self.getCorrelations(t,False)
else:
istart = int(round(t*self.xAmount/self.maturity))
xArray=[(i*self.maturity)/self.xAmount for i in range(istart,self.xAmount)]
# a rounding error can make (i*self.maturity)/self.xAmount differ from self.maturity, which causes problems, so we append it separately
xArray.append(self.maturity)
delta12 = 0 # not used in this case
try:
gammax = math.sqrt(integrate.simps([self.getVolatilities(x,False)[0]**2 + self.getVolatilities(x,False)[1]**2 - 2* self.getVolatilities(x,False)[0]*self.getVolatilities(x,False)[1]*self.getCorrelations(x,False)[0] for x in xArray],xArray)/(self.maturity-t))
except ValueError:
gammax = 0 # in case the square root argument was negative
self.correlationsCache[(t,numeric)] = (delta12,gammax)
return (delta12,gammax)
def getDPlusMin(self,t):
if (t in self.dplusminCache):
return self.dplusminCache[t]
else:
_,gammax = self.getCorrelations(t)
tau = self.maturity-t
F1t=self.F1.approximate(t)
F2t=self.F2.approximate(t)
if (F1t>0 and F2t>0 and tau>0):
dplus = (math.log(F1t/F2t) + tau*(gammax**2)/2)/(gammax*math.sqrt(tau))
dmin = (math.log(F1t/F2t) - tau*(gammax**2)/2)/(gammax*math.sqrt(tau))
elif (F2t==0 and F1t>0 and tau>0):
dplus = math.inf
dmin = -math.inf
elif tau==0 and F1t > F2t:
dplus = math.inf
dmin = math.inf
else:
dplus = -math.inf
dmin = -math.inf
self.dplusminCache[t] = (dplus,dmin)
return (dplus,dmin)
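# These are the d+/- terms of the Margrabe exchange-option formula applied
# to F1 and F2 (max(F1,F2) = F2 + (F1-F2)^+), with gammax acting as the
# volatility of log(F1/F2) over the remaining time tau.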
def approximate(self,t):
if (t in self.approxCache):
return self.approxCache[t]
elif t==self.maturity:
return self.payoff(True)
elif self.strike==0:
dplus,dmin = self.getDPlusMin(t)
self.approxCache[t]=self.F1.approximate(t) * norm.cdf(dplus) + self.F2.approximate(t) * (1-norm.cdf(dmin))
return self.approxCache[t]
else:
N=len(self.prices)
r=self.prices[0].drift # needed in the productmethod branches below
_,sigmat2=self.getVolatilities(t)
sigmatx=0
dplus,dmin=self.getDPlusMin(t)
# compute the partial derivative of X using (2.4.7)
pdvX=[]
for j in range(0,N):
pdvX.append(self.F1.partialDerivative(t,self.prices[j]) * norm.cdf(dplus) - self.F2.partialDerivative(t,self.prices[j]) * norm.cdf(dmin))
# sigmatx from Lemma 2.4.4
for j in range(0,N):
for k in range(0,N):
sigmaj = self.prices[j].volatility
sigmak = self.prices[k].volatility
rhokj = self.prob.getCorrelation(self.prices[j],self.prices[k])
if not self.productmethod:
Sj = self.prices[j].approximate(t)
Sk = self.prices[k].approximate(t)
sigmatx += pdvX[j] * pdvX[k] * rhokj * Sk * Sj * sigmaj * sigmak
else:
prod = self.prob.prices[0].initial * self.prob.prices[1].initial * math.exp((2*r + sigmaj*sigmak*rhokj)*t)
sigmatx += pdvX[j] * pdvX[k] * rhokj * prod * sigmaj * sigmak
# from Theorem 2.4.6
Xt = self.F1.approximate(t) * norm.cdf(dplus) - self.F2.approximate(t) * norm.cdf(dmin)
if Xt!=0:
sigmatx = math.sqrt(sigmatx)/Xt
else:
sigmatx = 0
# delta_x2 from Lemma 2.4.5
deltax2=0
for j in range(0,N):
for k in range(N//2,N):
sigmaj = self.prices[j].volatility
sigmak = self.prices[k].volatility
rhokj = self.prob.getCorrelation(self.prices[j],self.prices[k])
if not self.productmethod:
Sj = self.prices[j].approximate(t)
Sk = self.prices[k].approximate(t)
deltax2 += pdvX[j] * self.F2.partialDerivative(t,self.prices[k]) * rhokj * Sk * Sj * sigmaj * sigmak
else:
prod = self.prob.prices[0].initial * self.prob.prices[1].initial * math.exp((2*r + sigmaj*sigmak*rhokj)*t)
deltax2 += pdvX[j] * self.F2.partialDerivative(t,self.prices[k]) * rhokj * prod * sigmaj * sigmak
if (sigmatx!=0 and sigmat2!=0):
deltax2 = deltax2/(sigmat2 * sigmatx * Xt * self.F2.approximate(t))
else:
deltax2=0
correlations = numpy.ones((2,2))*deltax2+numpy.diag([1-deltax2]*2)
volatilities = [sigmat2,sigmatx]
drifts = [self.prices[0].drift] * 2
initials = [self.F2.approximate(0), Xt]
prob = Probability(correlations,initials,drifts,volatilities)
basket=Basket(prob,self.strike,prob.prices,self.maturity,[1,1],nu=self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
self.approxCache[t]=basket.approximate(t)
return self.approxCache[t]
def partialDerivative(self,t,price):
#dit moeten we in feite enkel kunnen berekenen indien K=0
if (price not in self.prices):
return 0
else:
if ((t,price) in self.partialCache):
return self.partialCache[(t,price)]
else:
dplus,dmin = self.getDPlusMin(t)
self.partialCache[(t,price)]=self.F1.partialDerivative(t,price) * norm.cdf(dplus) + self.F2.partialDerivative(t,price) * (1-norm.cdf(dmin))
return self.partialCache[(t,price)]
if __name__=="__main__":
numpy.random.seed(3141592653)
N=5
rho=0.5
correlations = numpy.ones((N,N))*rho+numpy.diag([1-rho]*N)
volatilities = [0.3,0.4,0.5,0.5,0.7]
drifts = [0.05] * N
initials = [90,92,94,96,98] # one initial price per underlying (N=5)
prob = Probability(correlations,initials,drifts,volatilities)
T = 1
K = 100
rainbow=Rainbow(prob,K,prob.prices,T,method="integral")
amount=100000
simulated,error = prob.simulate(rainbow,drifts[0],T,amount)
print("simulated, std error",simulated,error)
print()
print(rainbow.approximate(0))
print() | {"/tests.py": ["/prices.py", "/basket.py", "/exchange.py", "/option.py", "/rainbow.py"], "/rainbow.py": ["/prices.py", "/basket.py"], "/basket.py": ["/prices.py", "/option.py"], "/option.py": ["/prices.py"], "/results.py": ["/prices.py", "/basket.py", "/rainbow.py"]} |
77,665 | gellyfisher/masterproef | refs/heads/master | /basket.py | import math
import numpy
from scipy.stats import norm
from scipy.stats import gamma
import scipy.integrate as integrate
from prices import *
from option import *
class Basket:
def __init__(self,prob,strike,prices,maturity,thetas,nu=1,method="calibrate",xAmount=100,productmethod=False):
self.prob = prob
self.prices = prices
self.thetas = thetas
self.maturity = maturity
self.nu=nu
self.method = method
self.xAmount = xAmount
self.productmethod = productmethod
# caches to avoid repeating the same computations over and over;
# each is keyed on the time t and stores the relevant value
self.dplusminCache = {}
self.partialCache = {}
self.approxCache = {}
self.volatilitiesCache = {}
self.correlationsCache = {}
self.setStrike(strike)
# once the strike or nu changes all results change, so the caches must not be reused
def resetCache(self):
self.dplusminCache = {}
self.partialCache = {}
self.approxCache = {}
self.volatilitiesCache = {}
self.correlationsCache = {}
def setNu(self,nu):
self.nu=nu
self.resetCache()
self.split()
def setStrike(self,strike):
if isinstance(strike, list):
self.strikes = strike
else:
m=numpy.count_nonzero(self.thetas)
self.strikes = []
for i in range(len(self.thetas)):
if self.thetas[i]==0:
self.strikes.append(0)
else:
self.strikes.append(strike/(self.thetas[i]*m))
self.resetCache()
self.split()
def payoff(self,approx=False):
if not approx:
temp = sum([self.thetas[i]*(self.prices[i].value-self.strikes[i]) for i in range(len(self.prices))])
else:
temp = sum([self.thetas[i]*(self.prices[i].approximate(self.maturity)-self.strikes[i]) for i in range(len(self.prices))])
return max(0,temp)
def split(self):
N=len(self.thetas)
thetas1 = self.thetas[:N//2]
strikes1 = self.strikes[:N//2]
prices1 = self.prices[:N//2]
thetas2=self.thetas[N//2:]
strikes2 = self.strikes[N//2:]
prices2 = self.prices[N//2:]
if (N>=4):
self.C1=Basket(self.prob,strikes1,prices1,self.maturity,thetas1,self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
self.C2=Basket(self.prob,strikes2,prices2,self.maturity,[-1*t for t in thetas2],self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
self.P1=Basket(self.prob,strikes1,prices1,self.maturity,[-1*t for t in thetas1],self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
self.P2=Basket(self.prob,strikes2,prices2,self.maturity,thetas2,self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
elif (N==3):
self.C1=Option(strikes1[0],prices1[0],self.maturity,thetas1[0])
self.C2=Basket(self.prob,strikes2,prices2,self.maturity,[-1*t for t in thetas2],self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
self.P1=Option(strikes1[0],prices1[0],self.maturity,-thetas1[0])
self.P2=Basket(self.prob,strikes2,prices2,self.maturity,thetas2,self.nu,method=self.method,xAmount=self.xAmount,productmethod=self.productmethod)
elif (N==2):
self.C1=Option(strikes1[0],prices1[0],self.maturity,thetas1[0])
self.C2=Option(strikes2[0],prices2[0],self.maturity,-thetas2[0])
self.P1=Option(strikes1[0],prices1[0],self.maturity,-thetas1[0])
self.P2=Option(strikes2[0],prices2[0],self.maturity,thetas2[0])
# N==1 is a plain option.
return N
def getVolatilities(self,t,numeric=True):
if ((t,numeric) in self.volatilitiesCache):
return self.volatilitiesCache[(t,numeric)]
elif self.method=="calibrate" or not numeric:
N = len(self.thetas)
r = self.prices[0].drift
tau = self.maturity-t
#lemma 2.3.2
sigmat = [0, 0]
psit = [0, 0]
m = [0,N//2,N]
C=[self.C1,self.C2]
P=[self.P1,self.P2]
for i in range(2):
for j in range(m[i],m[i+1]):
for k in range(m[i],m[i+1]):
sigmaj = self.prices[j].volatility
sigmak = self.prices[k].volatility
rhokj = self.prob.getCorrelation(self.prices[j],self.prices[k])
pdvCj = C[i].partialDerivative(t,self.prices[j])
pdvCk = C[i].partialDerivative(t,self.prices[k])
pdvPj = P[i].partialDerivative(t,self.prices[j])
pdvPk = P[i].partialDerivative(t,self.prices[k])
if not self.productmethod:
# here we use E[Sk] * E[Sj]
Sj = self.prices[j].approximate(t)
Sk = self.prices[k].approximate(t)
sigmat[i] += pdvCj * pdvCk * rhokj * Sk * Sj * sigmaj * sigmak
psit[i] += pdvPj * pdvPk * rhokj * Sk * Sj * sigmaj * sigmak
else:
# we compute E[Sk * Sj]
prod = self.prob.prices[0].initial*self.prob.prices[1].initial * math.exp((2*r + sigmaj*sigmak*rhokj)*t)
sigmat[i] += pdvCj * pdvCk * rhokj * prod * sigmaj * sigmak
psit[i] += pdvPj * pdvPk * rhokj * prod * sigmaj * sigmak
C1t=self.C1.approximate(t)
C2t=self.C2.approximate(t)
P1t=self.P1.approximate(t)
P2t=self.P2.approximate(t)
sigmat1 = 0 if C1t==0 else self.nu * math.sqrt(sigmat[0]) / C1t
sigmat2 = 0 if C2t==0 else self.nu * math.sqrt(sigmat[1]) / C2t
psit1 = 0 if P1t==0 else self.nu * math.sqrt(psit[0]) / P1t
psit2 = 0 if P2t==0 else self.nu * math.sqrt(psit[1]) / P2t
elif (t==self.maturity):
sigmat1,sigmat2,psit1,psit2=self.getVolatilities(t,False)
else:
istart = round(t*self.xAmount/self.maturity)
xArray=[(i*self.maturity)/self.xAmount for i in range(istart,self.xAmount)]
# a rounding error can make (i*self.maturity)/self.xAmount differ from self.maturity, which causes problems, so we append it separately
xArray.append(self.maturity)
# print(xArray)
sigmat1=math.sqrt(integrate.simps([self.getVolatilities(x,False)[0]**2 for x in xArray],xArray)/(self.maturity-t))
sigmat2=math.sqrt(integrate.simps([self.getVolatilities(x,False)[1]**2 for x in xArray],xArray)/(self.maturity-t))
psit1=math.sqrt(integrate.simps([self.getVolatilities(x,False)[2]**2 for x in xArray],xArray)/(self.maturity-t))
psit2=math.sqrt(integrate.simps([self.getVolatilities(x,False)[3]**2 for x in xArray],xArray)/(self.maturity-t))
self.volatilitiesCache[(t,numeric)]=(sigmat1,sigmat2,psit1,psit2)
return (sigmat1,sigmat2,psit1,psit2)
def getCorrelations(self,t,numeric=True):
if ((t,numeric) in self.correlationsCache):
return self.correlationsCache[(t,numeric)]
elif self.method=="calibrate" or not numeric:
sigmat1,sigmat2,psit1,psit2 = self.getVolatilities(t)
N=len(self.thetas)
r=self.prices[0].drift
beta12 = 0
gamma12 = 0
for j in range(0,N//2):
for k in range(N//2,N):
sigmaj = self.prices[j].volatility
sigmak = self.prices[k].volatility
rhokj = self.prob.getCorrelation(self.prices[j],self.prices[k])
pdvCj = self.C1.partialDerivative(t,self.prices[j])
pdvCk = self.C2.partialDerivative(t,self.prices[k])
pdvPj = self.P1.partialDerivative(t,self.prices[j])
pdvPk = self.P2.partialDerivative(t,self.prices[k])
if not self.productmethod:
# here we use E[Sk] * E[Sj]
Sj = self.prices[j].approximate(t)
Sk = self.prices[k].approximate(t)
beta12 += pdvCj * pdvCk * rhokj * Sk * Sj * sigmaj * sigmak
gamma12 += pdvPj * pdvPk * rhokj * Sk * Sj * sigmaj * sigmak
else:
# we compute E[Sk * Sj]
prod = self.prob.prices[0].initial*self.prob.prices[1].initial * math.exp((2*r + sigmaj*sigmak*rhokj)*t)
beta12 += pdvCj * pdvCk * rhokj * prod * sigmaj * sigmak
gamma12 += pdvPj * pdvPk * rhokj * prod * sigmaj * sigmak
C1t=self.C1.approximate(t)
C2t=self.C2.approximate(t)
P1t=self.P1.approximate(t)
P2t=self.P2.approximate(t)
if (C1t*sigmat1*C2t*sigmat2==0):
beta12=0
gamma1 = sigmat1+sigmat2
else:
beta12 = self.nu**2 *beta12/(sigmat1*sigmat2*C1t*C2t)
if (sigmat1**2+sigmat2**2 - 2*sigmat1*sigmat2*beta12>=0):
gamma1 = math.sqrt(sigmat1**2+sigmat2**2 - 2*sigmat1*sigmat2*beta12)
else:
gamma1=0
if (P1t*psit1*P2t*psit2==0):
gamma12=0
gamma2 = psit1+psit2
else:
gamma12 = self.nu**2 * gamma12/(psit1*psit2*P1t*P2t)
if (psit1**2+psit2**2-2*psit1*psit2*gamma12>=0):
gamma2 = math.sqrt(psit1**2+psit2**2-2*psit1*psit2*gamma12)
else:
gamma2 = 0
elif (t==self.maturity):
beta12,gamma12,gamma1,gamma2=self.getCorrelations(t,False)
else:
istart = round(t*self.xAmount/self.maturity)
xArray=[(i*self.maturity)/self.xAmount for i in range(istart,self.xAmount)]
# a rounding error can make (i*self.maturity)/self.xAmount differ from self.maturity, which causes problems, so we append it separately
xArray.append(self.maturity)
beta12 = 0
gamma12 = 0 # these values are not used when numeric=True
try:
gamma1=math.sqrt(integrate.simps([self.getVolatilities(x,False)[0]**2 + self.getVolatilities(x,False)[1]**2 - 2* self.getVolatilities(x,False)[0]*self.getVolatilities(x,False)[1]*self.getCorrelations(x,False)[0] for x in xArray],xArray)/(self.maturity-t))
except ValueError: # in case the square root argument is negative
gamma1=0
try:
gamma2=math.sqrt(integrate.simps([self.getVolatilities(x,False)[2]**2 + self.getVolatilities(x,False)[3]**2 - 2* self.getVolatilities(x,False)[2]*self.getVolatilities(x,False)[3]*self.getCorrelations(x,False)[1] for x in xArray],xArray)/(self.maturity-t))
except ValueError: # in case the square root argument is negative
gamma2=0
self.correlationsCache[(t,numeric)] = (beta12,gamma12,gamma1,gamma2)
return (beta12,gamma12,gamma1,gamma2)
def getDPlusMin(self,t):
if (t in self.dplusminCache):
return self.dplusminCache[t]
else:
beta12,gamma12,gamma1,gamma2 = self.getCorrelations(t)
tau = self.maturity-t
C1t=self.C1.approximate(t)
C2t=self.C2.approximate(t)
P1t=self.P1.approximate(t)
P2t=self.P2.approximate(t)
if (C1t>0 and C2t>0 and tau>0 and gamma1>0):
d1plus = (math.log(C1t/C2t) + tau*(gamma1**2)/2)/(gamma1*math.sqrt(tau))
d1min = (math.log(C1t/C2t) - tau*(gamma1**2)/2)/(gamma1*math.sqrt(tau))
elif (C2t==0 and C1t>0 and tau>0 and gamma1>0):
d1plus = math.inf
d1min = -math.inf
elif (tau==0 and C1t > C2t) or gamma1==0:
d1plus = math.inf
d1min = math.inf
else:
d1plus = -math.inf
d1min = -math.inf
if (P1t>0 and P2t>0 and tau>0 and gamma2>0):
d2plus = (math.log(P2t/P1t) + tau*(gamma2**2)/2)/(gamma2*math.sqrt(tau))
d2min = (math.log(P2t/P1t) - tau*(gamma2**2)/2)/(gamma2*math.sqrt(tau))
elif (P1t==0 and P2t>0 and tau>0 and gamma2>0):
d2plus = math.inf
d2min = -math.inf
elif (tau==0 and P2t > P1t) or gamma2==0:
d2plus = math.inf
d2min = math.inf
else:
d2plus = -math.inf
d2min = -math.inf
self.dplusminCache[t] = (d1plus,d1min,d2plus,d2min)
return (d1plus,d1min,d2plus,d2min)
#partialDerivative wrt to price (Sk)
def partialDerivative(self,t,price):
if (price not in self.prices):
return 0
else:
if ((t,price) in self.partialCache):
return self.partialCache[(t,price)]
else:
d1plus,d1min,d2plus,d2min = self.getDPlusMin(t)
E1 = self.C1.partialDerivative(t,price) * norm.cdf(d1plus) - self.C2.partialDerivative(t,price) * norm.cdf(d1min)
E2 = self.P2.partialDerivative(t,price) * norm.cdf(d2plus) - self.P1.partialDerivative(t,price) * norm.cdf(d2min)
self.partialCache[(t,price)] = E1 + E2
return E1 + E2
def approximate(self,t):
if (t in self.approxCache):
return self.approxCache[t]
elif t==self.maturity:
return self.payoff(True)
else:
d1plus,d1min,d2plus,d2min = self.getDPlusMin(t)
E1 = self.C1.approximate(t) * norm.cdf(d1plus) - self.C2.approximate(t) * norm.cdf(d1min)
E2 = self.P2.approximate(t) * norm.cdf(d2plus) - self.P1.approximate(t) * norm.cdf(d2min)
self.approxCache[t]=E1+E2
return E1+E2
def approxGamma(self): # at time 0
T = self.maturity
r = self.prices[0].drift
N=len(self.prices)
F = sum([self.thetas[i]*self.prices[i].approximate(T) for i in range(N)])
K = sum([self.thetas[i]*self.strikes[i] for i in range(N)])
M2 = 0
for i in range(N):
for j in range(N):
sigmai = self.prices[i].volatility
sigmaj = self.prices[j].volatility
rhoij = self.prob.getCorrelation(self.prices[i],self.prices[j])
M2+= self.thetas[i]*self.thetas[j]*self.prices[i].approximate(T)*self.prices[j].approximate(T) * math.exp(T*sigmai*sigmaj*rhoij)
M2/=(F**2)
alpha = (2*M2-1)/(M2-1)
beta = 1-(1/M2)
return math.exp(-r*T) * ( F * gamma.cdf(F/K,a=alpha-1,scale=beta) - K * gamma.cdf(F/K,a=alpha,scale=beta))
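# Background on the formula above: alpha and beta are fitted so that a
# gamma-type distribution matches the basket's first two (normalised)
# moments F and M2, in the spirit of the Milevsky-Posner approximation; the
# discounted price then reduces to two gamma CDF evaluations.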
def calibrate(prob,contract,amount=100000,reltol=1e-09):
ATM=0 # determine the strike that makes the contract at the money
for i in range(len(contract.prices)):
ATM+=contract.prices[i].initial*contract.thetas[i]
strikes=contract.strikes # saved temporarily so the strikes can be restored later
contract.setStrike(ATM)
T=contract.maturity
r=contract.prices[0].drift
simulated,error = prob.simulate(contract,r,T,amount)
uppernu=1
contract.setNu(uppernu)
if (contract.approximate(0)<simulated):
while (contract.approximate(0)<simulated):
uppernu*=2
contract.setNu(uppernu)
lowernu=uppernu/2
else:
while (contract.approximate(0)>simulated):
uppernu/=2
contract.setNu(uppernu)
lowernu=uppernu
uppernu*=2
while not math.isclose(lowernu,uppernu,rel_tol=reltol):
contract.setNu((lowernu+uppernu)/2)
approx = contract.approximate(0)
if (approx < simulated):
lowernu=(lowernu+uppernu)/2
else:
uppernu=(lowernu+uppernu)/2
contract.setStrike(strikes)
return simulated,error,uppernu
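# Usage sketch (hypothetical): bisect on nu until the analytic approximation
# matches a Monte Carlo price of the at-the-money version of the contract,
# then reuse the calibrated nu for other strikes:
# simulated, stderr, nu = calibrate(prob, basket)
# basket.setNu(nu)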
if __name__=="__main__":
numpy.random.seed(3141592653)
thetas = [0.35,0.25,0.20,0.15,0.05]
N=len(thetas)
rho=0.5
correlations = numpy.ones((N,N))*rho+numpy.diag([1-rho]*N)
volatilities = [0.5] * N
drifts = [0.05] * N
initials = [100] *N
prob = Probability(correlations,initials,drifts,volatilities)
T = 1
K = 100
basket=Basket(prob,K,prob.prices,T,thetas)
print("gamma",basket.approxGamma())
print()
print("calibrate",basket.approximate(0))
print()
amount=100000
simulated,error = prob.simulate(basket,drifts[0],T,amount)
print("simulated, std error",simulated,error)
print()
basket=Basket(prob,K,prob.prices,T,thetas,method="integral")
print("integral",basket.approximate(0))
| {"/tests.py": ["/prices.py", "/basket.py", "/exchange.py", "/option.py", "/rainbow.py"], "/rainbow.py": ["/prices.py", "/basket.py"], "/basket.py": ["/prices.py", "/option.py"], "/option.py": ["/prices.py"], "/results.py": ["/prices.py", "/basket.py", "/rainbow.py"]} |
77,666 | gellyfisher/masterproef | refs/heads/master | /option.py | import math
import numpy
from scipy.stats import norm
import scipy.integrate as integrate
from prices import *
class Option: #a plain call (theta=1) or put option (theta=-1)
def __init__(self,strike,price,maturity,theta=1):
self.strike = strike
self.price = price
self.theta = theta
self.maturity = maturity
self.partialCache = {}
self.approxCache = {}
def payoff(self,approx=False):
if not approx:
return max(0,self.theta*(self.price.value-self.strike))
else:
return max(0,self.theta*(self.price.approximate(self.maturity)-self.strike))
def approximate(self,t): # exact in the t=0 case
if (t in self.approxCache):
return self.approxCache[t]
elif t==self.maturity:
return self.payoff(True)
else:
s = 1 if self.theta>=0 else -1
r=self.price.drift
tau=self.maturity-t
if (callable(self.price.volatility)): # the volatility is non-constant
sigma = math.sqrt(integrate.quad(lambda t:self.price.volatility(t)**2,0,tau)[0]/tau)
else:
sigma = self.price.volatility
if (self.strike>0):
dplus=(math.log(self.price.approximate(t)/self.strike)+(r+(sigma**2)/2)*tau)/(sigma*math.sqrt(tau))
dmin=(math.log(self.price.approximate(t)/self.strike)+(r-(sigma**2)/2)*tau)/(sigma*math.sqrt(tau))
else:
dplus=math.inf
dmin=math.inf
self.approxCache[t] = self.theta*(self.price.approximate(t)*norm.cdf(s*dplus)-math.exp(-tau*r)*self.strike*norm.cdf(s*dmin))
return self.approxCache[t]
def partialDerivative(self,t,price):
if (price!=self.price):
return 0
else:
if (t in self.partialCache):
return self.partialCache[t]
else:
s = 1 if self.theta>=0 else -1
r=self.price.drift
sigma=self.price.volatility
tau=self.maturity-t
if (self.strike>0 and tau>0):
dplus=(math.log(self.price.approximate(t)/self.strike)+(r+(sigma**2)/2)*tau)/(sigma*math.sqrt(tau))
elif tau==0 and self.price.approximate(t)<self.strike:
dplus=-math.inf
else:
dplus=math.inf
self.partialCache[t] = self.theta * norm.cdf(s*dplus)
return self.partialCache[t]
def gamma(self,t):
s = 1 if self.theta>=0 else -1
r=self.price.drift
sigma=self.price.volatility
tau=self.maturity-t
dplus=s*(math.log(self.price.approximate(t)/self.strike)+(r+(sigma**2)/2)*tau)/(sigma*math.sqrt(tau))
return self.theta * (math.exp(-0.5*dplus**2))/(math.sqrt(2*math.pi*tau)*sigma*self.price.approximate(t))
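# Sanity-check sketch (hypothetical): the closed-form approximate(0) prices
# satisfy put-call parity, C - P = S0 - K*exp(-r*T), for a call/put pair
# (theta=1 and theta=-1) with the same price, strike and maturity.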
| {"/tests.py": ["/prices.py", "/basket.py", "/exchange.py", "/option.py", "/rainbow.py"], "/rainbow.py": ["/prices.py", "/basket.py"], "/basket.py": ["/prices.py", "/option.py"], "/option.py": ["/prices.py"], "/results.py": ["/prices.py", "/basket.py", "/rainbow.py"]} |