id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8111179 | import os
import sys
from datetime import datetime
from decimal import Decimal
from timeit import timeit
from unittest.case import TestCase
from uuid import UUID, uuid4
from eventsourcing.application import AggregateNotFound, Application
from eventsourcing.persistence import InfrastructureFactory
from eventsourcing.postgres import PostgresDatastore
from eventsourcing.tests.ramdisk import tmpfile_uris
from eventsourcing.tests.test_aggregate import BankAccount
from eventsourcing.tests.test_postgres import drop_postgres_table
# Scale factor for the benchmark iteration counts below; override with the
# TEST_TIMEIT_FACTOR environment variable (default 10).
TIMEIT_FACTOR = int(os.environ.get("TEST_TIMEIT_FACTOR", default=10))
class TestApplicationWithPOPO(TestCase):
    """Application tests and benchmarks against the default plain-Python-object infrastructure."""

    # Iterations per timeit benchmark; subclasses scale this down for slower backends.
    timeit_number = 100 * TIMEIT_FACTOR

    # Per-class benchmark bookkeeping shared across test instances (keyed by concrete class).
    started_ats = {}
    counts = {}

    def setUp(self) -> None:
        # Enable snapshotting for the application under test.
        os.environ[InfrastructureFactory.IS_SNAPSHOTTING_ENABLED] = "yes"

    def tearDown(self) -> None:
        # Guarded delete: a test may already have removed the variable itself
        # (see test__get_performance_without_snapshotting_enabled).
        if InfrastructureFactory.IS_SNAPSHOTTING_ENABLED in os.environ:
            del os.environ[InfrastructureFactory.IS_SNAPSHOTTING_ENABLED]
    def print_time(self, test_label, duration):
        """Print one benchmark report line, plus a header on the first call per class.

        Args:
            test_label: short label identifying the benchmark.
            duration: total seconds taken by ``timeit_number`` iterations.
        """
        cls = type(self)
        if cls not in self.started_ats:
            # First report for this class: record the wall-clock start and print a header.
            self.started_ats[cls] = datetime.now()
            print("\t", f"{cls.__name__: <29} timeit number: {cls.timeit_number}")
            self.counts[cls] = 1
        else:
            self.counts[cls] += 1
        rate = f"{self.timeit_number / duration:.0f} events/s"
        print(
            "\t",
            f"{cls.__name__: <29}",
            f"{test_label: <21}",
            f"{rate: >15}",
            f" {1000 * duration / self.timeit_number:.3f} ms/event",
        )
        # NOTE(review): assumes exactly three timed tests run per class — confirm
        # if benchmarks are added or removed.
        if self.counts[cls] == 3:
            duration = datetime.now() - cls.started_ats[cls]
            print("\t", f"{cls.__name__: <29} timeit duration: {duration}")
            sys.stdout.flush()
    def test_example_application(self):
        """End-to-end check of the BankAccounts application, including snapshotting."""
        app = BankAccounts()
        # Check AccountNotFound exception.
        with self.assertRaises(BankAccounts.AccountNotFoundError):
            app.get_account(uuid4())
        # Open an account.
        account_id = app.open_account(
            full_name="Alice",
            email_address="<EMAIL>",
        )
        # Credit the account.
        app.credit_account(account_id, Decimal("10.00"))
        app.credit_account(account_id, Decimal("25.00"))
        app.credit_account(account_id, Decimal("30.00"))
        # Check balance.
        self.assertEqual(
            app.get_balance(account_id),
            Decimal("65.00"),
        )
        # Four events recorded so far: one "opened" plus three credits.
        section = app.log["1,10"]
        self.assertEqual(len(section.items), 4)
        # Take snapshot (specify version).
        app.take_snapshot(account_id, version=2)
        snapshots = list(app.snapshots.get(account_id, desc=True, limit=1))
        self.assertEqual(len(snapshots), 1)
        self.assertEqual(snapshots[0].originator_version, 2)
        # Reconstruct version 3 from the version-2 snapshot plus one stored event.
        from_snapshot = app.repository.get(account_id, version=3)
        self.assertIsInstance(from_snapshot, BankAccount)
        self.assertEqual(from_snapshot.version, 3)
        self.assertEqual(from_snapshot.balance, Decimal("35.00"))
        # Take snapshot (don't specify version): snapshots the latest version (4).
        app.take_snapshot(account_id)
        snapshots = list(app.snapshots.get(account_id, desc=True, limit=1))
        self.assertEqual(len(snapshots), 1)
        self.assertEqual(snapshots[0].originator_version, 4)
        from_snapshot = app.repository.get(account_id)
        self.assertIsInstance(from_snapshot, BankAccount)
        self.assertEqual(from_snapshot.version, 4)
        self.assertEqual(from_snapshot.balance, Decimal("65.00"))
    def test__put_performance(self):
        """Benchmark storing events via ``app.save()``."""
        app = BankAccounts()
        # Open an account.
        account_id = app.open_account(
            full_name="Alice",
            email_address="<EMAIL>",
        )
        account = app.get_account(account_id)

        def put():
            # Credit the account.
            account.append_transaction(Decimal("10.00"))
            app.save(account)

        # Warm up.
        number = 10
        timeit(put, number=number)
        duration = timeit(put, number=self.timeit_number)
        self.print_time("store events", duration)
    def test__get_performance_with_snapshotting_enabled(self):
        """Benchmark aggregate retrieval with snapshotting enabled (default from setUp)."""
        print()
        self._test_get_performance("get with snapshotting")
    def test__get_performance_without_snapshotting_enabled(self):
        """Benchmark aggregate retrieval with snapshotting disabled."""
        # Undo setUp's setting; tearDown tolerates the variable being absent.
        del os.environ[InfrastructureFactory.IS_SNAPSHOTTING_ENABLED]
        self._test_get_performance("get no snapshotting")
    def _test_get_performance(self, test_label):
        """Shared benchmark body: time repeated ``app.get_account()`` calls."""
        app = BankAccounts()
        # Open an account.
        account_id = app.open_account(
            full_name="Alice",
            email_address="<EMAIL>",
        )

        def read():
            # Get the account.
            app.get_account(account_id)

        # Warm up.
        timeit(read, number=10)
        duration = timeit(read, number=self.timeit_number)
        self.print_time(test_label, duration)
class TestApplicationSnapshottingException(TestCase):
    """Snapshotting must be explicitly enabled; otherwise take_snapshot() asserts."""

    def test_take_snapshot_raises_assertion_error_if_snapshotting_not_enabled(self):
        app = Application()
        with self.assertRaises(AssertionError) as cm:
            app.take_snapshot(uuid4())
        # The message should tell the user exactly how to enable snapshotting.
        self.assertEqual(
            cm.exception.args[0],
            (
                "Can't take snapshot without snapshots store. "
                "Please set environment variable IS_SNAPSHOTTING_ENABLED "
                "to a true value (e.g. 'y')."
            ),
        )
class TestApplicationWithSQLite(TestApplicationWithPOPO):
    """Run the same application tests/benchmarks against SQLite infrastructure."""

    # SQLite is slower than POPO, so scale the iteration count down.
    timeit_number = 30 * TIMEIT_FACTOR

    def setUp(self) -> None:
        super().setUp()
        # Fresh temporary database file per test via the ramdisk helper.
        self.uris = tmpfile_uris()
        # self.db_uri = next(self.uris)
        os.environ["INFRASTRUCTURE_FACTORY"] = "eventsourcing.sqlite:Factory"
        os.environ["CREATE_TABLE"] = "y"
        os.environ["SQLITE_DBNAME"] = next(self.uris)

    def tearDown(self) -> None:
        # Remove the variables set in setUp so later tests see a clean environment.
        del os.environ["INFRASTRUCTURE_FACTORY"]
        del os.environ["CREATE_TABLE"]
        del os.environ["SQLITE_DBNAME"]
        super().tearDown()
class TestApplicationWithPostgres(TestApplicationWithPOPO):
    """Run the same application tests/benchmarks against PostgreSQL infrastructure."""

    # PostgreSQL round-trips are the slowest, so use the smallest iteration count.
    timeit_number = 5 * TIMEIT_FACTOR

    def setUp(self) -> None:
        super().setUp()
        self.uris = tmpfile_uris()
        os.environ["INFRASTRUCTURE_FACTORY"] = "eventsourcing.postgres:Factory"
        os.environ["CREATE_TABLE"] = "y"
        os.environ["POSTGRES_DBNAME"] = "eventsourcing"
        os.environ["POSTGRES_HOST"] = "127.0.0.1"
        os.environ["POSTGRES_USER"] = "eventsourcing"
        os.environ["POSTGRES_PASSWORD"] = "<PASSWORD>"
        db = PostgresDatastore(
            os.getenv("POSTGRES_DBNAME"),
            os.getenv("POSTGRES_HOST"),
            os.getenv("POSTGRES_USER"),
            os.getenv("POSTGRES_PASSWORD"),
        )
        # Drop tables left over from previous runs so each test starts from an empty store.
        drop_postgres_table(db, "bankaccounts_events")
        drop_postgres_table(db, "bankaccounts_snapshots")

    def tearDown(self) -> None:
        # Remove the variables set in setUp so later tests see a clean environment.
        del os.environ["INFRASTRUCTURE_FACTORY"]
        del os.environ["CREATE_TABLE"]
        del os.environ["POSTGRES_DBNAME"]
        del os.environ["POSTGRES_HOST"]
        del os.environ["POSTGRES_USER"]
        del os.environ["POSTGRES_PASSWORD"]
        super().tearDown()
class BankAccounts(Application):
    """Example event-sourced application managing BankAccount aggregates."""

    def open_account(self, full_name, email_address):
        """Create and store a new account, returning its id."""
        account = BankAccount.open(
            full_name=full_name,
            email_address=email_address,
        )
        self.save(account)
        return account.id

    def credit_account(self, account_id: UUID, amount: Decimal) -> None:
        """Append a credit transaction of ``amount`` to the given account."""
        account = self.get_account(account_id)
        account.append_transaction(amount)
        self.save(account)

    def get_balance(self, account_id: UUID) -> Decimal:
        """Return the current balance of the given account."""
        account = self.get_account(account_id)
        return account.balance

    def get_account(self, account_id: UUID) -> BankAccount:
        """Return the BankAccount aggregate for ``account_id``.

        Raises:
            AccountNotFoundError: if no aggregate exists for the id, or the
                stored aggregate is not a BankAccount.
        """
        try:
            aggregate = self.repository.get(account_id)
        except AggregateNotFound as e:
            # Chain the cause explicitly so tracebacks show the original lookup failure.
            raise self.AccountNotFoundError(account_id) from e
        if not isinstance(aggregate, BankAccount):
            # Some other aggregate type is stored under this id.
            raise self.AccountNotFoundError(account_id)
        return aggregate

    class AccountNotFoundError(Exception):
        """Raised when no BankAccount exists for the requested id."""
| StarcoderdataPython |
8072637 | <reponame>MiningTheDisclosures/conflict-minerals-data
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-07 04:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Update the admin-facing labels of the edgarsdfiling model (no schema change)."""

    dependencies = [
        ('edgar', '0004_auto_20170706_2035'),
    ]

    operations = [
        # AlterModelOptions only changes Meta options (verbose names here);
        # it produces no database DDL.
        migrations.AlterModelOptions(
            name='edgarsdfiling',
            options={'verbose_name': 'SD Filing', 'verbose_name_plural': 'SD Filings'},
        ),
    ]
| StarcoderdataPython |
5066364 | <filename>models/python/demo-dataset/15.py
# Memo table shared across calls, pre-seeded with the two base cases.
# BUG FIX: the cache used to be a local dict created fresh on every call, so no
# result was ever reused and the recursion was exponential — fib(200) in the
# driver loop below could never finish.
_fibonacci_cache = {1: 1, 2: 1}


def fibonacci_memo(input_value):
    """Return the input_value-th Fibonacci number (1-indexed), with memoization.

    Raises:
        ValueError: if input_value is less than 1 (the original code left
            ``value`` unbound here and crashed with UnboundLocalError).
    """
    if input_value < 1:
        raise ValueError("input_value must be >= 1")
    if input_value in _fibonacci_cache:
        return _fibonacci_cache[input_value]
    value = fibonacci_memo(input_value - 1) + fibonacci_memo(input_value - 2)
    _fibonacci_cache[input_value] = value
    return value
# Print the first 200 Fibonacci numbers.
for index in range(1, 201):
    print(f"fib({index}) = ", fibonacci_memo(index))
1824282 | <reponame>stephaniemhutson/BoxPackingAPI
from flask import Blueprint, current_app, jsonify, request

from fulfillment_api.api_verify import verify_box_api
from fulfillment_api.constants import permissions

from ..authentication.login_required import (login_required,
                                             shotput_permission_required)
from ..crossdomain import crossdomain
from ..permissions.decorators import view_requires_team_permission
from .errors import APIError, BoxError
from .helper import (api_packing_algorithm, compare_1000_times,
                     how_many_items_fit, pre_pack_boxes, space_after_packing)
# BUG FIX: "from .messages as msg" is a SyntaxError; the module must be
# imported with "from . import ... as ...".
from . import messages as msg

blueprint = Blueprint('box_packing', __name__)
@blueprint.route('/box_packing_api/basic',
                 methods=['POST', 'OPTIONS'])
@crossdomain(api=True)
@login_required
@verify_box_api
@view_requires_team_permission(permissions.box_packing_read)
def get_best_fit():
    '''
    A non-database calling endpoint which is a simple usage of the box packing
    algorithm which accepts json with items and a single box.

    Returns:
        'packages': List[Dict[
            items_packed: Dict[item, quantity]
            total_weight: float
    '''
    json_data = request.get_json(force=True)
    current_app.log.data(json_data)
    try:
        products_info = json_data['products_info']
        box_info = json_data['box_info']
        options = json_data.get('options', {})
    except KeyError as e:
        current_app.log.error(e)
        return jsonify(error=msg.missing_value_for(e)), 400
    try:
        items_arrangement = pre_pack_boxes(box_info, products_info, options)
    except BoxError as e:
        current_app.log.error(e)
        return jsonify(error=e.message), 400
    except TypeError as e:
        current_app.log.error(e)
        return jsonify(error='Invalid data in request.'), 400
    except ValueError as e:
        current_app.log.error(e)
        value = e.message.split(' ')[-1]
        return jsonify(error=('Invalid data in request. Check value {}'
                              .format(value))), 400
    except KeyError as e:
        current_app.log.error(e)
        # BUG FIX: this error response previously carried no status code and was
        # served as HTTP 200; return 400 like every other client-error handler.
        return jsonify(error=msg.missing_value_for(e.message)), 400
    except APIError as e:
        current_app.log.error(e)
        return jsonify(error=e.message), e.status_code
    return jsonify(packages=items_arrangement)
@blueprint.route('/box_packing_api/remaining_volume',
                 methods=['POST', 'OPTIONS'])
@crossdomain(api=True)
@login_required
@verify_box_api
@view_requires_team_permission(permissions.box_packing_read)
def get_space_after_packing():
    '''
    Non-database calling endpoint which calculates the remaining volume in a
    block after packing. Assumes box and item are of same units.

    Example input:
        {
            "box_info": {"width": 9, "height": 8, "length": 5},
            "item_info": {"width": 9, "height": 8, "length": 4}
        }
    Example output:
        {
            "remaining_dimensional_blocks": [
                {"height": 8, "length": 9, "width": 1}
            ],
            "remaining_volume": 72
        }
    '''
    payload = request.get_json(force=True)
    current_app.log.data(payload)
    try:
        product_dims = payload['product_info']
        box_dims = payload['box_info']
        remaining = space_after_packing(product_dims, box_dims)
    except KeyError as e:
        current_app.log.error(e)
        return jsonify(error=msg.missing_value_for(e.message)), 400
    except TypeError as e:
        current_app.log.error(e)
        return jsonify(error=msg.invalid_data), 400
    except BoxError as e:
        current_app.log.error(e)
        return jsonify(error=e.message), 400
    except APIError as e:
        current_app.log.error(e)
        return jsonify(error=e.message), e.status_code
    return jsonify(remaining)
@blueprint.route('/box_packing_api/capacity', methods=['POST', 'OPTIONS'])
@crossdomain(api=True)
@login_required
@verify_box_api
@view_requires_team_permission(permissions.box_packing_read)
def how_many_fit():
    '''
    non-database hitting endpoint which calculates the capacity of a box
    given a item size. Assumes dimensional units are the same.
    Same inputs as remaining_volume.

    Outputs:
    {
        "remaining_volume": 72,
        "total_packed": 1
    }
    '''
    json_data = request.get_json(force=True)
    current_app.log.data(json_data)
    try:
        item_info = json_data['product_info']
        box_info = json_data['box_info']
        max_packed = json_data.get('max_packed')
        return jsonify(how_many_items_fit(item_info, box_info, max_packed))
    except KeyError as e:
        current_app.log.error(e)
        return jsonify(error=msg.missing_value_for(e.message)), 400
    except TypeError as e:
        current_app.log.error(e)
        return jsonify(error=msg.invalid_data), 400
    except BoxError as e:
        current_app.log.error(e)
        # BUG FIX: this client error previously returned with no status code
        # (HTTP 200); return 400 for consistency with the other handlers.
        return jsonify(error=e.message), 400
    except ValueError as e:
        current_app.log.error(e)
        value = e.message.split(' ')[-1]
        return jsonify(error=('Invalid data in request. Check value {}'
                              .format(value))), 400
    except APIError as e:
        current_app.log.error(e)
        return jsonify(error=e.message), e.status_code
@blueprint.route('/box_packing_api/compare_packing_efficiency',
                 methods=['GET', 'OPTIONS'])
@crossdomain(api=True)
@login_required
@shotput_permission_required
@view_requires_team_permission(permissions.global_god_mode)
def compare_pack():
    """Internal endpoint used to verify the accuracy of shotput vs pyshipping."""
    query_args = request.args.to_dict()
    current_app.log.data(query_args)
    num_trials = query_args.get('trials')
    return jsonify(compare_1000_times(num_trials))
@blueprint.route('/box_packing_api/full', methods=['POST', 'OPTIONS'])
@crossdomain(api=True)
@login_required
@verify_box_api
@view_requires_team_permission(permissions.box_packing_read)
def box_packing_api():
    '''
    a full access endpoint to the box algorithm, which accepts boxes and items
    and returns the best box and the items arrangement

    Outputs:
        Dict[
            'package_contents': List[Dict[
                packed_prodcuts: Dict[item, quantity]
                total_weight: float
                box: Dict[
                    weight: float
                    height: float
                    length: float
                    width: float
                    dimension_units: ('inches', 'centimeters', 'feet', 'meters')
                    weight_units: ('grams', 'pounds', 'kilograms', 'onces')
                    name: String
                ]
            ]
        ]
    '''
    json_data = request.get_json(force=True)
    current_app.log.data(json_data)
    try:
        boxes_info = json_data['boxes_info']
        products_info = json_data['products_info']
        options = json_data.get('options', {})
        package_contents = api_packing_algorithm(boxes_info, products_info,
                                                 options)
    except KeyError as e:
        current_app.log.error(e)
        return jsonify(error=msg.missing_value_for(e.message)), 400
    except TypeError as e:
        current_app.log.error(e)
        return jsonify(error=msg.invalid_data), 400
    except BoxError as e:
        current_app.log.error(e)
        # BUG FIX: this client error previously returned with no status code
        # (HTTP 200); return 400 for consistency with the other handlers.
        return jsonify(error=e.message), 400
    except ValueError as e:
        current_app.log.error(e)
        value = e.message.split(' ')[-1]
        return jsonify(error=('Invalid data in request. Check value {}'
                              .format(value))), 400
    except APIError as e:
        current_app.log.error(e)
        return jsonify(error=e.message), e.status_code
    return jsonify(package_contents)
| StarcoderdataPython |
3256622 | from collections import defaultdict
from itertools import count
from operator import itemgetter
from pathlib import Path
from typing import Dict, Optional
from typing import List, Tuple, Union
import htbuilder
import streamlit as st
from htbuilder import span, div, script, style, link, styles, HtmlElement, br
from htbuilder.units import px
from spacy.tokens import Doc
# Qualitative color palette used to color summary sentences; colors are
# assigned per sentence index and cycle when exhausted (see get_color).
palette = [
    "#66c2a5",
    "#fc8d62",
    "#8da0cb",
    "#e78ac3",
    "#a6d854",
    "#ffd92f",
    "#e5c494",
    "#b3b3b3",
]
# Gray used for de-emphasized/inactive elements.
inactive_color = "#BBB"
def local_stylesheet(path):
    """Read a local CSS file and inline it as an HTML <style> element."""
    with open(path) as stylesheet_file:
        return style()(stylesheet_file.read())
def remote_stylesheet(url):
    """Build an HTML <link> element pointing at a remote stylesheet URL."""
    # NOTE(review): no rel="stylesheet" attribute is set here — confirm callers
    # do not rely on this element actually loading CSS in the browser.
    return link(
        href=url
    )
def local_script(path):
    """Read a local JavaScript file and inline it as an HTML <script> element."""
    with open(path) as script_file:
        return script()(script_file.read())
def remote_script(url):
    """Build an HTML <script> element that loads JavaScript from a remote URL."""
    return script(
        src=url
    )
def get_color(sent_idx):
    """Return the palette color for a sentence index, cycling when exhausted."""
    return palette[sent_idx % len(palette)]
def hex_to_rgb(hex):
    """Convert a '#RRGGBB' (or 'RRGGBB') hex color to an (r, g, b) int tuple."""
    digits = hex.replace("#", '')
    red = int(digits[0:2], 16)
    green = int(digits[2:4], 16)
    blue = int(digits[4:6], 16)
    return (red, green, blue)
def color_with_opacity(hex_color, opacity):
    """Return a CSS rgba() string for a hex color with the given opacity."""
    red, green, blue = hex_to_rgb(hex_color)
    return f"rgba({red},{green},{blue},{opacity:.2f})"
class Component:
    """Base class for HTML components rendered into Streamlit."""

    def show(self, width=None, height=None, scrolling=True, **kwargs):
        """Render this component into the Streamlit app.

        Extra keyword arguments become CSS properties on the wrapping <div>.
        """
        out = div(style=styles(
            **kwargs
        ))(self.html())
        html = str(out)
        st.components.v1.html(html, width=width, height=height, scrolling=scrolling)

    def html(self):
        """Return the component's HTML content; subclasses must override."""
        # BUG FIX: previously `raise NotImplemented` — NotImplemented is a
        # constant, not an exception, so raising it fails with a confusing
        # TypeError. NotImplementedError is the correct abstract-method signal.
        raise NotImplementedError
class MainView(Component):
    def __init__(
        self,
        document: Doc,
        summaries: List[Doc],
        semantic_alignments: Optional[List[Dict]],
        lexical_alignments: Optional[List[Dict]],
        layout: str,
        scroll: bool,
        gray_out_stopwords: bool
    ):
        """Side-by-side document/summary visualization component.

        Args:
            document: source document (spaCy Doc).
            summaries: one Doc per candidate summary.
            semantic_alignments: per-summary map of summary token index ->
                [(doc token index, similarity)]; None disables semantic highlights.
            lexical_alignments: per-summary map of summary (start, end) span ->
                list of matching doc (start, end) spans; None disables n-gram underlines.
            layout: CSS layout name, used as the "{layout}-layout" class —
                assumed to match a class in summvis.css; TODO confirm valid values.
            scroll: whether the doc/summary panes get scrollbars.
            gray_out_stopwords: visually de-emphasize stopwords/punctuation.
        """
        self.document = document
        self.summaries = summaries
        self.semantic_alignments = semantic_alignments
        self.lexical_alignments = lexical_alignments
        self.layout = layout
        self.scroll = scroll
        self.gray_out_stopwords = gray_out_stopwords
    def html(self):
        """Build the full HTML for the document pane, summary pane and annotation key."""
        # Add document elements
        if self.document._.name == 'Document':
            document_name = 'Source Document'
        else:
            document_name = self.document._.name + ' summary'
        doc_header = div(
            id_="document-header"
        )(
            document_name
        )
        doc_elements = []
        # Add document content, which comprises multiple elements, one for each summary. Only the element corresponding to
        # selected summary will be visible.
        mu = MultiUnderline()
        for summary_idx, summary in enumerate(self.summaries):
            # Map each summary token index to the index of its sentence (drives coloring).
            token_idx_to_sent_idx = {}
            for sent_idx, sent in enumerate(summary.sents):
                for token in sent:
                    token_idx_to_sent_idx[token.i] = sent_idx
            is_selected_summary = (summary_idx == 0)  # By default, first summary is selected
            # Invert the semantic alignment: doc token index -> [(summary token index, sim)].
            if self.semantic_alignments is not None:
                doc_token_idx_to_matches = defaultdict(list)
                semantic_alignment = self.semantic_alignments[summary_idx]
                for summary_token_idx, matches in semantic_alignment.items():
                    for doc_token_idx, sim in matches:
                        doc_token_idx_to_matches[doc_token_idx].append((summary_token_idx, sim))
            else:
                doc_token_idx_to_matches = {}
            # Render each document token, highlighting semantic matches.
            token_elements = []
            for doc_token_idx, doc_token in enumerate(self.document):
                if doc_token.is_stop or doc_token.is_punct:
                    classes = ["stopword"]
                    if self.gray_out_stopwords:
                        classes.append("grayed-out")
                    el = span(
                        _class=" ".join(classes)
                    )(
                        doc_token.text
                    )
                else:
                    matches = doc_token_idx_to_matches.get(doc_token_idx)
                    if matches:
                        # Primary color comes from the best-matching summary token.
                        summary_token_idx, sim = max(matches, key=itemgetter(1))
                        sent_idx = token_idx_to_sent_idx[summary_token_idx]
                        color_primary = get_color(sent_idx)
                        highlight_color_primary = color_with_opacity(color_primary, sim)
                        props = {
                            'data-highlight-id': str(doc_token_idx),
                            'data-primary-color': highlight_color_primary
                        }
                        # One class + data-color per matching summary token, so the JS
                        # can swap highlight colors when hovering summary tokens.
                        match_classes = []
                        for summary_token_idx, sim in matches:
                            sent_idx = token_idx_to_sent_idx[summary_token_idx]
                            match_classes.append(f"summary-highlight-{summary_idx}-{summary_token_idx}")
                            color = color_with_opacity(get_color(sent_idx), sim)
                            props[f"data-color-{summary_idx}-{summary_token_idx}"] = color
                        props["data-match-classes"] = " ".join(match_classes)
                        el = self._highlight(
                            doc_token.text,
                            highlight_color_primary,
                            color_primary,
                            match_classes + ["annotation-hidden"],
                            **props
                        )
                    else:
                        el = doc_token.text
                token_elements.append(el)
            # Build underline spans in the document for each lexical (n-gram) alignment.
            spans = []
            if self.lexical_alignments is not None:
                lexical_alignment = self.lexical_alignments[summary_idx]
                for summary_span, doc_spans in lexical_alignment.items():
                    summary_span_start, summary_span_end = summary_span
                    span_id = f"{summary_idx}-{summary_span_start}-{summary_span_end}"
                    sent_idx = token_idx_to_sent_idx[summary_span_start]
                    for doc_span_start, doc_span_end in doc_spans:
                        spans.append((
                            doc_span_start,
                            doc_span_end,
                            sent_idx,
                            get_color(sent_idx),
                            span_id
                        ))
            token_elements = mu.markup(token_elements, spans)
            classes = ["main-doc", "bordered"]
            if self.scroll:
                classes.append("scroll")
            # NOTE(review): the trailing comma makes main_doc a 1-tuple — confirm
            # htbuilder flattens nested iterables as intended.
            main_doc = div(
                _class=" ".join(classes)
            )(
                token_elements
            ),
            classes = ["doc"]
            if is_selected_summary:
                classes.append("display")
            else:
                classes.append("nodisplay")
            doc_elements.append(
                div(
                    **{
                        "class": " ".join(classes),
                        "data-index": summary_idx
                    }
                )(
                    main_doc,
                    div(_class="proxy-doc"),
                    div(_class="proxy-scroll")
                )
            )
        summary_title = "Summary"
        summary_header = div(
            id_="summary-header"
        )(
            summary_title,
            div(id="summary-header-gap"),
        )
        # Build the summary pane: one item per summary, mirroring the doc markup.
        summary_items = []
        for summary_idx, summary in enumerate(self.summaries):
            token_idx_to_sent_idx = {}
            for sent_idx, sent in enumerate(summary.sents):
                for token in sent:
                    token_idx_to_sent_idx[token.i] = sent_idx
            spans = []
            matches_ngram = [False] * len(list(summary))
            if self.lexical_alignments is not None:
                lexical_alignment = self.lexical_alignments[summary_idx]
                for summary_span in lexical_alignment.keys():
                    start, end = summary_span
                    # Mark every token covered by an n-gram match.
                    matches_ngram[slice(start, end)] = [True] * (end - start)
                    span_id = f"{summary_idx}-{start}-{end}"
                    sent_idx = token_idx_to_sent_idx[start]
                    spans.append((
                        start,
                        end,
                        sent_idx,
                        get_color(sent_idx),
                        span_id
                    ))
            if self.semantic_alignments is not None:
                semantic_alignment = self.semantic_alignments[summary_idx]
            else:
                semantic_alignment = {}
            token_elements = []
            for token_idx, token in enumerate(summary):
                if token.is_stop or token.is_punct:
                    classes = ["stopword"]
                    if self.gray_out_stopwords:
                        classes.append("grayed-out")
                    el = span(
                        _class=" ".join(classes)
                    )(
                        token.text
                    )
                else:
                    classes = []
                    # Mark named-entity tokens (inside or beginning an entity).
                    if token.ent_iob_ in ('I', 'B'):
                        classes.append("entity")
                    if matches_ngram[token_idx]:
                        classes.append("matches-ngram")
                    matches = semantic_alignment.get(token_idx)
                    if matches:
                        # Highlight with the color/opacity of the best document match.
                        top_match = max(matches, key=itemgetter(1))
                        top_sim = max(top_match[1], 0)
                        top_doc_token_idx = top_match[0]
                        props = {
                            "data-highlight-id": f"{summary_idx}-{token_idx}",
                            "data-top-doc-highlight-id": str(top_doc_token_idx),
                            "data-top-doc-sim": f"{top_sim:.2f}",
                        }
                        classes.extend([
                            "annotation-hidden",
                            f"summary-highlight-{summary_idx}-{token_idx}"
                        ])
                        sent_idx = token_idx_to_sent_idx[token_idx]
                        el = self._highlight(
                            token.text,
                            color_with_opacity(get_color(sent_idx), top_sim),
                            color_with_opacity(get_color(sent_idx), 1),
                            classes,
                            **props
                        )
                    else:
                        if classes:
                            el = span(_class=" ".join(classes))(token.text)
                        else:
                            el = token.text
                token_elements.append(el)
            token_elements = mu.markup(token_elements, spans)
            classes = ["summary-item"]
            if summary_idx == 0:  # Default is for first summary to be selected
                classes.append("selected")
            summary_items.append(
                div(
                    **{"class": ' '.join(classes), "data-index": summary_idx}
                )(
                    div(_class="name")(summary._.name),
                    div(_class="content")(token_elements)
                )
            )
        classes = ["summary-list", "bordered"]
        if self.scroll:
            classes.append("scroll")
        if self.lexical_alignments is not None:
            classes.append("has-lexical-alignment")
        if self.semantic_alignments is not None:
            classes.append("has-semantic-alignment")
        summary_list = div(
            _class=" ".join(classes)
        )(
            summary_items
        )
        # Static legend explaining each annotation type; toggled by summvis.js.
        annotation_key = \
            """
            <ul class="annotation-key">
                <li class="annotation-key-label">Annotations:</li>
                <li id="option-lexical" class="option selected">
                    <span class="annotation-key-ngram">N-Gram overlap</span>
                </li>
                <li id="option-semantic" class="option selected">
                    <span class="annotation-key-semantic">Semantic overlap</span>
                </li>
                <li id="option-novel" class="option selected">
                    <span class="annotation-key-novel">Novel words</span>
                </li>
                <li id="option-entity" class="option selected">
                    <span class="annotation-key-entity">Novel entities</span>
                </li>
            </ul>
            """
        body = div(
            annotation_key,
            div(
                _class=f"vis-container {self.layout}-layout"
            )(
                div(
                    _class="doc-container"
                )(
                    doc_header,
                    *doc_elements
                ),
                div(
                    _class="summary-container"
                )(
                    summary_header,
                    summary_list
                )
            ),
        )
        # Assemble the final fragment: CDN stylesheets/scripts plus local assets.
        return [
            """<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="<KEY>" crossorigin="anonymous">""",
            local_stylesheet(Path(__file__).parent / "resources" / "summvis.css"),
            """<link rel="preconnect" href="https://fonts.gstatic.com">
            <link href="https://fonts.googleapis.com/css2?family=Roboto:wght@400;500&display=swap" rel="stylesheet">""",
            body,
            """<script
                src="https://code.jquery.com/jquery-3.5.1.min.js"
                integrity="sha256-9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0="
                crossorigin="anonymous"></script>
            <script src="https://cdn.jsdelivr.net/npm/bootstrap@4.6.0/dist/js/bootstrap.bundle.min.js"
                integrity="<KEY>"
                crossorigin="anonymous"></script>""",
            local_script(Path(__file__).parent / "resources" / "jquery.color-2.1.2.min.js"),
            local_script(Path(__file__).parent / "resources" / "summvis.js"),
            """<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/js/bootstrap.bundle.min.js" integrity="<KEY>" crossorigin="anonymous"></script>"""
        ]
    def _highlight(
        self,
        token: Union[str, HtmlElement],
        background_color,
        dotted_underline_color,
        classes: List[str],
        **props
    ):
        """Wrap a token in a highlight <span> with a background and a dotted underline.

        Extra keyword args become data-* attributes consumed by summvis.js.
        """
        return span(
            _class=" ".join(classes + ["highlight"]),
            style=styles(
                background_color=background_color,
                border_bottom=f"4px dotted {dotted_underline_color}",
            ),
            **props
        )(token)
# Separator rendered between tokens so continuing underlines span the gap.
SPACE = " "
class MultiUnderline:
    def __init__(
        self,
        underline_thickness=3,
        underline_spacing=1
    ):
        """Configure underline rendering.

        Args:
            underline_thickness: thickness of each underline, in pixels.
            underline_spacing: vertical gap between stacked underlines, in pixels.
        """
        self.underline_thickness = underline_thickness
        self.underline_spacing = underline_spacing
    def markup(
        self,
        tokens: List[Union[str, HtmlElement]],
        spans: List[Tuple[int, int, int, str, str]]
    ):
        """Style text with multiple layers of colored underlines.

        Args:
            tokens: list of tokens, either string or html element
            spans: list of (start_pos, end_pos, rank, color, id) tuples defined as:
                start_pos: start position of underline span
                end_pos: end position of underline span
                rank: rank for stacking order of underlines, all else being equal
                color: color of underline
                id: id of underline (encoded as a class label in resulting html element)

        Returns:
            List of HTML elements
        """
        # Map from span start position to span
        start_to_spans = defaultdict(list)
        for span in spans:
            start = span[0]
            start_to_spans[start].append(span)
        # Map from each underline slot position to list of active spans
        slot_to_spans = {}
        # Collection of html elements
        elements = []
        first_token_in_line = True
        for pos, token in enumerate(tokens):
            # Remove spans that are no longer active (their end <= pos).
            slot_to_spans = defaultdict(
                list,
                {
                    slot: [span for span in spans if span[1] > pos]  # span[1] contains end of spans
                    for slot, spans in slot_to_spans.items() if spans
                }
            )
            # Add underlines to space between tokens for any continuing underlines
            if first_token_in_line:
                first_token_in_line = False
            else:
                elements.append(self._get_underline_element(SPACE, slot_to_spans))
            # Find slot for any new spans
            new_spans = start_to_spans.pop(pos, None)
            if new_spans:
                # Longest spans first so a containing span claims a slot before
                # the spans nested inside it; ties broken by rank.
                new_spans.sort(
                    key=lambda span: (-(span[1] - span[0]), span[2]))  # Sort by span length (reversed), rank
                for new_span in new_spans:
                    # Find an existing slot or add a new one
                    for slot, spans in sorted(slot_to_spans.items(), key=itemgetter(0)):  # Sort by slot index
                        if spans:
                            containing_span = spans[
                                0]  # The first span in the slot strictly contains all other spans
                            containing_start, containing_end = containing_span[0:2]
                            containing_color = containing_span[3]
                            start, end = new_span[0:2]
                            color = new_span[3]
                            # If the new span (1) is strictly contained in this span, or (2) exactly matches this span
                            # and is the same color, then add span to this slot
                            if end <= containing_end and (
                                    (start > containing_start or end < containing_end) or
                                    (start == containing_start and end == containing_end and color == containing_color)
                            ):
                                spans.append(new_span)
                                break
                    else:
                        # Find a new slot index to add the span
                        for slot_index in count():
                            spans = slot_to_spans[slot_index]
                            if not spans:  # If slot is free, take it
                                spans.append(new_span)
                                break
            if token in ("\n", "\r", "\r\n"):
                # Line break: emit <br> and suppress the leading inter-token space.
                elements.append(br())
                first_token_in_line = True
            else:
                # Add underlines to token for all active spans
                elements.append(self._get_underline_element(token, slot_to_spans))
        return elements
    def _get_underline_element(self, token, slot_to_spans):
        """Wrap `token` in one nested <span> per underline slot (slot 0 innermost).

        Empty slots below the highest occupied slot get a transparent underline
        so the stacking heights stay consistent across tokens.
        """
        if not slot_to_spans:
            return token
        max_slot_index = max(slot_to_spans.keys())
        element = token
        for slot_index in range(max_slot_index + 1):
            spans = slot_to_spans[slot_index]
            if not spans:
                color = "rgba(0, 0, 0, 0)"  # Transparent element w/opacity=0
                props = {}
            else:
                containing_slot = spans[0]
                color = containing_slot[3]
                classes = ["underline"]
                if token != SPACE:
                    classes.append("token-underline")
                classes.extend([f"span-{span[4]}" for span in spans])  # Encode ids in class names
                props = {
                    "class": " ".join(classes),
                    "data-primary-color": color
                }
            if slot_index == 0:
                padding_bottom = 0
            else:
                padding_bottom = self.underline_spacing
            display = "inline-block"
            element = htbuilder.span(
                style=styles(
                    display=display,
                    border_bottom=f"{self.underline_thickness}px solid",
                    border_color=color,
                    padding_bottom=px(padding_bottom),
                ),
                **props
            )(element)
        # Return outermost nested span
        return element
if __name__ == "__main__":
    from htbuilder import div

    # Manual smoke test: render overlapping colored underlines over a short phrase.
    text = "The quick brown fox jumps"
    tokens = text.split()
    # Immediately replaced with a mixed str/HtmlElement token list to exercise
    # both input kinds accepted by MultiUnderline.markup.
    tokens = [
        "The",
        htbuilder.span(style=styles(color="red"))("quick"),
        "brown",
        "fox",
        "jumps"
    ]
    # (start, end, rank, color, id) — includes nested and exactly-matching spans.
    spans = [
        (0, 2, 0, "green", "green1"),
        (1, 3, 0, "orange", "orange1"),
        (3, 4, 0, "red", "red1"),
        (2, 4, 0, "blue", "blue1"),
        (1, 5, 0, "orange", "orange1"),
    ]
    mu = MultiUnderline()
    html = str(div(mu.markup(tokens, spans)))
    print(html)
| StarcoderdataPython |
6616355 |
#
# https://stackoverflow.com/a/47706195/1832058
#
import requests
import re
from bs4 import BeautifulSoup

# URL of the NCBI protein page to scrape (FASTA report view).
url = 'https://www.ncbi.nlm.nih.gov/protein/EGW15053.1?report=fasta'

# Fetch the page and parse its HTML.
r = requests.get(url)
html_content = r.text
soup = BeautifulSoup(html_content, 'html.parser')

# BUG FIX: attrs must be a dict mapping attribute name -> value; the original
# set literal {'class', 'seq gbff'} matched nothing reliably. Also, find_all()
# returns a ResultSet, which has no .children — use find() for a single tag.
div = soup.find('div', attrs={'class': 'seq gbff'})
if div is not None:
    for child in div.children:
        print(child)

# BUG FIX: 'aatrs' was a typo for 'attrs'; set literal replaced with a dict.
ff_lines = soup.find_all('span', attrs={'class': 'ff_line'})
That's actually easy for me: `div = soup.find_all('div', attrs={'class', 'seq gbff'})` contains the unique value for each page I want to access; I just have to replace the id in each URL.
url = 'https://www.ncbi.nlm.nih.gov/sviewer/viewer.fcgi?id=344258949&db=protein&report=fasta&extrafeat=0&fmt_mask=0&retmode=html&withmarkup=on&tool=portal&log$=seqview&maxdownloadsize=1000000'
I checked: the URL needs only three arguments to get the data — id=344258949&report=fasta&retmode=text
3422097 | """Process reporting of tests or test steps."""
from __future__ import annotations
import signal
from datetime import datetime
from enum import IntEnum
from functools import singledispatch
from io import BufferedReader, BytesIO
from types import FrameType, TracebackType
from typing import Any, Literal, NoReturn, Tuple
import pytest
from ._helpers import Attachment, build_terminal_report, get_item_nodeid, get_spec, html_row
from ._pytest_adaptavist import PytestAdaptavist
class MetaBlockAborted(Exception):
    """Internal exception used to abort meta block execution.

    Raised inside a MetaBlock body and handled in MetaBlock.__exit__; it is
    not part of the public API.
    """
class MetaBlock:
"""
Context Manager class used for processing/reporting single test blocks/steps.
:param request:
:param timeout: Timeout in seconds
:param step: step number as integer starting at 1
"""
    class Action(IntEnum):
        """Action to take, if a test case fails."""

        # NOTE(review): NONE and FAIL_CONTEXT share the value 0, so under
        # IntEnum semantics FAIL_CONTEXT is an alias of NONE — confirm intended.
        # NOTE(review): the attribute docstrings below appear shifted relative
        # to the members they follow (STOP_EXIT_SESSION has none) — verify
        # against the upstream project documentation before relying on them.
        NONE = 0
        """If condition fails, collect assumption, set block/test to 'Fail' and continue (just like 'assume')."""
        FAIL_CONTEXT = 0
        """If condition fails, skip execution of this block, set it to 'Blocked' and continue with next block."""
        STOP_CONTEXT = 1
        """If condition fails, skip execution of this block/test, set it to 'Fail' and continue with next test (just like 'assert')."""
        FAIL_METHOD = 2
        """If condition fails, skip execution of this block/test, set it to 'Blocked' and continue with next test."""
        STOP_METHOD = 3
        """If condition fails, skip execution of this block/test, set it to 'Fail' and block following tests."""
        FAIL_SESSION = 4
        """If condition fails, skip execution of this block/test, set it to 'Blocked' and block following tests as well."""
        STOP_SESSION = 5
        """If condition fails, skip execution of this block/test, set it to 'Blocked' and exit session."""
        FAIL_EXIT_SESSION = 6
        """If condition fails, skip execution of this block/test, set it to 'Blocked' and exit session."""
        STOP_EXIT_SESSION = 7
    def __init__(self, request: pytest.FixtureRequest, timeout: int, step: int | None = None):
        """Capture test-item context and prepare the per-step result-data slot.

        Args:
            request: pytest fixture request for the current test item.
            timeout: seconds allowed for the block before SIGALRM fires.
            step: optional 1-based step number; suffixes the item name and data key.
        """
        fullname = get_item_nodeid(request.node)
        self.item = request.node
        self.items = request.session.items
        self.item_name = self.item.name + ("_" + str(step) if step else "")
        self.step = step
        self.start = datetime.now().timestamp()
        self.stop = datetime.now().timestamp()
        self.timeout = timeout
        self.adaptavist: PytestAdaptavist = request.config.pluginmanager.getplugin("_adaptavist")
        # Shared mutable dict holding this step's reported comment/attachment;
        # created on first access, reused thereafter (setdefault).
        self.data: dict[str, Any] = self.adaptavist.test_result_data.setdefault(fullname + ("_" + str(step) if step else ""), {
            "comment": None, "attachment": None
        })
    @staticmethod
    def _timeout_handler(signum: int, frame: FrameType | None) -> NoReturn:
        """Handle test cases running too long.

        SIGALRM handler installed in __enter__; converts the alarm into an
        exception so the timeout surfaces inside the block.
        """
        raise TimeoutError("The test step exceeded its timewindow and timed out")
    def __enter__(self) -> MetaBlock:
        """Start the block: report setup, reset timing and arm the watchdog."""
        if self.step:
            # level = 2 to get info from outside of this plugin (i.e. caller of 'with metablock(...)')
            build_terminal_report(when="setup", item=self.item, step=self.step, level=2)
        self.start = datetime.now().timestamp()
        # fresh per-step assumption list so __exit__ can compare counts
        self.adaptavist.failed_assumptions_step = []
        # SIGALRM-based timeout; NOTE(review): SIGALRM is POSIX-only, so this
        # presumably does not work on Windows -- confirm supported platforms
        signal.signal(signal.SIGALRM, self._timeout_handler)
        signal.alarm(self.timeout)
        return self
    def __exit__(self, exc_type: type, exc_value: Exception, traceback: TracebackType) -> bool:
        """Finish the block: disarm the watchdog, classify the outcome and report.

        Returns True only for MetaBlockAborted, suppressing that internal
        control-flow exception; any other exception propagates.
        """
        # disarm the timeout first so reporting below cannot be interrupted
        signal.alarm(0)
        self.stop = datetime.now().timestamp()
        fullname = get_item_nodeid(self.item)
        if exc_type is TimeoutError:
            self.data["blocked"] = True
            pytest.skip(msg=f"Blocked. {self.item_name} failed: The test step exceeded its timewindow and timed out")
        skip_status = self.item.get_closest_marker("block") or self.item.get_closest_marker("skip")
        # if method was blocked dynamically (during call) an appropriate marker is used
        # to handle the reporting in the same way as for statically blocked methods
        # (status will be reported as "Blocked" with given comment in Adaptavist)
        if not skip_status and (exc_type and exc_type in (pytest.block.Exception, pytest.skip.Exception)  # type:ignore
                                or exc_type in (None, MetaBlockAborted) and self.data.get("blocked") is True):
            reason = self.data.get("comment") or (
                str(exc_value).partition("\n")[0] if exc_type and exc_type in (pytest.block.Exception, pytest.skip.Exception) else "")  # type:ignore
            skip_status = pytest.mark.block(reason=reason) if ((exc_type and exc_type is pytest.block.Exception)  # type:ignore
                                                               or self.data.get("blocked", None) is True) else pytest.mark.skip(reason=reason)
        # report exceptions (except the internal abort marker) into the comment
        if exc_type and exc_type is not MetaBlockAborted:
            exc_info = self.adaptavist.build_exception_info(fullname, exc_type, exc_value, traceback)
            if (exc_info and exc_info not in (self.data.get("comment") or "") and (exc_type is not pytest.skip.Exception) and not skip_status):
                self.data["comment"] = "".join((self.data.get("comment", None) or "", html_row("failed", exc_info)))
        # passed == no exception and no new failed assumptions in this step
        passed = not exc_type and (len(self.adaptavist.failed_assumptions_step) <= len(getattr(pytest, "_failed_assumptions", [])[:]))
        status: Literal["passed", "failed", "skipped", "blocked"] = ("passed" if passed else "failed") if not skip_status \
            else ("blocked" if (skip_status.name == "block" or self.data.get("blocked")) else "skipped")
        # custom item callback; prefix carries the xdist worker id in "each" mode
        prefix = getattr(self.item.config, "workerinput", {}).get("workerid") \
            if getattr(self.item.config, "workerinput", {}).get("options", {}).get("dist") == "each" \
            else None
        getattr(self.item, "meta_block_cb",
                lambda **kwargs: None)(signature="_".join(filter(None, (prefix, self.item.name, str(self.step) if self.step else "x"))), status=status)
        if self.step:
            build_terminal_report(when="call", item=self.item, status=status, step=self.step,
                                  level=2)  # level = 2 to get info from outside of this plugin (i.e. caller of 'with metablock(...)'))
        # adjust parent's test result status if necessary (needed for makereport call later)
        if self.adaptavist.test_result_data[fullname].get("blocked") is True and not passed and not skip_status:
            self.adaptavist.test_result_data[fullname]["blocked"] = None
        elif self.data.get("blocked") is True:
            self.adaptavist.test_result_data[fullname]["blocked"] = True
        if not getattr(self.item.config.option, "adaptavist", False):
            # adaptavist reporting disabled: no need to proceed here
            return exc_type is MetaBlockAborted  # suppress MetaBlockAborted exception
        marker = self.item.get_closest_marker("testcase")
        if marker is not None:
            test_case_key = marker.kwargs["test_case_key"]
            test_step_key = marker.kwargs["test_step_key"]
            if test_step_key or not self.step:
                # if it's a test step method, we should not be here
                # if it's the test case context, we can return here as well
                # pytest_runtest_makereport takes care about reporting in both cases
                return exc_type is MetaBlockAborted  # suppress MetaBlockAborted exception
            specs = get_spec(get_item_nodeid(self.item))
            self.adaptavist.create_report(test_case_key, self.step, self.stop - self.start, skip_status, passed, self.data, specs)
        self.data["done"] = True  # tell pytest_runtest_makereport that this item has been processed already
        return exc_type is MetaBlockAborted  # suppress MetaBlockAborted exception
    def check(self, condition: bool, message: str | None = None, action_on_fail: Action = Action.NONE, **kwargs: Any):
        """
        Check given condition.

        :param condition: The condition to be checked
        :param message: The info text in case of failed condition
        :param action_on_fail: Action in case of failed condition (default: continue, just like 'assume')
        :key attachment: The attachment as filepath name or file-like object
        :key filename: The optional filename
        :key message_on_fail: The info text in case of failed condition (same as message)
        :key message_on_pass: The info text in case of passed condition
        :key description: Optional details about test results (e.g. can be a html table or more)
        """
        attachment = kwargs.pop("attachment", None)
        filename = kwargs.pop("filename", None)
        description = kwargs.pop("description", None)
        message_on_fail = kwargs.pop("message_on_fail", None) or message or ""
        message_on_pass = kwargs.pop("message_on_pass", "")
        if kwargs:
            # NOTE(review): raising SyntaxWarning is unusual (TypeError is the
            # convention for unexpected kwargs); kept for backward compatibility
            raise SyntaxWarning(f"Unknown arguments: {kwargs}")
        # collect attachments at test-case or test-step scope
        if attachment and self.adaptavist.enabled:
            if not self.data.get("attachment_test_case"):
                self.data["attachment_test_case"] = []
            if not self.data.get("attachment_test_step"):
                self.data["attachment_test_step"] = []
            content, name = _read_attachment(attachment)
            if self.step:
                self.data["attachment_test_step"].append(Attachment(content, filename=filename or name or "", step=self.step or 0))
            else:
                self.data["attachment_test_case"].append(Attachment(content, filename=filename or name or "", step=self.step or 0))
        # append a pass/fail row to the html comment shown in Adaptavist
        if not condition and message_on_fail:
            self.data["comment"] = "".join((self.data.get("comment", "") or "", html_row("failed", message_on_fail)))
        elif condition and message_on_pass:
            self.data["comment"] = "".join((self.data.get("comment", "") or "", html_row("passed", message_on_pass)))
        if description:
            self.data["description"] = "<br>".join((self.data.get("description", ""), description))
        # custom item callback; prefix carries the xdist worker id in "each" mode
        prefix = getattr(self.item.config, "workerinput", {}).get("workerid") \
            if getattr(self.item.config, "workerinput", {}).get("options", {}).get("dist") == "each" \
            else None
        # running counter of checks inside this block, used in the signature
        self.__dict__["numchecks"] = self.__dict__.get("numchecks", 0) + 1
        signature = "_".join(filter(None, (prefix, self.item.name, str(self.step) if self.step else "x", str(self.__dict__["numchecks"]))))
        getattr(self.item, "meta_block_condition_cb", lambda **kwargs: None)(signature=signature,
                                                                             condition=condition,
                                                                             reference=message_on_pass if condition else message_on_fail)
        if condition:
            return
        self._process_failed_condition(action_on_fail, message_on_fail)
    def _process_failed_condition(self, action_on_fail: Action, message_on_fail: str):
        """Process failed condition depending on action_on_fail.

        NOTE(review): several branches use ``assert False`` to fail the test;
        asserts are stripped under ``python -O``, so these would silently pass
        in optimized mode -- confirm whether pytest.fail() was intended.
        """
        fullname = get_item_nodeid(self.item)
        if action_on_fail == self.Action.FAIL_METHOD:
            # FAIL_METHOD: skip execution of this block/test, set it to 'Fail' and continue with next test
            assert False, message_on_fail
        elif action_on_fail == self.Action.STOP_CONTEXT:
            # STOP_CONTEXT: skip execution of this block, set it to 'Blocked' and continue with next block
            self.data["blocked"] = True
            raise MetaBlockAborted()
        elif action_on_fail == self.Action.STOP_METHOD:
            # STOP_METHOD: skip execution of this block/test, set it to 'Blocked' and continue with next test
            self.data["blocked"] = True
            pytest.skip(msg=f"Blocked. {self.item_name} failed: {message_on_fail}")
        elif action_on_fail == self.Action.STOP_SESSION:
            # STOP_SESSION: skip execution of this block/test, set it to 'Blocked' and block following tests as well
            # (marks every collected item, including the current one)
            for item in self.items:
                item.add_marker("block")
            self.adaptavist.test_result_data[fullname]["blocked"] = True
            self.adaptavist.test_result_data[fullname]["comment"] = f"Blocked. {self.item_name} failed: {message_on_fail}"
            assert False, message_on_fail
        elif action_on_fail == self.Action.FAIL_SESSION:
            # FAIL_SESSION: skip execution of this block/test, set it to 'Fail' and block following tests
            # (the current item is excluded via the substring check below)
            for item in self.items:
                if item.name not in self.item.name:
                    item.add_marker("block")
            self.adaptavist.test_result_data[fullname]["blocked"] = True
            self.adaptavist.test_result_data[fullname]["comment"] = f"Blocked. {self.item_name} failed: {message_on_fail}"
            assert False, message_on_fail
        elif action_on_fail == self.Action.STOP_EXIT_SESSION:
            # EXIT_SESSION: skip execution of this block/test, set it to 'Blocked' and exit session
            self.item.add_marker("block")
            pytest.exit(msg=f"Exiting pytest. {self.item_name} failed: {message_on_fail}", returncode=1)
        elif action_on_fail == self.Action.FAIL_EXIT_SESSION:
            # EXIT_SESSION: skip execution of this block/test, set it to 'Blocked' and exit session
            pytest.exit(msg=f"Exiting pytest. {self.item_name} failed: {message_on_fail}")
        else:
            # CONTINUE: try to collect failed assumption, set result to 'Fail' and continue
            pytest.assume(expr=False, msg=message_on_fail)  # type:ignore
@singledispatch
def _read_attachment(attachment: Any) -> Tuple[BytesIO, str]:
"""Read content of an attachment."""
raise TypeError(f"Type {type(attachment)} is not supported for attachments.")
@_read_attachment.register
def _(attachment: str) -> Tuple[BytesIO, str]:
    """Read content of an attachment given with filename.

    Returns the file content wrapped in a BytesIO plus the file's name.
    """
    with open(attachment, "rb") as file_pointer:
        return BytesIO(file_pointer.read()), file_pointer.name
@_read_attachment.register  # type: ignore
def _(attachment: BufferedReader) -> Tuple[BytesIO, str]:
    """Read content of an attachment given as file pointer.

    Consumes the reader from its current position; returns content and name.
    """
    return BytesIO(attachment.read()), attachment.name
| StarcoderdataPython |
4848212 | import argparse
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
from random import shuffle
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
import data_
from baselines.graphvae.model import GraphVAE
from baselines.graphvae.data import GraphAdjSampler
CUDA = 1
LR_milestones = [500, 1000]
def build_model(args, max_num_nodes):
    """Construct a GraphVAE for graphs with up to ``max_num_nodes`` nodes.

    :param args: parsed CLI arguments; ``args.feature_type`` selects the node
        feature encoding and thereby the encoder input dimension:
        'id' -> one-hot node id, 'deg' -> degree scalar, 'struct' -> 2 features.
    :param max_num_nodes: maximum number of nodes per graph.
    :return: an uninitialized GraphVAE model.
    :raises ValueError: for an unknown ``args.feature_type`` (previously this
        fell through to an UnboundLocalError on ``input_dim``).
    """
    if args.feature_type == 'id':
        input_dim = max_num_nodes
    elif args.feature_type == 'deg':
        input_dim = 1
    elif args.feature_type == 'struct':
        input_dim = 2
    else:
        raise ValueError('unknown feature_type: {!r}'.format(args.feature_type))
    # hidden dim 64, latent dim 256 (fixed hyperparameters of this baseline)
    model = GraphVAE(input_dim, 64, 256, max_num_nodes)
    return model
def train(args, dataloader, model):
    """Train the GraphVAE on CUDA, printing the loss for each processed batch.

    NOTE(review): the initial ``epoch = 1`` is dead (immediately shadowed by
    the loop variable), and the trailing ``break`` means only the FIRST batch
    of each epoch is used -- confirm whether that is intended.
    """
    epoch = 1
    optimizer = optim.Adam(list(model.parameters()), lr=args.lr)
    # NOTE(review): gamma is set to the learning rate (default 0.001), which
    # shrinks the LR by 1000x at each milestone; a typical MultiStepLR gamma
    # is ~0.1 -- confirm this is not a copy/paste slip.
    scheduler = MultiStepLR(optimizer, milestones=LR_milestones, gamma=args.lr)
    model.train()
    for epoch in range(5000):
        for batch_idx, data in enumerate(dataloader):
            model.zero_grad()
            features = data['features'].float()
            adj_input = data['adj'].float()
            # requires a CUDA device; tensors are moved to the GPU here
            features = Variable(features).cuda()
            adj_input = Variable(adj_input).cuda()
            loss = model(features, adj_input)
            print('Epoch: ', epoch, ', Iter: ', batch_idx, ', Loss: ', loss)
            loss.backward()
            optimizer.step()
            scheduler.step()
            break
def arg_parse():
    """Parse command-line arguments for the GraphVAE trainer.

    Returns an argparse.Namespace with dataset/optimizer/loader settings;
    defaults train on small grid graphs with batch size 1.
    """
    parser = argparse.ArgumentParser(description='GraphVAE arguments.')
    io_parser = parser.add_mutually_exclusive_group(required=False)
    io_parser.add_argument('--dataset', dest='dataset',
                           help='Input dataset.')
    parser.add_argument('--lr', dest='lr', type=float,
                        help='Learning rate.')
    parser.add_argument('--batch_size', dest='batch_size', type=int,
                        help='Batch size.')
    parser.add_argument('--num_workers', dest='num_workers', type=int,
                        help='Number of workers to load data.')
    parser.add_argument('--max_num_nodes', dest='max_num_nodes', type=int,
                        help='Predefined maximum number of nodes in train/test graphs. -1 if determined by \
                        training data.')
    parser.add_argument('--feature', dest='feature_type',
                        help='Feature used for encoder. Can be: id, deg')
    # defaults applied when the corresponding flag is absent
    parser.set_defaults(dataset='grid',
                        feature_type='id',
                        lr=0.001,
                        batch_size=1,
                        num_workers=1,
                        max_num_nodes=-1)
    return parser.parse_args()
def main():
    """Entry point: load graphs, build the GraphVAE and run training on CUDA.

    NOTE(review): for a --dataset value other than 'enzymes'/'grid', ``graphs``
    is never bound and the code below raises NameError; ``graphs_test`` is
    computed but unused (training uses all graphs).
    """
    prog_args = arg_parse()
    # pin the visible GPU before torch initializes CUDA
    os.environ['CUDA_VISIBLE_DEVICES'] = str(CUDA)
    print('CUDA', CUDA)
    ### running log
    if prog_args.dataset == 'enzymes':
        graphs = data_.Graph_load_batch(min_num_nodes=10, name='ENZYMES')
        num_graphs_raw = len(graphs)
    elif prog_args.dataset == 'grid':
        # small synthetic 2x2..3x3 grid graphs
        graphs = []
        for i in range(2, 4):
            for j in range(2, 4):
                graphs.append(nx.grid_2d_graph(i, j))
        num_graphs_raw = len(graphs)
    if prog_args.max_num_nodes == -1:
        max_num_nodes = max([graphs[i].number_of_nodes() for i in range(len(graphs))])
    else:
        max_num_nodes = prog_args.max_num_nodes
        # remove graphs with number of nodes greater than max_num_nodes
        graphs = [g for g in graphs if g.number_of_nodes() <= max_num_nodes]
    graphs_len = len(graphs)
    print('Number of graphs removed due to upper-limit of number of nodes: ',
          num_graphs_raw - graphs_len)
    graphs_test = graphs[int(0.8 * graphs_len):]
    # graphs_train = graphs[0:int(0.8*graphs_len)]
    graphs_train = graphs
    print('total graph num: {}, training set: {}'.format(len(graphs), len(graphs_train)))
    print('max number node: {}'.format(max_num_nodes))
    dataset = GraphAdjSampler(graphs_train, max_num_nodes, features=prog_args.feature_type)
    # sample_strategy = torch.utils.data.sampler.WeightedRandomSampler(
    #     [1.0 / len(dataset) for i in range(len(dataset))],
    #     num_samples=prog_args.batch_size,
    #     replacement=False)
    dataset_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=prog_args.batch_size,
        num_workers=prog_args.num_workers)
    model = build_model(prog_args, max_num_nodes).cuda()
    train(prog_args, dataset_loader, model)
# standard script entry point
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5112521 | <gh_stars>1-10
from kafka import KafkaProducer
from os import environ
import tweepy
import json
import logging
class KafkaStreamListener(tweepy.StreamListener):
    """Tweepy stream listener that forwards each raw tweet to a Kafka topic."""

    def __init__(self, producer, topic):
        """Store the Kafka producer and destination topic.

        :param producer: a KafkaProducer used to publish incoming tweets
        :param topic: name of the Kafka topic to publish to
        """
        super(KafkaStreamListener, self).__init__()
        self._producer = producer
        self._topic = topic

    def on_data(self, raw_data):
        """Publish one raw tweet payload to Kafka and keep the stream open.

        The send is asynchronous; success/failure is handled by the
        module-level callback/errback. Returning True keeps streaming.
        """
        self._producer \
            .send(self._topic, json.loads(raw_data)) \
            .add_callback(on_send_success) \
            .add_errback(on_send_error)
        logging.info(raw_data)
        return True

    def on_error(self, status):
        """Log the HTTP status code reported by the Twitter stream."""
        logging.error(status)
def on_send_success(record_metadata):
    """Log where a successfully delivered Kafka record landed."""
    delivered_to = (record_metadata.topic, record_metadata.partition, record_metadata.offset)
    logging.info(delivered_to)
def on_send_error(e):
    """Log a failed Kafka send together with the exception's traceback."""
    # exc_info=e makes logging render the full traceback of the failure
    logging.error('Issue sending message', exc_info=e)
def start_sending_message(bootstrap_servers, topic, track):
    """Stream tweets matching ``track`` keywords into a Kafka topic (blocking).

    Reads Twitter credentials from the environment variables CONSUMER_API_KEY,
    CONSUMER_API_SECRET, ACCESS_TOKEN and ACCESS_TOKEN_SECRET (KeyError if
    any is missing).

    :param bootstrap_servers: list of Kafka broker addresses
    :param topic: Kafka topic to publish tweets to
    :param track: list of keywords passed to the Twitter filter stream
    """
    auth = tweepy.OAuthHandler(environ['CONSUMER_API_KEY'], environ['CONSUMER_API_SECRET'])
    auth.set_access_token(key=environ['ACCESS_TOKEN'], secret=environ['ACCESS_TOKEN_SECRET'])
    kafka_producer = KafkaProducer(
        bootstrap_servers=bootstrap_servers,
        # tweets are published as UTF-8 encoded JSON
        value_serializer=lambda m: json.dumps(m).encode('utf8'),
        retries=5
    )
    kafka_stream_listener = KafkaStreamListener(
        producer=kafka_producer,
        topic=topic
    )
    tweepy_stream = tweepy.Stream(auth=auth, listener=kafka_stream_listener)
    logging.info('Starting twitter stream...')
    # blocks until the stream is closed or errors out
    tweepy_stream.filter(track=track)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # NOTE(review): this logger is never used afterwards -- dead assignment?
    producer_logger = logging.getLogger('producer')
    # NOTE(review): broker address and topic are hard-coded for a local setup
    start_sending_message(
        bootstrap_servers=['192.168.0.12:9092'],
        topic='test1',
        track=['weather']
    )
| StarcoderdataPython |
3470693 | <gh_stars>10-100
#Clair3 pileup parameters
REPO_NAME="Clair3"
# NOTE(review): `re` is not referenced in this module -- unused import?
import re
from itertools import accumulate
zstd='zstd'
default_optimizer = "Radam"
default_loss_function = "FocalLoss"
# supported sequencing platforms: Oxford Nanopore, PacBio HiFi, Illumina
support_platform = {'ont', 'hifi','ilmn'}
min_af = 0.08
# per-platform minimum allele frequency for calling
min_af_dict = {'ont':0.15, 'hifi':min_af, 'ilmn':min_af }
#as three platform training data vary in depth distribution, we recommend below max_depth base on max training data depth for calling
max_depth = 144
max_depth_dict = {'ont':max_depth, 'hifi':max_depth, 'ilmn':max_depth}
maximum_variant_length_that_need_infer = 50
maximum_variant_length_that_need_infer_include_long_indel = 100000
cal_precise_long_indel_af = False
long_indel_distance_proportion = 0.1
# minimum mapping quality, base quality and coverage thresholds
min_mq = 5
min_bq = 0
min_coverage = 2
tensorflow_threads = 4
#GVCF parameters
base_err = 0.001
gq_bin_size = 5
#Pileup input feature list (index of each channel):
#  0    1    2    3    4    5     6    7     8    9    10   11   12   13   14    15   16    17
channel = ('A', 'C', 'G', 'T', 'I', 'I1', 'D', 'D1', '*', 'a', 'c', 'g','t', 'i', 'i1','d', 'd1','#')
channel_size = len(channel)
# 16 flanking bases on each side of the candidate -> 33 positions total
flankingBaseNum = 16
no_of_positions = 2 * flankingBaseNum + 1
ont_input_shape = input_shape = [no_of_positions, channel_size]
# output head sizes; label_shape_cum gives the cumulative split offsets
label_shape = [21, 3, no_of_positions, no_of_positions]
label_size = sum(label_shape)
label_shape_cum = list(accumulate(label_shape))
expandReferenceRegion = 1000
# samtools view -F flag: excludes unmapped/secondary/QC-fail/dup/supplementary
SAMTOOLS_VIEW_FILTER_FLAG = 2316
partition_size = 500000
region_size =1000
phasing_window_size = 30000
extend_bp=10
#Training hyperparameters
chunk_size = 250
trainBatchSize = 2000
predictBatchSize = 200
initialLearningRate = 1e-3
trainingDatasetPercentage = 0.90
l2RegularizationLambda = 0.0001
maxEpoch = 30
RANDOM_SEED = None
OPERATION_SEED = None
| StarcoderdataPython |
11394315 | <filename>client/verta/verta/_swagger/_public/modeldb/model/PathLocationTypeEnumPathLocationType.py
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class PathLocationTypeEnumPathLocationType(BaseType):
    """Enum-like wrapper over the swagger PathLocationType string values.

    Auto-generated; instances hold exactly one of ``_valid_values``.
    """
    _valid_values = [
      "LOCAL_FILE_SYSTEM",
      "NETWORK_FILE_SYSTEM",
      "HADOOP_FILE_SYSTEM",
      "S3_FILE_SYSTEM",
    ]

    def __init__(self, val):
        """Validate and store one of the allowed enum strings."""
        if val not in PathLocationTypeEnumPathLocationType._valid_values:
            raise ValueError('{} is not a valid value for PathLocationTypeEnumPathLocationType'.format(val))
        self.value = val

    def to_json(self):
        """Serialize as the bare string value."""
        return self.value

    def from_json(v):
        """Build an instance from a JSON string or an integer index.

        NOTE(review): defined without @staticmethod -- calling it on an
        instance would mis-bind ``v`` to self; it works only when called on
        the class itself.
        """
        if isinstance(v, str):
            return PathLocationTypeEnumPathLocationType(v)
        else:
            # integer payloads index into the valid-values list
            return PathLocationTypeEnumPathLocationType(PathLocationTypeEnumPathLocationType._valid_values[v])
| StarcoderdataPython |
11291095 | import tensorflow as tf
import numpy as np
from absl.flags import FLAGS
@tf.function
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
    """Scatter ground-truth boxes into one YOLO output grid.

    Keeps only boxes whose best anchor belongs to ``anchor_idxs`` and writes
    (x1, y1, x2, y2, obj=1, class) at the grid cell containing the box center.
    """
    # y_true: (batch_size, (nbboxes), (x1, y1, x2, y2, class, best_anchor))
    N = y_true.nrows()
    # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
    y_true_out = tf.zeros(
        (N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))
    anchor_idxs = tf.cast(anchor_idxs, tf.int32)
    # collect scatter indices/values dynamically, then apply them in one update
    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0
    for i in tf.range(N):
        for j in tf.range(y_true[i].nrows()):
            # does this box's best anchor belong to this output scale?
            anchor_eq = tf.equal(
                anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][0:4]
                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2
                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                # cell containing the box center (coords assumed in [0, 1])
                grid_xy = tf.cast(box_xy // (1/grid_size), tf.int32)
                # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
                indexes = indexes.write(
                    idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
                updates = updates.write(
                    idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
                idx += 1
    # tf.print(indexes.stack())
    # tf.print(updates.stack())
    return tf.tensor_scatter_nd_update(
        y_true_out, indexes.stack(), updates.stack())
def transform_targets(y_train, anchors, anchor_masks, size):
    """Assign each ground-truth box its best anchor and build per-scale grids.

    Returns a tuple with one scattered target tensor per anchor mask, with
    the grid doubling in resolution for each successive mask.
    """
    y_outs = []
    # coarsest output stride is 32 pixels
    grid_size = size // 32
    y_train = y_train.merge_dims(1, 2)
    # calculate anchor index for true boxes
    anchors = tf.cast(anchors, tf.float32)
    anchor_area = anchors[..., 0] * anchors[..., 1]
    box_wh = y_train[..., 2:4] - y_train[..., 0:2]
    box_wh = tf.tile(tf.expand_dims(box_wh, -2),
                     (1, 1, tf.shape(anchors)[0], 1))
    box_area = box_wh[..., 0] * box_wh[..., 1]
    # IoU of each box against each anchor, both centered at the origin
    intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * \
        tf.minimum(box_wh[..., 1], anchors[..., 1])
    iou = intersection / (box_area + anchor_area - intersection)
    # tf.ragged.argmax is not ready, some dirty code
    anchor_idx_list = []
    for row_index in range(FLAGS.batch_size):
        iou_single = iou[row_index]
        anchor_idx = tf.cast(tf.argmax(iou_single, axis=-1), tf.float32)
        anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
        anchor_idx_list.append(anchor_idx)
    anchor_idx = tf.ragged.stack(anchor_idx_list)
    # append best-anchor index as a 7th component of each box row
    y_train = tf.concat([y_train, anchor_idx], axis=-1)
    for anchor_idxs in anchor_masks:
        y_outs.append(transform_targets_for_output(
            y_train, grid_size, anchor_idxs))
        grid_size *= 2
    return tuple(y_outs)
# TFRecord feature spec: one encoded image plus variable-length parallel
# lists of box corner coordinates and class ids
IMAGE_FEATURE_MAP = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "xmins": tf.io.VarLenFeature(tf.float32),
    "ymins": tf.io.VarLenFeature(tf.float32),
    "xmaxs": tf.io.VarLenFeature(tf.float32),
    "ymaxs": tf.io.VarLenFeature(tf.float32),
    "classes": tf.io.VarLenFeature(tf.int64)
}
def transform_images(x_train, size, pad=False, augment=False):
    """Resize an image batch to ``size`` x ``size`` and scale pixels to [0, 1].

    :param pad: preserve aspect ratio via letterbox padding instead of plain resize
    :param augment: additionally apply one random photometric augmentation
    """
    resize_kwargs = dict(method='bicubic', antialias=True)
    if pad:
        x_train = tf.image.resize_with_pad(x_train, size, size, **resize_kwargs)
    else:
        x_train = tf.image.resize(x_train, (size, size), **resize_kwargs)
    if augment:
        x_train = augment_image(x_train)
    return x_train / 255
def augment_image(image):
    """Apply one uniformly-chosen random photometric augmentation to *image*."""
    augmentations = (
        lambda img: tf.image.random_brightness(img, 0.05),
        lambda img: tf.image.random_contrast(img, 0.75, 1.25),
        lambda img: tf.image.random_hue(img, 0.01),
        lambda img: tf.image.random_saturation(img, 0.75, 1.5),
    )
    # np.random picks the op; the op itself draws its parameter from tf's RNG
    return augmentations[np.random.randint(4)](image)
def parse_tfrecord(tfrecord, size, image_type):
    """Decode one serialized example into (resized image, ragged box tensor).

    NOTE(review): an image_type other than 'png'/'jpg' leaves ``x_train``
    unbound and raises at the resize call -- confirm inputs are validated
    upstream.
    """
    x = tf.io.parse_single_example(tfrecord, IMAGE_FEATURE_MAP)
    if image_type == 'png':
        x_train = tf.image.decode_png(x['image'], channels=3)
    elif image_type == 'jpg':
        x_train = tf.image.decode_jpeg(x['image'], channels=3)
    x_train = tf.image.resize(x_train, (size, size),
                              method='bicubic', antialias=True)
    # stack the sparse per-box columns into rows of (x1, y1, x2, y2, class)
    y_train = tf.stack([tf.sparse.to_dense(x['xmins']),
                        tf.sparse.to_dense(x['ymins']),
                        tf.sparse.to_dense(x['xmaxs']),
                        tf.sparse.to_dense(x['ymaxs']),
                        tf.cast(tf.sparse.to_dense(x['classes']),
                                tf.float32)], axis=1)
    # wrap as a single-row ragged tensor so examples can batch with
    # varying numbers of boxes
    y_train = tf.RaggedTensor.from_row_splits(
        y_train, [0, tf.shape(y_train)[0]])
    return x_train, y_train
def load_tfrecord_dataset(file_pattern, size, image_type):
    """Build a tf.data pipeline of parsed (image, boxes) pairs from TFRecords."""
    records = tf.data.Dataset.list_files(file_pattern).flat_map(tf.data.TFRecordDataset)
    return records.map(lambda record: parse_tfrecord(record, size, image_type))
| StarcoderdataPython |
def surname(user_data, with_separator=None):
    """Return the last element of *user_data* wrapped in a single-item list.

    ``with_separator`` is accepted for interface compatibility but unused.
    """
    last_entry = user_data[-1]
    return [last_entry]
| StarcoderdataPython |
3446958 | <reponame>igarny/pscheduler
#
# Limit-Related Pages
#
import pscheduler
from pschedulerapiserver import application
from flask import request
from .args import *
from .limitproc import *
from .response import *
from .util import *
@application.route("/limits", methods=['GET'])
def limits():
    """Evaluate a task proposal against the configured limit processor.

    Expects a 'proposal' JSON query argument; responds with whether the
    proposal passed, the limits it passed, and diagnostic text.
    """
    try:
        proposal = arg_json('proposal')
    except ValueError as ex:
        return bad_request(str(ex))
    if proposal is None:
        return bad_request("No proposal provided")
    (processor, whynot) = limitprocessor()
    if processor is None:
        return no_can_do("Limit processor is not initialized: {0}".format(whynot))
    # request_hints returns (hints, None) on success or (None, error response)
    hints, error_response = request_hints();
    if hints is None:
        return error_response
    # _new_task and _priority are intentionally unused by this endpoint
    passed, limits_passed, diags, _new_task, _priority \
        = processor.process(proposal, hints)
    return json_response({
        "passed": passed,
        "limits_passed": limits_passed,
        "diags": diags
    })
| StarcoderdataPython |
171142 | import tensorflow as tf
import data_loader_recsys
import generator_recsys
import utils
import shutil
import time
import math
import eval
import numpy as np
import argparse
#check whether the files exists or not, "Data/Models/generation_model/model_nextitnet.ckpt"
# if yes run this file directly, if not run nextitrec.py first, which is the training file.
def main():
    """Evaluate a trained NextItNet checkpoint on the held-out session split.

    Python 2 script: restores the model from
    Data/Models/generation_model/model_nextitnet.ckpt (train with nextitrec.py
    first) and prints MRR@5/20, HR@5/20 and NDCG@5/20 per test batch.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--top_k', type=int, default=5,
                        help='Sample from top k predictions')
    parser.add_argument('--beta1', type=float, default=0.9,
                        help='hyperpara-Adam')
    parser.add_argument('--datapath', type=str, default='Data/Session/user-filter-20000items-session5.csv',
                        help='data path')
    parser.add_argument('--eval_iter', type=int, default=10,
                        help='Sample generator output evry x steps')
    parser.add_argument('--save_para_every', type=int, default=10,
                        help='save model parameters every')
    parser.add_argument('--tt_percentage', type=float, default=0.2,
                        help='default=0.2 means 80% training 20% testing')
    parser.add_argument('--is_generatesubsession', type=bool, default=False,
                        help='whether generating a subsessions, e.g., 12345-->01234,00123,00012 It may be useful for very some very long sequences')
    args = parser.parse_args()
    dl = data_loader_recsys.Data_Loader({'model_type': 'generator', 'dir_name': args.datapath})
    all_samples = dl.item
    items = dl.item_dict
    print "len(items)",len(items)
    # Randomly shuffle data (fixed seed so the split matches training)
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))
    all_samples = all_samples[shuffle_indices]
    # Split train/test set
    dev_sample_index = -1 * int(args.tt_percentage * float(len(all_samples)))
    train_set, valid_set = all_samples[:dev_sample_index], all_samples[dev_sample_index:]
    model_para = {
        #all parameters shuold be consist with those in nextitred.py!!!!
        'item_size': len(items),
        'dilated_channels': 100,
        'dilations': [1, 2,],
        'kernel_size': 3,
        'learning_rate':0.001,
        'batch_size':32,
        'iterations':2,#useless, can be removed
        'is_negsample':False #False denotes no negative sampling
    }
    itemrec = generator_recsys.NextItNet_Decoder(model_para)
    itemrec.train_graph(model_para['is_negsample'])
    itemrec.predict_graph(model_para['is_negsample'],reuse=True)
    sess= tf.Session()
    init=tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()
    # restore the weights produced by the training script
    saver.restore(sess,"Data/Models/generation_model/model_nextitnet.ckpt")
    batch_no_test = 0
    batch_size_test = model_para['batch_size']
    # per-prediction metric accumulators (MRR / hit-rate / NDCG at 5 and 20)
    curr_preds_5 = []
    rec_preds_5 = []  # 1
    ndcg_preds_5 = []  # 1
    curr_preds_20 = []
    rec_preds_20 = []  # 1
    ndcg_preds_20 = []  # 1
    while (batch_no_test + 1) * batch_size_test < valid_set.shape[0]:
        item_batch = valid_set[batch_no_test * batch_size_test: (batch_no_test + 1) * batch_size_test, :]
        [probs] = sess.run(
            [itemrec.g_probs],
            feed_dict={
                itemrec.input_predict: item_batch
            })
        for bi in range(probs.shape[0]):
            # rank of the true next item within the top-k predictions
            pred_items_5 = utils.sample_top_k(probs[bi][-1], top_k=args.top_k)  # top_k=5
            pred_items_20 = utils.sample_top_k(probs[bi][-1], top_k=args.top_k + 15)
            true_item = item_batch[bi][-1]
            predictmap_5 = {ch: i for i, ch in enumerate(pred_items_5)}
            # NOTE(review): pred_items_20 is rebound from array to dict here
            pred_items_20 = {ch: i for i, ch in enumerate(pred_items_20)}
            rank_5 = predictmap_5.get(true_item)
            rank_20 = pred_items_20.get(true_item)
            if rank_5 == None:
                curr_preds_5.append(0.0)
                rec_preds_5.append(0.0)  # 2
                ndcg_preds_5.append(0.0)  # 2
            else:
                MRR_5 = 1.0 / (rank_5 + 1)
                Rec_5 = 1.0  # 3
                ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)  # 3
                curr_preds_5.append(MRR_5)
                rec_preds_5.append(Rec_5)  # 4
                ndcg_preds_5.append(ndcg_5)  # 4
            if rank_20 == None:
                curr_preds_20.append(0.0)
                rec_preds_20.append(0.0)  # 2
                ndcg_preds_20.append(0.0)  # 2
            else:
                MRR_20 = 1.0 / (rank_20 + 1)
                Rec_20 = 1.0  # 3
                ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)  # 3
                curr_preds_20.append(MRR_20)
                rec_preds_20.append(Rec_20)  # 4
                ndcg_preds_20.append(ndcg_20)  # 4
        batch_no_test += 1
        # running averages over all predictions so far
        print "BATCH_NO: {}".format(batch_no_test)
        print "Accuracy mrr_5:", sum(curr_preds_5) / float(len(curr_preds_5))  # 5
        print "Accuracy mrr_20:", sum(curr_preds_20) / float(len(curr_preds_20))  # 5
        print "Accuracy hit_5:", sum(rec_preds_5) / float(len(rec_preds_5))  # 5
        print "Accuracy hit_20:", sum(rec_preds_20) / float(len(rec_preds_20))  # 5
        print "Accuracy ndcg_5:", sum(ndcg_preds_5) / float(len(ndcg_preds_5))  # 5
        print "Accuracy ndcg_20:", sum(ndcg_preds_20) / float(len(ndcg_preds_20))  #
        # print "curr_preds",curr_preds
# standard script entry point
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8103711 | <reponame>meunierd/nlzss
#!/usr/bin/env python3
import sys
from sys import stdin, stdout, stderr, exit
from os import SEEK_SET, SEEK_CUR, SEEK_END
from errno import EPIPE
from struct import pack, unpack
__all__ = ('decompress', 'decompress_file', 'decompress_bytes',
'decompress_overlay', 'DecompressionError')
class DecompressionError(ValueError):
    """Raised when input is not valid LZSS data or decompresses to the wrong size."""
    pass
def bits(byte):
    """Return the 8 bits of *byte* as a tuple, most significant bit first."""
    return tuple((byte >> shift) & 1 for shift in range(7, -1, -1))
def decompress_raw_lzss10(indata, decompressed_size, _overlay=False):
    """Decompress LZSS-compressed bytes. Returns a bytearray.

    Stream format: each flag byte governs the next 8 tokens, MSB first.
    Flag 0 -> copy one literal byte; flag 1 -> a big-endian 16-bit token
    whose top 4 bits give (count - 3) and low 12 bits give the back-reference
    displacement. Overlay mode uses a displacement bias of 3 instead of 1.

    :raises DecompressionError: if the output length differs from
        ``decompressed_size``.
    """
    data = bytearray()
    it = iter(indata)
    if _overlay:
        disp_extra = 3
    else:
        disp_extra = 1
    def writebyte(b):
        data.append(b)
    def readbyte():
        return next(it)
    def readshort():
        # big-endian
        a = next(it)
        b = next(it)
        return (a << 8) | b
    def copybyte():
        data.append(next(it))
    while len(data) < decompressed_size:
        b = readbyte()
        flags = bits(b)
        for flag in flags:
            if flag == 0:
                copybyte()
            elif flag == 1:
                sh = readshort()
                count = (sh >> 0xc) + 3
                disp = (sh & 0xfff) + disp_extra
                # copy byte-by-byte so the reference may overlap the output
                for _ in range(count):
                    writebyte(data[-disp])
            else:
                raise ValueError(flag)
            # stop mid-flag-byte once the target size is reached
            if decompressed_size <= len(data):
                break
    if len(data) != decompressed_size:
        raise DecompressionError("decompressed size does not match the expected size")
    return data
def decompress_raw_lzss11(indata, decompressed_size):
    """Decompress LZSS-compressed bytes (0x11 variant). Returns a bytearray.

    Like LZSS10 but back-reference tokens are variable-length: the top nibble
    of the first token byte selects an 8-bit (+0x11), 16-bit (+0x111) or
    4-bit (+1) copy count, always followed by a 12-bit displacement (+1).

    :raises DecompressionError: if the output length differs from
        ``decompressed_size``.
    """
    data = bytearray()
    it = iter(indata)
    def writebyte(b):
        data.append(b)
    def readbyte():
        return next(it)
    def copybyte():
        data.append(next(it))
    while len(data) < decompressed_size:
        b = readbyte()
        flags = bits(b)
        for flag in flags:
            if flag == 0:
                copybyte()
            elif flag == 1:
                b = readbyte()
                indicator = b >> 4
                if indicator == 0:
                    # 8 bit count, 12 bit disp
                    # indicator is 0, don't need to mask b
                    count = (b << 4)
                    b = readbyte()
                    count += b >> 4
                    count += 0x11
                elif indicator == 1:
                    # 16 bit count, 12 bit disp
                    count = ((b & 0xf) << 12) + (readbyte() << 4)
                    b = readbyte()
                    count += b >> 4
                    count += 0x111
                else:
                    # indicator is count (4 bits), 12 bit disp
                    count = indicator
                    count += 1
                disp = ((b & 0xf) << 8) + readbyte()
                disp += 1
                try:
                    # copy byte-by-byte so the reference may overlap the output
                    for _ in range(count):
                        writebyte(data[-disp])
                except IndexError:
                    raise Exception(count, disp, len(data), sum(1 for x in it) )
            else:
                raise ValueError(flag)
            # stop mid-flag-byte once the target size is reached
            if decompressed_size <= len(data):
                break
    if len(data) != decompressed_size:
        raise DecompressionError("decompressed size does not match the expected size")
    return data
def decompress_overlay(f, out):
    """Decompress an overlay file (compressed tail, backwards LZSS10) to *out*.

    The last 8 bytes of *f* describe the compressed region; the compressed
    tail is reversed, decompressed with the overlay displacement bias, and
    re-reversed before being appended to the untouched file prefix.
    """
    # the compression header is at the end of the file
    f.seek(-8, SEEK_END)
    header = f.read(8)
    # decompression goes backwards.
    # end < here < start
    # end_delta == here - decompression end address
    # start_delta == decompression start address - here
    end_delta, start_delta = unpack("<LL", header)
    filelen = f.tell()
    # top byte of end_delta is the padding length; low 24 bits the offset
    padding = end_delta >> 0x18
    end_delta &= 0xFFFFFF
    decompressed_size = start_delta + end_delta
    f.seek(-end_delta, SEEK_END)
    data = bytearray()
    data.extend(f.read(end_delta - padding))
    data.reverse()
    #stdout.write(data.tostring())
    uncompressed_data = decompress_raw_lzss10(data, decompressed_size,
                                              _overlay=True)
    uncompressed_data.reverse()
    # first we write up to the portion of the file which was "overwritten" by
    # the decompressed data, then the decompressed data itself.
    # i wonder if it's possible for decompression to overtake the compressed
    # data, so that the decompression code is reading its own output...
    f.seek(0, SEEK_SET)
    out.write(f.read(filelen - end_delta))
    out.write(uncompressed_data)
def decompress(obj):
    """Decompress LZSS-compressed bytes or a file-like object.

    Delegates to decompress_file() when *obj* exposes a ``read`` attribute,
    otherwise to decompress_bytes(). Returns a bytearray.
    """
    handler = decompress_file if hasattr(obj, 'read') else decompress_bytes
    return handler(obj)
def decompress_bytes(data):
    """Decompress LZSS-compressed bytes. Returns a bytearray.

    The first header byte selects the variant (0x10 -> LZSS10, 0x11 ->
    LZSS11); the following three bytes hold the little-endian
    decompressed size.

    :raises DecompressionError: if the header byte is not a known marker.
    """
    header = data[:4]
    if header[0] == 0x10:
        decompress_raw = decompress_raw_lzss10
    elif header[0] == 0x11:
        decompress_raw = decompress_raw_lzss11
    else:
        # message typo fixed: "not as" -> "not an"
        raise DecompressionError("not an lzss-compressed file")
    # 24-bit size field, zero-padded to 4 bytes for struct
    decompressed_size, = unpack("<L", header[1:] + b'\x00')
    data = data[4:]
    return decompress_raw(data, decompressed_size)
def decompress_file(f):
    """Decompress an LZSS-compressed file. Returns a bytearray.

    This isn't any more efficient than decompress_bytes, as it reads
    the entire file into memory. It is offered as a convenience.

    :raises DecompressionError: if the header byte is not a known marker.
    """
    header = f.read(4)
    if header[0] == 0x10:
        decompress_raw = decompress_raw_lzss10
    elif header[0] == 0x11:
        decompress_raw = decompress_raw_lzss11
    else:
        # message typo fixed: "not as" -> "not an"
        raise DecompressionError("not an lzss-compressed file")
    # 24-bit size field, zero-padded to 4 bytes for struct
    decompressed_size, = unpack("<L", header[1:] + b'\x00')
    data = f.read()
    return decompress_raw(data, decompressed_size)
def main(args=None):
    """Command-line entry point: decompress a file (or stdin) to stdout.

    Returns a process exit status: 0 on success, 1 on decompression
    failure, 2 on usage/IO errors.
    """
    if args is None:
        args = sys.argv[1:]
    if '--overlay' in args:
        args.remove('--overlay')
        overlay = True
    else:
        overlay = False
    if len(args) < 1 or args[0] == '-':
        if overlay:
            # Overlay decompression must seek from the end, which stdin can't do.
            print("Can't decompress overlays from stdin", file=stderr)
            return 2
        # Python 3 exposes the binary stream as stdin.buffer; Python 2's stdin
        # is already binary.
        if hasattr(stdin, 'buffer'):
            f = stdin.buffer
        else:
            f = stdin
    else:
        try:
            f = open(args[0], "rb")
        except IOError as e:
            print(e, file=stderr)
            return 2
    stdout = sys.stdout
    if hasattr(stdout, 'buffer'):
        # grab the underlying binary stream
        stdout = stdout.buffer
    try:
        if overlay:
            decompress_overlay(f, stdout)
        else:
            stdout.write(decompress_file(f))
    except IOError as e:
        if e.errno == EPIPE:
            # don't complain about a broken pipe
            pass
        else:
            raise
    except (DecompressionError,) as e:
        print(e, file=stderr)
        return 1
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    exit(main())
| StarcoderdataPython |
from flask_restful import Resource
from flask import request, jsonify, g
from marshmallow import ValidationError
from ..models import Todo , todo_schema, todos_schema, todo_schema_include_items
from ..util import validate_request, jwt_required
class TodoListResource(Resource):
    """REST resource for the authenticated user's todo collection."""

    @validate_request
    @jwt_required
    def post(self):
        """Create a todo owned by the current user.

        Returns 422 when the payload fails schema validation, otherwise
        201 with the serialized todo.
        """
        payload = request.get_json()
        try:
            todo_schema.load(payload)
        except ValidationError as err:
            return {"message": "validation failed", "errors": err.messages}, 422
        todo = Todo(title=payload["title"], user_id=g.current_user["id"])
        todo.save()
        return {"message": "todo created", "todo": todo_schema.dump(todo)}, 201

    @jwt_required
    def get(self):
        """List all todos owned by the current user."""
        todos = Todo.query.filter_by(user_id=g.current_user["id"])
        # Serialize once; the original dumped the queryset twice and
        # discarded the first result.
        serialized = todos_schema.dump(todos)
        return {"message": "todos retrieved", "todos": serialized}, 200
class TodoResource(Resource):
    """REST resource for a single todo owned by the current user."""

    @staticmethod
    def _fetch_owned_todo(todo_id):
        """Look up a todo and verify ownership by the current user.

        Returns (todo, error_response); exactly one of the two is None.
        The error response is (body, status): 404 when the todo does not
        exist, 401 when it belongs to another user.
        """
        todo = Todo.query.get(todo_id)
        if not todo:
            return None, ({"message": "todo does not exist"}, 404)
        if todo.user_id != g.current_user["id"]:
            return None, ({"message": "unauthorized"}, 401)
        return todo, None

    @jwt_required
    def get(self, todo_id):
        """Retrieve one todo, including its items."""
        todo, error = self._fetch_owned_todo(todo_id)
        if error:
            return error
        return {"message": "todo retrieved", "todo": todo_schema_include_items.dump(todo)}, 200

    @jwt_required
    def delete(self, todo_id):
        """Delete one todo."""
        todo, error = self._fetch_owned_todo(todo_id)
        if error:
            return error
        todo.delete()
        return {"message": "todo deleted"}, 200

    @validate_request
    @jwt_required
    def put(self, todo_id):
        """Update a todo's title.  Returns 422 on schema validation failure."""
        payload = request.get_json()
        todo, error = self._fetch_owned_todo(todo_id)
        if error:
            return error
        try:
            todo_schema.load(payload)
        except ValidationError as err:
            return {"message": "validation failed", "errors": err.messages}, 422
        todo.title = payload["title"]
        todo.save()
        return {"message": "todo updated", "todo": todo_schema.dump(todo)}, 200
| StarcoderdataPython |
import httpx
import pytest
from rubrix import DEFAULT_API_KEY
from rubrix.client import AuthenticatedClient, whoami
from rubrix.client.sdk.users.models import User
from tests.server.test_helpers import client
@pytest.fixture
def sdk_client():
    """Authenticated SDK client pointed at the local rubrix server."""
    return AuthenticatedClient(base_url="http://localhost:6900", token=DEFAULT_API_KEY)
def test_whoami(sdk_client, monkeypatch):
    """whoami() against the in-process test server returns the current User."""
    # Route the SDK's httpx.get through the test client instead of the network.
    monkeypatch.setattr(httpx, "get", client.get)
    response = whoami(client=sdk_client)
    assert response.status_code == 200
    assert isinstance(response.parsed, User)
| StarcoderdataPython |
#!/opt/bin/lv_micropython -i
import lvgl as lv
import display_driver
| StarcoderdataPython |
4863039 | from mode import Mode
from colormath.color_objects import XYZColor, sRGBColor
from colormath.color_conversions import convert_color
import re
class RGB(Mode):
    """Mode that sets a light's color from a 6-digit RGB hex string (e.g. "#ff8800")."""

    @staticmethod
    def get_params():
        # (mode name, expected argument description)
        return ('rgb', 'rgb_in_hex')

    @staticmethod
    def execute(light_utils, argument=None):
        """Parse *argument* as a hex color and apply it via *light_utils*.

        The color is converted sRGB -> XYZ -> xy chromaticity, which is the
        form expected by light_utils.set_color().

        Raises:
            ValueError: if *argument* is not a 6-digit hex color
                (optionally prefixed with '#').
        """
        # The original pattern '^a|#b$' bound each anchor to only one branch
        # of the alternation; anchor the whole pattern instead, and fail
        # explicitly instead of raising AttributeError on match == None.
        match = re.search(r'^#?[0-9a-fA-F]{6}$', argument or '')
        if match is None:
            raise ValueError("expected a 6-digit hex color, got %r" % (argument,))
        hex_value = match.group(0).lstrip('#')
        red, green, blue = (int(hex_value[i:i + 2], 16) for i in (0, 2, 4))
        xyz = convert_color(sRGBColor(red, green, blue), XYZColor)
        total = xyz.xyz_x + xyz.xyz_y + xyz.xyz_z
        # Normalize XYZ to xy chromaticity coordinates.
        xy = [xyz.xyz_x / total, xyz.xyz_y / total]
        light_utils.set_color(xy)
| StarcoderdataPython |
import formats.puzzle as pzd
from formats.binary import BinaryWriter
from formats.filesystem import NintendoDSRom
import unittest
import os
class TestPuzzleData(unittest.TestCase):
    """Round-trip and field tests for pzd.Puzzle against a real ROM image."""

    @classmethod
    def setUpClass(cls) -> None:
        # The test ROM is expected two directories above this test file.
        rom_path = os.path.dirname(__file__)
        cls.rom = NintendoDSRom.fromFile(rom_path + "/../../test_rom.nds")

    def get_pzd(self):
        """Load puzzle #1 from the ROM and return the parsed Puzzle."""
        pz_data = pzd.Puzzle(rom=self.rom, id_=1)
        pz_data.load_from_rom()
        return pz_data

    def test_loading_saving(self):
        # Exporting an unmodified puzzle must reproduce the original bytes.
        pz_data = self.get_pzd()
        wtr = BinaryWriter()
        pz_data.export_data(wtr)
        assert wtr.data == pz_data.original

    def test_values(self):
        # Spot-check every parsed field of puzzle #1.
        pz_data = self.get_pzd()
        assert pz_data.title == "<NAME> Map"
        assert pz_data.type == 26
        assert pz_data.number == 1
        assert pz_data.location_id == 91
        assert pz_data.tutorial_id == 2
        assert pz_data.reward_id == 255
        assert pz_data.bg_btm_id == 1
        assert pz_data.bg_top_id == 1
        assert pz_data.judge_char == 0
        assert pz_data.flag_bit2 is True
        assert pz_data.flag_bit5 is True
        assert pz_data.bg_lang is False
        assert pz_data.ans_bg_lang is False
        assert len(pz_data.correct_answer) == 55  # Not checking full answer, but the length
        assert pz_data.correct_answer == "Excellent work!\n\nNow, let's hurry to the doctor's flat!"
        assert pz_data.picarat_decay == [10, 9, 8]

    def test_readable(self):
        # to_readable/from_readable must also round-trip byte-exactly.
        pz_data = self.get_pzd()
        readable = pz_data.to_readable()
        pz_data.from_readable(readable)
        wtr = BinaryWriter()
        pz_data.export_data(wtr)
        assert wtr.data == pz_data.original
| StarcoderdataPython |
# repo: waterhorse1/Promp_test
from meta_policy_search import utils
from meta_policy_search.policies.base import Policy
from collections import OrderedDict
import tensorflow as tf
import numpy as np
class MetaAlgo(object):
    """
    Base class for algorithms

    Args:
        policy (Policy) : policy object
    """

    def __init__(self, policy):
        assert isinstance(policy, Policy)
        self.policy = policy
        # Keys of the processed-sample dicts the optimizer consumes;
        # concrete subclasses are expected to set this.
        self._optimization_keys = None

    def build_graph(self):
        """
        Creates meta-learning computation graph

        Pseudocode::

            for task in meta_batch_size:
                make_vars
                init_dist_info_sym
            for step in num_grad_steps:
                for task in meta_batch_size:
                    make_vars
                    update_dist_info_sym
            set objectives for optimizer
        """
        raise NotImplementedError

    def make_vars(self, prefix=''):
        """
        Args:
            prefix (str) : a string to prepend to the name of each variable

        Returns:
            (tuple) : a tuple containing lists of placeholders for each input type and meta task
        """
        raise NotImplementedError

    def _adapt_sym(self, surr_obj, params_var):
        """
        Creates the symbolic representation of the tf policy after one gradient step towards the surr_obj

        Args:
            surr_obj (tf_op) : tensorflow op for task specific (inner) objective
            params_var (dict) : dict of placeholders for current policy params

        Returns:
            (dict): dict of tf.Tensors for adapted policy params
        """
        raise NotImplementedError

    def _adapt(self, samples):
        """
        Performs MAML inner step for each task and stores resulting gradients # (in the policy?)

        Args:
            samples (list) : list of lists of samples (each is a dict) split by meta task

        Returns:
            None
        """
        raise NotImplementedError

    def optimize_policy(self, all_samples_data, log=True):
        """
        Performs MAML outer step for each task

        Args:
            all_samples_data (list) : list of lists of lists of samples (each is a dict) split by gradient update and meta task
            log (bool) : whether to log statistics

        Returns:
            None
        """
        raise NotImplementedError
class MAMLAlgo(MetaAlgo):
    """
    Provides some implementations shared between all MAML algorithms

    Args:
        policy (Policy): policy object
        inner_lr (float) : gradient step size used for inner step
        meta_batch_size (int): number of meta-learning tasks
        num_inner_grad_steps (int) : number of gradient updates taken per maml iteration
        trainable_inner_step_size (boolean): whether make the inner step size a trainable variable
    """

    def __init__(self, policy, inner_lr=0.1, meta_batch_size=20, num_inner_grad_steps=1, trainable_inner_step_size=False):
        super(MAMLAlgo, self).__init__(policy)
        # NOTE(review): `type(x)` is always truthy, so the first clause only
        # checks non-negativity -- presumably `type(...) == int` was intended.
        assert type(num_inner_grad_steps) and num_inner_grad_steps >= 0
        assert type(meta_batch_size) == int

        self.inner_lr = float(inner_lr)
        self.meta_batch_size = meta_batch_size
        self.num_inner_grad_steps = num_inner_grad_steps
        self.trainable_inner_step_size = trainable_inner_step_size #TODO: make sure this actually works

        # Populated later by build_graph()/_build_inner_adaption().
        self.adapt_input_ph_dict = None
        self.adapted_policies_params = None
        self.step_sizes = None

    def _make_input_placeholders(self, prefix=''):
        """
        Args:
            prefix (str) : a string to prepend to the name of each variable

        Returns:
            (tuple) : a tuple containing lists of placeholders for each input type and meta task,
            and for convenience, a list containing all placeholders created
        """
        obs_phs, action_phs, adv_phs, dist_info_phs, dist_info_phs_list = [], [], [], [], []
        dist_info_specs = self.policy.distribution.dist_info_specs

        all_phs_dict = OrderedDict()

        # One set of placeholders per meta task, keyed as
        # '<prefix>_task<i>_<kind>' in all_phs_dict.
        for task_id in range(self.meta_batch_size):
            # observation ph
            ph = tf.placeholder(dtype=tf.float32, shape=[None, self.policy.obs_dim], name='obs' + '_' + prefix + '_' + str(task_id))
            all_phs_dict['%s_task%i_%s'%(prefix, task_id, 'observations')] = ph
            obs_phs.append(ph)

            # action ph
            ph = tf.placeholder(dtype=tf.float32, shape=[None, self.policy.action_dim], name='action' + '_' + prefix + '_' + str(task_id))
            all_phs_dict['%s_task%i_%s' % (prefix, task_id, 'actions')] = ph
            action_phs.append(ph)

            # advantage ph
            ph = tf.placeholder(dtype=tf.float32, shape=[None], name='advantage' + '_' + prefix + '_' + str(task_id))
            all_phs_dict['%s_task%i_%s' % (prefix, task_id, 'advantages')] = ph
            adv_phs.append(ph)

            # distribution / agent info
            dist_info_ph_dict = {}
            for info_key, shape in dist_info_specs:
                ph = tf.placeholder(dtype=tf.float32, shape=[None] + list(shape), name='%s_%s_%i' % (info_key, prefix, task_id))
                all_phs_dict['%s_task%i_agent_infos/%s' % (prefix, task_id, info_key)] = ph
                dist_info_ph_dict[info_key] = ph
            dist_info_phs.append(dist_info_ph_dict)

        return obs_phs, action_phs, adv_phs, dist_info_phs, all_phs_dict

    def _adapt_objective_sym(self, action_sym, adv_sym, dist_info_old_sym, dist_info_new_sym):
        # Subclass hook: builds the task-specific (inner) surrogate objective.
        raise NotImplementedError

    def _build_inner_adaption(self):
        """
        Creates the symbolic graph for the one-step inner gradient update (It'll be called several times if
        more gradient steps are needed)

        Args:
            some placeholders

        Returns:
            adapted_policies_params (list): list of Ordered Dict containing the symbolic post-update parameters
            adapt_input_list_ph (list): list of placeholders
        """
        obs_phs, action_phs, adv_phs, dist_info_old_phs, adapt_input_ph_dict = self._make_input_placeholders('adapt')

        adapted_policies_params = []

        for i in range(self.meta_batch_size):
            with tf.variable_scope("adapt_task_%i" % i):
                with tf.variable_scope("adapt_objective"):
                    distribution_info_new = self.policy.distribution_info_sym(obs_phs[i],
                                                                              params=self.policy.policies_params_phs[i])

                    # inner surrogate objective
                    surr_obj_adapt = self._adapt_objective_sym(action_phs[i], adv_phs[i],
                                                               dist_info_old_phs[i], distribution_info_new)

                # get tf operation for adapted (post-update) policy
                with tf.variable_scope("adapt_step"):
                    adapted_policy_param = self._adapt_sym(surr_obj_adapt, self.policy.policies_params_phs[i])
                adapted_policies_params.append(adapted_policy_param)

        return adapted_policies_params, adapt_input_ph_dict

    def _adapt_sym(self, surr_obj, params_var):
        """
        Creates the symbolic representation of the tf policy after one gradient step towards the surr_obj

        Args:
            surr_obj (tf_op) : tensorflow op for task specific (inner) objective
            params_var (dict) : dict of tf.Tensors for current policy params

        Returns:
            (dict): dict of tf.Tensors for adapted policy params
        """
        # TODO: Fix this if we want to learn the learning rate (it isn't supported right now).
        update_param_keys = list(params_var.keys())

        grads = tf.gradients(surr_obj, [params_var[key] for key in update_param_keys])
        gradients = dict(zip(update_param_keys, grads))

        # gradient descent
        adapted_policy_params = [params_var[key] - tf.multiply(self.step_sizes[key], gradients[key])
                                 for key in update_param_keys]

        adapted_policy_params_dict = OrderedDict(zip(update_param_keys, adapted_policy_params))

        return adapted_policy_params_dict

    def _adapt_sym_both(self, surr_obj, surr_obj2, params_var):
        """
        Creates the symbolic representation of the tf policy after one gradient step towards the surr_obj

        Combines two objectives: the forward value comes from the surr_obj
        (policy-gradient) step, while gradients flow through the surr_obj2
        (hessian) step via the stop_gradient trick below.

        Args:
            surr_obj (tf_op) : tensorflow op for task specific (inner) objective
            params_var (dict) : dict of tf.Tensors for current policy params

        Returns:
            (dict): dict of tf.Tensors for adapted policy params
        """
        # TODO: Fix this if we want to learn the learning rate (it isn't supported right now).
        update_param_keys = list(params_var.keys())

        grads = tf.gradients(surr_obj, [params_var[key] for key in update_param_keys])
        gradients = dict(zip(update_param_keys, grads))

        grads2 = tf.gradients(surr_obj2, [params_var[key] for key in update_param_keys])
        gradients2 = dict(zip(update_param_keys, grads2))

        # gradient descent
        adapted_policy_params = []
        for key in update_param_keys:
            params_hessian = params_var[key] - tf.multiply(self.step_sizes[key], gradients2[key])
            params_pg = params_var[key] - tf.multiply(self.step_sizes[key], gradients[key])
            # Value of params_pg in the forward pass, gradient of params_hessian
            # in the backward pass.
            params = tf.stop_gradient(params_pg) + params_hessian - tf.stop_gradient(params_hessian)
            adapted_policy_params.append(params)

        adapted_policy_params_dict = OrderedDict(zip(update_param_keys, adapted_policy_params))

        return adapted_policy_params_dict

    def _adapt(self, samples):
        """
        Performs MAML inner step for each task and stores the updated parameters in the policy

        Args:
            samples (list) : list of dicts of samples (each is a dict) split by meta task
        """
        assert len(samples) == self.meta_batch_size
        assert [sample_dict.keys() for sample_dict in samples]
        #print(samples[0].keys())
        sess = tf.get_default_session()

        # prepare feed dict
        input_dict = self._extract_input_dict(samples, self._optimization_keys, prefix='adapt')
        input_ph_dict = self.adapt_input_ph_dict

        feed_dict_inputs = utils.create_feed_dict(placeholder_dict=input_ph_dict, value_dict=input_dict)
        feed_dict_params = self.policy.policies_params_feed_dict

        feed_dict = {**feed_dict_inputs, **feed_dict_params}  # merge the two feed dicts

        # compute the post-update / adapted policy parameters
        adapted_policies_params_vals = sess.run(self.adapted_policies_params, feed_dict=feed_dict)

        # store the new parameter values in the policy
        self.policy.update_task_parameters(adapted_policies_params_vals)

    def _extract_input_dict(self, samples_data_meta_batch, keys, prefix=''):
        """
        Re-arranges a list of dicts containing the processed sample data into a OrderedDict that can be matched
        with a placeholder dict for creating a feed dict

        Args:
            samples_data_meta_batch (list) : list of dicts containing the processed data corresponding to each meta-task
            keys (list) : a list of keys that should exist in each dict and whose values shall be extracted
            prefix (str): prefix to prepend the keys in the resulting OrderedDict

        Returns:
            OrderedDict containing the data from all_samples_data. The data keys follow the naming convention:
                '<prefix>_task<task_number>_<key_name>'
        """
        assert len(samples_data_meta_batch) == self.meta_batch_size

        input_dict = OrderedDict()

        for meta_task in range(self.meta_batch_size):
            extracted_data = utils.extract(
                samples_data_meta_batch[meta_task], *keys
            )

            # iterate over the desired data instances and corresponding keys
            for j, (data, key) in enumerate(zip(extracted_data, keys)):
                if isinstance(data, dict):
                    # if the data instance is a dict -> iterate over the items of this dict
                    for k, d in data.items():
                        assert isinstance(d, np.ndarray)
                        input_dict['%s_task%i_%s/%s' % (prefix, meta_task, key, k)] = d

                elif isinstance(data, np.ndarray):
                    input_dict['%s_task%i_%s'%(prefix, meta_task, key)] = data

                else:
                    raise NotImplementedError

        return input_dict

    def _extract_input_dict_meta_op(self, all_samples_data, keys):
        """
        Creates the input dict for all the samples data required to perform the meta-update

        Args:
            all_samples_data (list):list (len = num_inner_grad_steps + 1) of lists (len = meta_batch_size) containing
                                    dicts that hold processed samples data
            keys (list): a list of keys (str) that should exist in each dict and whose values shall be extracted

        Returns:

        """
        assert len(all_samples_data) == self.num_inner_grad_steps + 1

        meta_op_input_dict = OrderedDict()
        for step_id, samples_data in enumerate(all_samples_data):  # these are the gradient steps
            dict_input_dict_step = self._extract_input_dict(samples_data, keys, prefix='step%i'%step_id)
            meta_op_input_dict.update(dict_input_dict_step)

        return meta_op_input_dict

    def _create_step_size_vars(self):
        # Step sizes: one (possibly trainable) per-parameter learning-rate
        # tensor, shaped like the corresponding policy parameter.
        with tf.variable_scope('inner_step_sizes'):
            step_sizes = dict()
            for key, param in self.policy.policy_params.items():
                shape = param.get_shape().as_list()
                init_stepsize = np.ones(shape, dtype=np.float32) * self.inner_lr
                step_sizes[key] = tf.Variable(initial_value=init_stepsize,
                                              name='%s_step_size' % key,
                                              dtype=tf.float32, trainable=self.trainable_inner_step_size)
        return step_sizes
'''
FileName: clipBoard.py
Author: Chuncheng
Version: V0.0
Purpose: Control Clip Board
'''
# %%
import time
import clipboard
# %%
# Default clipboard payload: the timestamp captured once at import time.
default_message = time.ctime()
default_message  # no-op expression; artifact of notebook-style "# %%" cells
# %%
def copy(message=default_message):
    """Copy *message* to the system clipboard and echo what was stored.

    Args:
        message: text to copy; defaults to ``default_message``, which is
            evaluated once at import time (module-load timestamp).

    Returns:
        0 on success (an explicit value so callers can wait on the call).
    """
    clipboard.copy(message)
    print('D: Copy {} to clipboard'.format(clipboard.paste()))
    # Add return value to prevent escape too early
    return 0
# %%
if __name__ == '__main__':
    # Interactive entry point: read one line and copy it to the clipboard.
    msg = input('(Type to Copy) >> ')
    copy(msg)
    print('I: ByeBye.')
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# originally contributed by:
#Copyright 2014 Battelle Memorial Institute
#Written by <NAME>
# With additions from <NAME>, Bareos GmbH & Co. KG 2015
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of version three of the GNU Affero General Public
# License as published by the Free Software Foundation, which is
# listed in the file LICENSE.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from bareosfd import *
from bareos_fd_consts import *
import os
from subprocess import *
from BareosFdPluginBaseclass import *
import BareosFdWrapper
class BareosFdMySQLclass (BareosFdPluginBaseclass):
    '''
    Plugin for backing up all mysql databases found in a specific mysql server

    NOTE(review): this file is Python 2 code (`except Exception, msg`,
    print statements) and must run under the bareos-fd Python 2 plugin host.
    '''

    def __init__(self, context, plugindef):
        BareosFdPluginBaseclass.__init__(self, context, plugindef)
        # File handle used only during restore (IO_OPEN / IO_WRITE below).
        self.file=None

    def parse_plugin_definition(self,context, plugindef):
        '''
        Parses the plugin options and builds the list of databases to dump.
        '''
        BareosFdPluginBaseclass.parse_plugin_definition(self, context, plugindef)
        # mysql host and credentials, by default we use localhost and root and
        # prefer to have a my.cnf with mysql credentials
        self.mysqlconnect = ''

        if 'dumpbinary' in self.options:
            self.dumpbinary = self.options['dumpbinary']
        else:
            self.dumpbinary = "mysqldump"

        # if dumpotions is set, we use that completely here, otherwise defaults
        if 'dumpoptions' in self.options:
            self.dumpoptions = self.options['dumpoptions']
        else:
            self.dumpoptions = " --events --single-transaction "
            # default is to add the drop statement
            if not 'drop_and_recreate' in self.options or not self.options['drop_and_recreate'] == 'false':
                self.dumpoptions += " --add-drop-database --databases "

        # if defaultsfile is set
        if 'defaultsfile' in self.options:
            self.defaultsfile = self.options['defaultsfile']
            self.mysqlconnect += " --defaults-file=" + self.defaultsfile

        if 'mysqlhost' in self.options:
            self.mysqlhost = self.options['mysqlhost']
            self.mysqlconnect += " -h " + self.mysqlhost

        if 'mysqluser' in self.options:
            self.mysqluser = self.options['mysqluser']
            self.mysqlconnect += " -u " + self.mysqluser

        # NOTE(review): passing the password on the command line exposes it in
        # the process list; a defaults-file is the safer option.
        if 'mysqlpassword' in self.options:
            self.mysqlpassword = self.options['mysqlpassword']
            self.mysqlconnect += " --password=" + self.mysqlpassword

        # if plugin has db configured (a list of comma separated databases to backup
        # we use it here as list of databases to backup
        if 'db' in self.options:
            self.databases = self.options['db'].split(',')
        # Otherwise we backup all existing databases
        else:
            showDbCommand = "mysql %s -B -N -e 'show databases'" %self.mysqlconnect
            showDb = Popen(showDbCommand, shell=True, stdout=PIPE, stderr=PIPE)
            self.databases = showDb.stdout.read().splitlines()
            # Never dump MySQL's virtual schemas.
            if 'performance_schema' in self.databases:
                self.databases.remove('performance_schema')
            if 'information_schema' in self.databases:
                self.databases.remove('information_schema')
            showDb.wait()
            returnCode = showDb.poll()
            if returnCode == None:
                JobMessage(context, bJobMessageType['M_FATAL'], "No databases specified and show databases failed for unknown reason");
                DebugMessage(context, 10, "Failed mysql command: '%s'" %showDbCommand)
                return bRCs['bRC_Error'];
            if returnCode != 0:
                (stdOut, stdError) = showDb.communicate()
                JobMessage(context, bJobMessageType['M_FATAL'], "No databases specified and show databases failed. %s" %stdError);
                DebugMessage(context, 10, "Failed mysql command: '%s'" %showDbCommand)
                return bRCs['bRC_Error'];

        if 'ignore_db' in self.options:
            DebugMessage(context, 100, "databases in ignore list: %s\n" %(self.options['ignore_db'].split(',')));
            for ignored_cur in self.options['ignore_db'].split(','):
                try:
                    self.databases.remove(ignored_cur)
                except:
                    # Ignored database was not present in the list -- fine.
                    pass

        DebugMessage(context, 100, "databases to backup: %s\n" %(self.databases));
        return bRCs['bRC_OK'];

    def start_backup_file(self,context, savepkt):
        '''
        This method is called, when Bareos is ready to start backup a file
        For each database to backup we create a mysqldump subprocess, wrting to
        the pipe self.stream.stdout
        '''
        DebugMessage(context, 100, "start_backup called\n");
        if not self.databases:
            DebugMessage(context,100,"No databases to backup")
            JobMessage(context, bJobMessageType['M_ERROR'], "No databases to backup.\n");
            return bRCs['bRC_Skip']

        # Take the next pending database; end_backup_file() returns bRC_More
        # while this list is non-empty.
        db = self.databases.pop()

        # Estimate the dump size from the table statistics so the catalog has
        # an approximate file size.
        sizeDbCommand = "mysql %s -B -N -e 'SELECT (SUM(DATA_LENGTH + INDEX_LENGTH)) FROM information_schema.TABLES WHERE TABLE_SCHEMA = \"%s\"'" %(self.mysqlconnect, db)
        sizeDb = Popen(sizeDbCommand, shell=True, stdout=PIPE, stderr=PIPE)
        size_curr_db = sizeDb.stdout.read()
        sizeDb.wait()
        sizereturnCode = sizeDb.poll()

        statp = StatPacket()
        # A schema with no tables reports NULL; keep the default size then.
        if not size_curr_db == "NULL\n":
            try:
                statp.size = int(size_curr_db)
            except ValueError:
                pass
        savepkt.statp = statp
        savepkt.fname = "/_mysqlbackups_/"+db+".sql"
        savepkt.type = bFileType['FT_REG']

        dumpcommand = ("%s %s %s %s" %(self.dumpbinary, self.mysqlconnect, db, self.dumpoptions))
        DebugMessage(context, 100, "Dumper: '" + dumpcommand + "'\n")
        self.stream = Popen(dumpcommand, shell=True, stdout=PIPE, stderr=PIPE)

        JobMessage(context, bJobMessageType['M_INFO'], "Starting backup of " + savepkt.fname + "\n");
        return bRCs['bRC_OK'];

    def plugin_io(self, context, IOP):
        '''
        Called for io operations. We read from pipe into buffers or on restore
        create a file for each database and write into it.
        '''
        DebugMessage(context, 100, "plugin_io called with " + str(IOP.func) + "\n");

        if IOP.func == bIOPS['IO_OPEN']:
            try:
                # Restore path: create the target .sql file for writing.
                if IOP.flags & (os.O_CREAT | os.O_WRONLY):
                    self.file = open(IOP.fname, 'wb');
            except Exception,msg:
                IOP.status = -1;
                DebugMessage(context, 100, "Error opening file: " + IOP.fname + "\n");
                print msg;
                return bRCs['bRC_Error'];
            return bRCs['bRC_OK']

        elif IOP.func == bIOPS['IO_READ']:
            # Backup path: stream chunks of mysqldump's stdout to bareos.
            IOP.buf = bytearray(IOP.count)
            IOP.status = self.stream.stdout.readinto(IOP.buf)
            IOP.io_errno = 0
            return bRCs['bRC_OK']

        elif IOP.func == bIOPS['IO_WRITE']:
            try:
                self.file.write(IOP.buf);
                IOP.status = IOP.count
                IOP.io_errno = 0
            except IOError,msg:
                IOP.io_errno = -1
                DebugMessage(context, 100, "Error writing data: " + msg + "\n");
            return bRCs['bRC_OK'];

        elif IOP.func == bIOPS['IO_CLOSE']:
            if self.file:
                self.file.close()
            return bRCs['bRC_OK']

        elif IOP.func == bIOPS['IO_SEEK']:
            # Seeking is meaningless on the dump pipe; report success.
            return bRCs['bRC_OK']

        else:
            DebugMessage(context,100,"plugin_io called with unsupported IOP:"+str(IOP.func)+"\n")
            return bRCs['bRC_OK']

    def end_backup_file(self, context):
        '''
        Check, if dump was successfull.
        '''
        # Usually the mysqldump process should have terminated here, but on some servers
        # it has not always.
        self.stream.wait()
        returnCode = self.stream.poll()
        if returnCode == None:
            JobMessage(context, bJobMessageType['M_ERROR'], "Dump command not finished properly for unknown reason")
            returnCode = -99
        else:
            DebugMessage(context, 100, "end_backup_file() entry point in Python called. Returncode: %d\n" %self.stream.returncode)
        if returnCode != 0:
            (stdOut, stdError) = self.stream.communicate()
            if stdError == None:
                stdError = ''
            JobMessage(context, bJobMessageType['M_ERROR'], "Dump command returned non-zero value: %d, message: %s\n" %(returnCode,stdError));

        # bRC_More tells bareos to call start_backup_file again for the next db.
        if self.databases:
            return bRCs['bRC_More']
        else:
            if returnCode == 0:
                return bRCs['bRC_OK'];
            else:
                return bRCs['bRC_Error']
# vim: ts=4 tabstop=4 expandtab shiftwidth=4 softtabstop=4
| StarcoderdataPython |
from typing import overload
@overload
def foo(value: None) -> None:
    ...

@overload
def foo(value: int) -> str:
    ...

@overload
def foo(value: str) -> str:
    ...

def foo(value):
    """Implementation backing the overloads above.

    None passes through unchanged; int and str inputs are returned as str.
    (The original body returned None for every input, contradicting the
    declared int/str -> str overloads.)
    """
    if value is None:
        return None
    return str(value)

# The original ended in the unexpanded IDE template placeholder `foo(<arg1>)`,
# which is a syntax error; exercise the function with a valid call instead.
foo(None)
#Copyright (c) 2009,2010 <NAME>
import numpy as num
import cudamat as cm
from cudamat import reformat
class LinearAutoencoder(object):
    """Linear autoencoder on the GPU (cudamat), trained by minibatch SGD
    with momentum.  Matrices are column-major minibatches: columns are cases.
    """

    def __init__(self, numVis, numHid, mbsz = 256, initWeightSigma = 0.01):
        self._mbsz = mbsz
        self.numVis, self.numHid = numVis, numHid
        # Gaussian init for the encoder; decoder starts as its transpose
        # (tied initialization, trained independently afterwards).
        self.visToHid = initWeightSigma*num.random.randn(numVis, numHid)
        self.hidToVis = self.visToHid.transpose().copy()#initWeightSigma*num.random.randn(numHid, numVis)#
        self.init_weight_storage()
        self.initTemporary()
        self.inp = None
        self.learnRate = 0.0001
        self.momentum = 0.9

    def getMBSZ(self):
        return self._mbsz

    def setMBSZ(self, newMBSZ):
        # Temporaries are shaped (features, mbsz), so they must be reallocated.
        self._mbsz = newMBSZ
        self.initTemporary()

    mbsz = property(getMBSZ,setMBSZ)

    def packWeights(self):
        """Copy both weight matrices from the GPU into a host-side dict."""
        d = {}
        self.visToHid.copy_to_host()
        d["visToHid"] = self.visToHid.numpy_array.copy()
        self.hidToVis.copy_to_host()
        d["hidToVis"] = self.hidToVis.numpy_array.copy()
        return d

    def loadWeights(self, wDict):
        """Load host-side weights (as produced by packWeights) onto the GPU."""
        for w_name in self.weightVariableNames():
            assert( wDict.has_key(w_name) )
            w = wDict[w_name]
            assert( self.__dict__[w_name].numpy_array.shape == wDict[w_name].shape )
            self.__dict__[w_name] = cm.CUDAMatrix(reformat(w))

    def weightVariableNames(self):
        # Names of weight attributes; each also gets a "d<name>" velocity buffer.
        return "visToHid", "hidToVis"

    def init_weight_storage(self):
        """Move weights to the GPU and allocate zeroed momentum buffers."""
        for name in self.weightVariableNames():
            w = self.__dict__[name]
            self.__dict__[name] = cm.CUDAMatrix(reformat(w))
            self.__dict__["d"+name] = cm.CUDAMatrix(reformat(0.0 * w))

    def initTemporary(self):
        """Allocate GPU scratch buffers sized to the current minibatch."""
        self.hid = cm.CUDAMatrix(reformat(num.zeros((self.numHid, self.mbsz))))
        self.out = cm.CUDAMatrix(reformat(num.zeros((self.numVis, self.mbsz))))
        self.delta = cm.CUDAMatrix(reformat(num.zeros((self.numHid, self.mbsz))))
        self.tempVisMB = cm.CUDAMatrix(reformat(num.zeros((self.numVis, self.mbsz))))

    def encode(self):
        # hid = visToHid^T * inp
        cm.dot(self.visToHid.T, self.inp, target = self.hid)

    def decode(self):
        # out = hidToVis^T * hid
        cm.dot(self.hidToVis.T, self.hid, target = self.out)

    def fprop(self):
        """
        self.inp must reside on the gpu and be initialized correctly.
        """
        self.encode()
        self.decode()

    def curRecErr(self):
        """Squared Euclidean reconstruction error of the current minibatch."""
        self.inp.subtract(self.out, target = self.tempVisMB)
        return self.tempVisMB.euclid_norm()**2

    def bprop(self):
        """Accumulate (momentum-decayed) gradients into the d* buffers."""
        #apply momentum
        self.dhidToVis.scalar_mult(self.momentum)
        self.dvisToHid.scalar_mult(self.momentum)

        #NB: we have the opposite sign convention here from the usual way
        self.out.subtract(self.inp) # compute error
        self.dhidToVis.add_dot(self.hid, self.out.T)
        cm.dot(self.hidToVis, self.out, target = self.delta)
        self.dvisToHid.add_dot(self.inp, self.delta.T)

    def step(self, data):
        """One SGD step on one minibatch; returns its reconstruction error."""
        if isinstance(data, cm.CUDAMatrix):
            self.inp = data
        else:
            self.inp = cm.CUDAMatrix(reformat(data))
        self.fprop()
        recErr = self.curRecErr()
        self.bprop()
        for j, wname in enumerate(self.weightVariableNames()):
            #NOTE THE UNUSUAL SIGN CONVENTION HERE
            self.__dict__[wname].subtract_mult( self.__dict__["d"+wname], self.learnRate/self.mbsz )
        return recErr

    def train(self, freshData, epochs):
        """Generator: train for *epochs* passes over freshData(), yielding the
        per-epoch reconstruction error normalized by dims and minibatch size.
        """
        for ep in range(epochs):
            recErr = 0.0
            numMinibatches = 0
            for mb in freshData():
                recErr += self.step(mb)
                numMinibatches += 1
            yield recErr/numMinibatches/self.numVis/self.mbsz
| StarcoderdataPython |
# file: y-cruncher/src/results.py
#!/usr/bin/env python3
import collections
import datetime
import math
import os
import pathlib
import re
import sys
import urllib.parse
from lib.ParseCruncherValidation import ParseCruncherValidation
# see "What Constants are Tracked" at http://www.numberworld.org/y-cruncher/records.html
priorities = {
"Pi": 1,
"e": 2,
"Euler-Mascheroni Constant": 3,
"Sqrt(2)": 4,
"Sqrt(200)": 4,
"Golden Ratio": 5,
"Sqrt(125)": 5,
"Log(2)": 6,
"Zeta(3) - Apery's Constant": 7,
"Catalan's Constant": 8,
"Lemniscate": 9,
"Lemniscate Constant": 9,
"Gamma(¼)": 10,
"Gamma(⅓)": 11,
"Log(10)": 12,
"Zeta(5)": 13
}
def removesuffix(self, suffix):
    """Backport of str.removesuffix: drop a trailing *suffix* if present."""
    n = len(suffix)
    # n == 0 must be special-cased: self[:-0] would yield the empty string.
    return self[:-n] if n and self.endswith(suffix) else self[:]
def round_to_significant_figures(number, figures):
    """Truncate a positive *number* to *figures* significant figures.

    Note: despite the name, this truncates toward zero rather than rounding.
    """
    drop = 10 ** (math.ceil(math.log10(number)) - figures)
    return int(drop * int(number / drop))
suffixes = {12: 't', 9: 'b', 6: 'm', 3: 'k'}
def digit_string(digits):
    """Compact human-readable digit count, e.g. 25000000 -> '25m', 1500000 -> '1.5m'."""
    pairs = list(suffixes.items())
    # Fall back to the last (smallest) bucket, matching the old loop's
    # behaviour when no threshold is reached.
    power, suffix = pairs[-1] if pairs else (0, '')
    for candidate_power, candidate_suffix in pairs:
        if digits >= 10 ** candidate_power:
            power, suffix = candidate_power, candidate_suffix
            break
    return removesuffix(str(digits / 10 ** power), '.0') + suffix
# Filenames end with ' - YYYYMMDD-HHMMSS'; group 1 is the date, group 2 the time.
re_filename_date_time = re.compile(r'.* - (\d{8})-(\d{6})')
# Layout: <repo>/fetch holds raw downloads, <repo>/results the sorted tree
# plus the three generated report directories created below.
base_dir = pathlib.Path(sys.path[0]).parent
fetch_dir = base_dir / 'fetch'
results_dir = base_dir / 'results'
results_by_instance_dir = results_dir / 'Tables by instance'
os.makedirs(results_by_instance_dir, exist_ok=True)
results_by_constant_dir = results_dir / 'Tables by constant'
os.makedirs(results_by_constant_dir, exist_ok=True)
best_times_by_constant_dir = results_dir / 'Best times by constant'
os.makedirs(best_times_by_constant_dir, exist_ok=True)
# sort fetched results into directory by platform / instance / constant
for filename in fetch_dir.glob('*.txt'):
    validation = ParseCruncherValidation(filename)
    # '/' in constant names is replaced with U+2215 so it is filesystem-safe.
    dir = results_dir / validation.platform / validation.instance /'{} [{}]'.format(validation.constant.replace('/', '∕'), validation.algorithm)
    os.makedirs(dir, exist_ok=True)
    # Each run is a triplet of files sharing a stem; move them together.
    for ext in ['.cfg', '.out', '.txt']:
        src = filename.with_suffix(ext)
        dst = (dir / filename.stem).with_suffix(ext)
        src.rename(dst)
# instance => set of digit counts seen for that instance
digits_by_instance = collections.defaultdict(set)
# execution (constant, algorithm) => set of digit counts
digits_by_execution = collections.defaultdict(set)
# instance => execution => digits => data dictionary of the fastest run
executions_by_instance = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(int)))
# execution => instance => digits => data dictionary of the fastest run
instances_by_execution = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(int)))
for platform in os.listdir(results_dir):
    platform_dir = results_dir / platform
    if not platform_dir.is_dir():
        continue
    for machine_type in os.listdir(platform_dir):
        machine_type_dir = platform_dir / machine_type
        if not machine_type_dir.is_dir():
            continue
        instance = (platform, machine_type)
        for constant in os.listdir(machine_type_dir):
            constant_dir = machine_type_dir / constant
            if not constant_dir.is_dir():
                continue
            # basename => set of suffixes present, to detect incomplete triplets
            basename_groups = collections.defaultdict(set)
            for filename in pathlib.Path.iterdir(constant_dir):
                basename_groups[filename.with_suffix('')].add(filename.suffix)
                if not filename.is_file() or filename.suffix != '.txt':
                    continue
                validation = ParseCruncherValidation(filename)
                execution = (validation.constant, validation.algorithm)
                digits_by_instance[instance].add(validation.digits)
                # don't display low-digits results in the execution tables
                if validation.digits >= 25_000_000:
                    digits_by_execution[execution].add(validation.digits)
                # Group 1 of the filename pattern is the YYYYMMDD date stamp.
                date = datetime.datetime.strptime(re_filename_date_time.search(filename.stem)[1], "%Y%m%d")
                data = {'filename': filename, 'platform': validation.platform, 'instance': validation.instance,
                        'date': date, 'milliseconds': validation.milliseconds}
                if validation.digits in executions_by_instance[instance][execution]:
                    old_data = executions_by_instance[instance][execution][validation.digits]
                    milliseconds = old_data['milliseconds']
                    current_filename = old_data['filename']
                    # only replace if faster (ties broken by filename order)
                    if validation.milliseconds < milliseconds or \
                            (validation.milliseconds == milliseconds and filename < current_filename):
                        print(f'removing {current_filename}')
                        current_filename.unlink()
                        current_filename.with_suffix('.cfg').unlink()
                        current_filename.with_suffix('.out').unlink()
                        executions_by_instance[instance][execution][validation.digits] = data
                        instances_by_execution[execution][instance][validation.digits] = data
                    else:
                        # BUGFIX: previously printed the literal '(unknown)';
                        # report which file is actually being discarded.
                        print(f'removing {filename}')
                        filename.unlink()
                        filename.with_suffix('.cfg').unlink()
                        filename.with_suffix('.out').unlink()
                else:
                    executions_by_instance[instance][execution][validation.digits] = data
                    instances_by_execution[execution][instance][validation.digits] = data
            for basename, group in basename_groups.items():
                if group != {'.cfg', '.out', '.txt'}:
                    print(f"missing files for '{basename} with {group}'")
# file per instance
# table execution x digits
# cells milliseconds and digits/sec
for instance, executions in executions_by_instance.items():
    platform, machine_type = instance
    digits = digits_by_instance[instance]
    with open(results_by_instance_dir / '{} [{}].csv'.format(platform, machine_type), 'w') as file:
        # Header row: each digit count spans two columns (seconds, rate).
        file.write(',')
        for digit in sorted(digits):
            file.write(',')
            file.write(digit_string(digit))
            file.write(',')
        file.write('\n')
        for execution in sorted(executions):
            constant, algorithm = execution
            times = executions[execution]
            file.write('"{} [{}]",'.format(constant, algorithm))
            # Second column carries the record-tracking priority, if any.
            if constant in priorities:
                file.write(str(priorities[constant]))
            for digit in sorted(digits):
                file.write(',')
                if digit in times:
                    data = times[digit]
                    milliseconds = data['milliseconds']
                    file.write('{:.3f}'.format(milliseconds / 1000))
                    file.write(',')
                    rate = digit * 1000 / milliseconds
                    file.write('"{:,}"'.format(round_to_significant_figures(rate, 3)))
                else:
                    # Keep the two-cell layout even when the digit is missing.
                    file.write(',')
            file.write('\n')
# file per constant
# table instance x digits
# cells milliseconds and digits/sec
for execution, instances in instances_by_execution.items():
    constant, algorithm = execution
    digits = sorted(digits_by_execution[execution])
    # digit => data dict of the overall fastest run across all instances
    best_times = {}
    with open(results_by_constant_dir / '{} [{}].csv'.format(constant, algorithm), 'w') as file:
        # NOTE(review): unlike the by-instance table, no leading ',' is written
        # before the header columns — confirm the header alignment is intended.
        for digit in digits:
            file.write(',')
            file.write(digit_string(digit))
            file.write(',')
        file.write('\n')
        for instance in sorted(instances):
            platform, machine_type = instance
            times = instances[instance]
            file.write('"{} [{}]",'.format(platform, machine_type))
            for idx, digit in enumerate(digits):
                if idx > 0:
                    file.write(',')
                if digit in times:
                    data = times[digit]
                    milliseconds = data['milliseconds']
                    file.write('{:.3f}'.format(milliseconds / 1000))
                    file.write(',')
                    rate = round_to_significant_figures(digit * 1000 / milliseconds, 3)
                    file.write('"{:,}"'.format(rate))
                    # Track the fastest run per digit count for the markdown report.
                    if digit not in best_times or milliseconds < best_times[digit]['milliseconds']:
                        best_times[digit] = data
                else:
                    file.write(',')
            file.write('\n')
    # create markdown files with links to the fastest execution for each computation size
    with open(best_times_by_constant_dir / f'{constant} [{algorithm}].md', 'w') as file:
        file.write('| Digits | Seconds | Digits / Second | Platform | Instance | Date | Files |\n')
        file.write('| ------ | ------- | --------------- | -------- | -------- | ---- | ----- |\n')
        for digit in sorted(best_times):
            data = best_times[digit]
            filename = data['filename']
            platform = data['platform']
            instance = data['instance']
            date = data['date']
            milliseconds = data['milliseconds']
            rate = round_to_significant_figures(digit * 1000 / milliseconds, 3)
            # Links are relative to the report directory and URL-escaped.
            link = pathlib.Path('..') / filename.relative_to(results_dir).with_suffix('')
            quoted = urllib.parse.quote(str(link))
            file.write(f'| {digit_string(digit)} | {milliseconds/1000:.3f} | {rate:,} | {platform} | {instance} | {date:%Y-%m-%d} | ')
            for suffix in ['cfg', 'out', 'txt']:
                file.write(f'[{suffix}]({quoted}.{suffix}) ')
            file.write(f'|\n')
| StarcoderdataPython |
381482 | <reponame>RichardOkubo/PythonScripts<gh_stars>0
"""Avaliador de relação custo-benefício."""
import skfuzzy as fuzz
from numpy import arange
from skfuzzy import control as ctrl
def main():
    """Run the interactive cost/benefit simulator (prompts are in Portuguese)."""
    print("\n------------------------- SIMULADOR -------------------------\n")
    custo_max = round(
        float(
            input(
                """Qual é o custo máximo que você está
disposto a pagar?
::: R$"""
            )
        )
    )
    print()
    custo_ = float(
        input(
            f"""Em um intervalo de 0 a {round(custo_max)},
Qual é o custo?
::: R$"""
        )
    )
    print()
    # BUGFIX: option 5 in the menu below closed with '}' instead of ']'.
    beneficio_ = int(
        input(
            """Em uma escala de 1 a 5, como você avalia o
benefício esperado?
[ 1 ] baixo
[ 2 ] baixo-moderado
[ 3 ] moderado
[ 4 ] moderado-alto
[ 5 ] alto
::: """
        )
    )
    print()
    while True:
        sim_nao = input(
            """Mostrar status do simulador? [s/n]
::: """
        ).lower()
        # BUGFIX: the old test `sim_nao in "sn"` also accepted '' (empty
        # input) and 'sn'; only the exact answers are valid now.
        if sim_nao in ("s", "n"):
            status = sim_nao == "s"
            break
    print()
    print(
        custo_beneficio_(
            custo=custo_,
            beneficio=beneficio_,
            status_=status,
            modelo=lógica_fuzzy(max_=custo_max),
        )
    )
    print("\n---------------------- FIM DO PROGRAMA ----------------------\n")
def custo_beneficio_(
    custo: float = 50.0, beneficio: int = 3, status_=False, modelo: callable = None
) -> str:
    """Score the cost/benefit trade-off and return an advice message.

    *modelo* must expose ``input`` (mapping), ``compute()`` and ``output``
    (mapping) — see ``lógica_fuzzy``. When *status_* is true the raw fuzzy
    score is appended to the message.
    """
    modelo.input["custo"] = custo
    modelo.input["benefício"] = beneficio
    modelo.compute()
    resultado = modelo.output["custo-benefício"]
    # Three equal bands over the 0-100 output universe.
    # BUGFIX: user-facing typos fixed — 'SUGESÃO' -> 'SUGESTÃO', stray '¨'
    # removed, and 'Alto' normalised to 'ALTO' for consistency.
    if resultado < (100 * 1 / 3):
        return f"""SUGESTÃO: custo-benefício BAIXO;
logo, não aceite. {f"STATUS: {resultado:.1f}" if status_ else ""}"""
    elif resultado < (100 * 2 / 3):
        return f"""SUGESTÃO: custo-benefício MODERADO;
logo, negocie. {f"STATUS: {resultado:.1f}" if status_ else ""}"""
    else:
        return f"""SUGESTÃO: custo-benefício ALTO;
logo, aceite. {f"STATUS: {resultado:.1f}" if status_ else ""}"""
def lógica_fuzzy(max_=101):
    """Build and return the fuzzy cost/benefit control-system simulator.

    *max_* is the upper bound of the cost universe (inclusive, since the
    universe is ``arange(max_ + 1)``).
    """
    custo = ctrl.Antecedent(universe=arange(max_ + 1), label="custo")
    beneficio = ctrl.Antecedent(universe=arange(1, 6), label="benefício")
    custo_beneficio = ctrl.Consequent(universe=arange(101), label="custo-benefício")
    custo.automf(names=["baixo", "moderado", "alto"])
    beneficio.automf(variable_type="quant", names=["baixo", "moderado", "alto"])
    custo_beneficio.automf(names=["baixo", "moderado", "alto"])
    # custo.view()
    # beneficio.view()
    # custo_beneficio.view()
    # Rule table (cost level, benefit level) -> verdict. Replaces nine nearly
    # identical ctrl.Rule(...) blocks; order and labels match the originals.
    rule_table = [
        ("baixo", "alto", "alto"),
        ("moderado", "alto", "alto"),
        ("baixo", "moderado", "alto"),
        ("baixo", "baixo", "moderado"),
        ("moderado", "moderado", "moderado"),
        ("alto", "alto", "moderado"),
        ("alto", "moderado", "baixo"),
        ("moderado", "baixo", "baixo"),
        ("alto", "baixo", "baixo"),
    ]
    rules = [
        ctrl.Rule(
            antecedent=(custo[cost] & beneficio[benefit]),
            consequent=custo_beneficio[verdict],
            label="regra {}".format(index),
        )
        for index, (cost, benefit, verdict) in enumerate(rule_table, start=1)
    ]
    sistema_de_controle = ctrl.ControlSystem(rules=rules)
    simulador = ctrl.ControlSystemSimulation(control_system=sistema_de_controle)
    # custo.view(sim=simulador)
    # beneficio.view(sim=simulador)
    # custo_beneficio.view(sim=simulador)
    return simulador
if __name__ == "__main__":
    main()
    # Keep the console window open until the user presses Enter.
    input()
| StarcoderdataPython |
22276 | ##
## Evaluation Script
##
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator
# Defaults live in module-level constants and are copied per call, fixing the
# classic mutable-default-argument pitfall (the old signature shared one dict
# and one ndarray across every call).
_DEFAULT_LABEL_INDICES = {'brick': 0, 'ball': 1, 'cylinder': 2}
_DEFAULT_CHANNEL_MEANS = (147.12697, 160.21092, 167.70029)
def evaluate(label_indices = None,
             channel_means = None,
             data_path = '../data',
             minibatch_size = 32,
             num_batches_to_test = 10,
             checkpoint_dir = 'tf_data/sample_model'):
    """Evaluate the sample model on test minibatches.

    Returns the fraction of correctly classified images, rounded to 4 places.
    """
    if label_indices is None:
        label_indices = dict(_DEFAULT_LABEL_INDICES)
    if channel_means is None:
        channel_means = np.array(_DEFAULT_CHANNEL_MEANS)
    print("1. Loading data")
    data = data_loader(label_indices = label_indices,
                       channel_means = channel_means,
                       train_test_split = 0.5,
                       data_path = data_path)
    print("2. Instantiating the model")
    M = Model(mode = 'test')
    # Evaluate on test images:
    GT = Generator(data.test.X, data.test.y, minibatch_size = minibatch_size)
    num_correct = 0
    num_total = 0
    print("3. Evaluating on test images")
    for i in range(num_batches_to_test):
        # generate() advances GT to the next minibatch (side effect).
        GT.generate()
        yhat = M.predict(X = GT.X, checkpoint_dir = checkpoint_dir)
        correct_predictions = (np.argmax(yhat, axis = 1) == np.argmax(GT.y, axis = 1))
        num_correct += np.sum(correct_predictions)
        num_total += len(correct_predictions)
    accuracy = round(num_correct/num_total,4)
    return accuracy
def calculate_score(accuracy):
    """Map test accuracy onto the 4..10 grading scale."""
    # Bands are checked from the highest threshold down; first match wins.
    grade_bands = (
        (0.92, 10),
        (0.90, 9),
        (0.85, 8),
        (0.80, 7),
        (0.75, 6),
        (0.70, 5),
    )
    for threshold, grade in grade_bands:
        if accuracy >= threshold:
            return grade
    return 4
# Script entry point: time a full evaluation run and report accuracy + score.
if __name__ == '__main__':
    program_start = time.time()
    accuracy = evaluate()
    score = calculate_score(accuracy)
    program_end = time.time()
    total_time = round(program_end - program_start,2)
    print()
    print("Execution time (seconds) = ", total_time)
    print('Accuracy = ' + str(accuracy))
    print("Score = ", score)
    print()
| StarcoderdataPython |
11394468 | <filename>tests/test_redis/test_bitop.py
from common import *
def test_bitop_in_master_slave():
    """BITOP variants must all succeed when proxying to a master/slave setup."""
    conn = get_redis_conn(is_ms=True)
    conn.execute_command('AUTH', redis_passwd)
    conn.mset({'a': '111', 'b': '222', 'c': '333', 'd': '444'})
    for op, keys in (
        ('and', ('a', 'b', 'c', 'd')),
        ('or', ('a', 'b', 'c')),
        ('xor', ('a', 'b')),
        ('not', ('a',)),
    ):
        assert conn.bitop(op, 'new_key', *keys)
def test_bitop_in_shards():
    """BITOP is a cross-key command, so the sharded proxy must reject it."""
    conn = get_redis_conn(is_ms=False)
    conn.mset({'a': '111', 'b': '222', 'c': '333', 'd': '444'})
    assert_fail('unknown command', conn.bitop, 'and', 'new-key', 'a', 'b', 'c', 'd')
| StarcoderdataPython |
9619980 | # coding=utf-8
"""
@author: <NAME>
@date: 07/20/2020
@description: Transform <filename.csv> data file to <torch.Dataset>
"""
import torch
from torch.utils.data import Dataset, DataLoader
from models.hierachical_attention_network.utils import load_ndarray
import torch.nn as nn
import numpy as np
class AgNewsDataset(Dataset):
    """AG News dataset backed by pre-tokenised ndarrays saved on disk.

    __getitem__ yields ``(label, features)`` pairs of long tensors, suitable
    for a DataLoader feeding the hierarchical attention network.
    """
    def __init__(self, docs_file, labels_file):
        # Load ndarrays from disk.
        self.docs_array = load_ndarray(docs_file)
        self.labels_array = load_ndarray(labels_file)
        # Convert once to long tensors (token ids / class indices).
        # BUGFIX: the labels_tensor assignment was duplicated; removed.
        self.docs_tensor = torch.from_numpy(self.docs_array).long()
        self.labels_tensor = torch.from_numpy(self.labels_array).long()
    def __getitem__(self, idx):
        features = self.docs_tensor[idx]
        label = self.labels_tensor[idx]
        return label, features
    def __len__(self):
        return self.docs_array.shape[0]
| StarcoderdataPython |
3531603 | from ..infrastructure import DockerUtils, WindowsUtils
from .base import DiagnosticBase
import os, platform
from os.path import abspath, dirname, join
class diagnostic8Gig(DiagnosticBase):
    """Diagnostic for the Docker 8GiB filesystem-layer bug (moby/moby#37581)."""
    # The tag we use for built images
    IMAGE_TAG = 'adamrehn/ue4-docker/diagnostics:8gig'
    def getName(self):
        '''
        Returns the human-readable name of the diagnostic
        '''
        return 'Check for Docker 8GiB filesystem layer bug'
    def getDescription(self):
        '''
        Returns a description of what the diagnostic does
        '''
        return '\n'.join([
            'This diagnostic determines if the Docker daemon suffers from the 8GiB filesystem',
            'layer bug reported here: https://github.com/moby/moby/issues/37581',
            '',
            'This bug was fixed in Docker CE 18.09.0, but still exists in some versions of',
            'Docker CE under Windows 10 and Docker EE under Windows Server.',
            '',
            'You can force the use of a Linux image under Windows hosts by specifying the',
            '--linux flag, which can be useful when testing Docker Desktop or LCOW support.'
        ])
    def run(self, logger, args=None):
        '''
        Runs the diagnostic
        '''
        # BUGFIX: args previously defaulted to a shared mutable list ([]).
        args = [] if args is None else args
        # Determine which image platform we will build the Dockerfile for (default is the host platform unless overridden)
        containerPlatform = 'linux' if '--linux' in args or platform.system().lower() != 'windows' else 'windows'
        # Verify that the user isn't trying to test Windows containers under Windows 10 when in Linux container mode (or vice versa)
        dockerPlatform = DockerUtils.info()['OSType'].lower()
        if containerPlatform == 'windows' and dockerPlatform == 'linux':
            logger.error('[8gig] Error: attempting to test Windows containers while Docker Desktop is in Linux container mode.', False)
            logger.error('[8gig] Use the --linux flag if you want to test Linux containers instead.', False)
            return False
        elif containerPlatform == 'linux' and dockerPlatform == 'windows':
            logger.error('[8gig] Error: attempting to test Linux containers while Docker Desktop is in Windows container mode.', False)
            logger.error('[8gig] Remove the --linux flag if you want to test Windows containers instead.', False)
            return False
        # Under Windows host systems, determine the appropriate container image base tag
        buildArgs = [
            '-m', '4GiB',
            '--build-arg',
            'BASETAG=' + WindowsUtils.getReleaseBaseTag(WindowsUtils.getWindowsRelease())
        ] if containerPlatform == 'windows' else []
        # Attempt to build the Dockerfile
        contextDir = join(dirname(dirname(abspath(__file__))), 'dockerfiles', 'diagnostics', '8gig', containerPlatform)
        try:
            logger.action('[8gig] Attempting to build an image with an 8GiB filesystem layer...', False)
            command = ['docker', 'build', '-t', diagnostic8Gig.IMAGE_TAG, contextDir] + buildArgs
            self._printAndRun(logger, '[8gig] ', command, check=True)
            built = True
        except Exception:
            # BUGFIX: a bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; only genuine build errors mark the test failed.
            logger.error('[8gig] Build failed!')
            built = False
        # Remove any built images, including intermediate images
        logger.action('[8gig] Cleaning up...', False)
        if built:
            self._printAndRun(logger, '[8gig] ', ['docker', 'rmi', diagnostic8Gig.IMAGE_TAG])
        self._printAndRun(logger, '[8gig] ', ['docker', 'system', 'prune', '-f'])
        # Inform the user of the outcome of the diagnostic
        if built:
            logger.action('[8gig] Diagnostic succeeded! The Docker daemon can build images with 8GiB filesystem layers.\n')
        else:
            logger.error('[8gig] Diagnostic failed! The Docker daemon cannot build images with 8GiB filesystem layers.\n', True)
        return built
| StarcoderdataPython |
1601253 | <reponame>adyaksaw/xendit-python
import pytest
import time
from .base_integration_test import BaseIntegrationTest
from tests.sampleresponse.direct_debit import (
customer_response,
multi_customer_response,
linked_account_response,
accessible_accounts_response,
payment_method_response,
multi_payment_method_response,
payment_response,
multi_payment_response,
)
from xendit import DirectDebitPaymentMethodType
class TestDirectDebit(BaseIntegrationTest):
    """Integration tests for the Xendit Direct Debit API.

    The class-scoped fixtures form a dependency chain: customer ->
    linked account (token + OTP validation + accessible accounts) ->
    payment method -> payment. Each test checks that the live API response
    carries the same keys as the recorded sample response.
    """
    @pytest.fixture(scope="class")
    def DirectDebit(self, xendit_instance):
        """Shorthand for the DirectDebit client of the shared Xendit instance."""
        return xendit_instance.DirectDebit
    @pytest.fixture(scope="class")
    def customer_data(self, DirectDebit):
        """Create (once per class) a customer to attach accounts to."""
        customer = DirectDebit.create_customer(
            reference_id=f"merc-{int(time.time())}",
            email="<EMAIL>",
            given_names="Adyaksa",
        )
        return customer
    @pytest.fixture(scope="class")
    def linked_account_data(self, DirectDebit, customer_data):
        """Tokenize a debit card, validate the OTP and list accessible accounts."""
        customer = customer_data
        linked_account_token = DirectDebit.initialize_tokenization(
            customer_id=customer.id,
            channel_code="DC_BRI",
            properties={
                "account_mobile_number": "+62818555988",
                "card_last_four": "8888",
                "card_expiry": "06/24",
                "account_email": "<EMAIL>",
            },
        )
        # "333000" is the fixed OTP accepted by the sandbox environment.
        validated_linked_account_token = DirectDebit.validate_token_otp(
            linked_account_token_id=linked_account_token.id, otp_code="333000",
        )
        accessible_accounts = DirectDebit.get_accessible_accounts_by_token(
            linked_account_token_id=validated_linked_account_token.id
        )
        return linked_account_token, validated_linked_account_token, accessible_accounts
    @pytest.fixture(scope="class")
    def payment_method_data(self, DirectDebit, linked_account_data):
        """Create a debit-card payment method from the first accessible account."""
        linked_account_token, _, accessible_accounts = linked_account_data
        payment_method = DirectDebit.create_payment_method(
            customer_id=linked_account_token.customer_id,
            type=DirectDebitPaymentMethodType.DEBIT_CARD,
            properties={"id": accessible_accounts[0].id},
        )
        return payment_method
    @pytest.fixture(scope="class")
    def payment_data(self, DirectDebit, payment_method_data):
        """Create an OTP-protected payment on the class-level payment method."""
        payment_method = payment_method_data
        payment = DirectDebit.create_payment(
            reference_id=f"direct-debit-ref-{int(time.time())}",
            payment_method_id=payment_method.id,
            currency="IDR",
            amount="60000",
            callback_url="http://webhook.site/",
            enable_otp=True,
            idempotency_key=f"idemp_key-{int(time.time())}",
        )
        return payment
    def test_create_customer_return_correct_keys(self, customer_data):
        customer = customer_data
        self.assert_returned_object_has_same_key_as_sample_response(
            customer, customer_response()
        )
    @pytest.mark.skip(reason="Currently not implemented by Direct Debit")
    def test_get_customer_by_ref_id_return_correct_keys(
        self, DirectDebit, customer_data
    ):
        customer = customer_data
        tested_customer = DirectDebit.get_customer_by_ref_id(
            reference_id=customer.reference_id,
        )
        self.assert_returned_object_has_same_key_as_sample_response(
            tested_customer[0], multi_customer_response()
        )
    def test_linked_account_token_scheme_return_correct_keys(self, linked_account_data):
        """All three artifacts of the tokenization flow must match the samples."""
        (
            linked_account_token,
            validated_linked_account_token,
            accessible_accounts,
        ) = linked_account_data
        self.assert_returned_object_has_same_key_as_sample_response(
            linked_account_token, linked_account_response()
        )
        self.assert_returned_object_has_same_key_as_sample_response(
            validated_linked_account_token, linked_account_response()
        )
        self.assert_returned_object_has_same_key_as_sample_response(
            accessible_accounts[0], accessible_accounts_response()[0]
        )
    def test_create_payment_method_return_correct_keys(
        self, DirectDebit, payment_method_data
    ):
        payment_method = payment_method_data
        self.assert_returned_object_has_same_key_as_sample_response(
            payment_method, payment_method_response()
        )
    def test_get_payment_methods_by_ref_id_return_correct_keys(
        self, DirectDebit, customer_data
    ):
        customer = customer_data
        payment_methods = DirectDebit.get_payment_methods_by_customer_id(
            customer_id=customer.id,
        )
        self.assert_returned_object_has_same_key_as_sample_response(
            payment_methods[0], multi_payment_method_response()[0]
        )
    def test_create_payment_return_correct_keys(self, payment_data):
        payment = payment_data
        self.assert_returned_object_has_same_key_as_sample_response(
            payment, payment_response()
        )
    def test_validate_payment_otp_return_correct_keys(self, DirectDebit, payment_data):
        """Validate the payment OTP ("222000" is the sandbox success code)."""
        payment = payment_data
        validated_payment = DirectDebit.validate_payment_otp(
            direct_debit_id=payment.id, otp_code="222000",
        )
        self.assert_returned_object_has_same_key_as_sample_response(
            validated_payment, payment_response()
        )
    def test_get_payment_status_return_correct_keys(self, DirectDebit, payment_data):
        payment = payment_data
        payment = DirectDebit.get_payment_status(direct_debit_id=payment.id)
        self.assert_returned_object_has_same_key_as_sample_response(
            payment, payment_response()
        )
    def test_get_payment_status_by_ref_id_return_correct_keys(
        self, DirectDebit, payment_data
    ):
        payment = payment_data
        payments = DirectDebit.get_payment_status_by_ref_id(
            reference_id=payment.reference_id
        )
        self.assert_returned_object_has_same_key_as_sample_response(
            payments[0], multi_payment_response()[0]
        )
| StarcoderdataPython |
6516112 | # -*- coding: utf-8 -
import codecs
import io
import os
from setuptools import setup
# Read the long description from README.md (UTF-8) for PyPI rendering.
with io.open(os.path.join(os.path.dirname(__file__), 'README.md'),
             encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the bernhard Riemann client.
setup(
    name = 'bernhard',
    version = '0.2.6',
    description = 'Python client for Riemann',
    long_description = long_description,
    author = '<NAME>',
    author_email = '<EMAIL>',
    license = 'ASF2.0',
    url = 'http://github.com/banjiewen/bernhard.git',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: Log Analysis',
        'Topic :: Utilities',
        'Topic :: System :: Networking :: Monitoring'
    ],
    zip_safe = False,
    packages = ['bernhard'],
    include_package_data = True,
    # Riemann speaks protobuf over the wire.
    install_requires=['protobuf >= 2.4']
)
| StarcoderdataPython |
1629680 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-20 13:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Auto-generated migration: adds PartnerAuditReport and widens the
    staff-size choice fields on Partner. Avoid hand-editing the operations."""
    dependencies = [
        ('common', '0007_auto_20171031_0715'),
        ('partner', '0040_auto_20171212_0703'),
    ]
    operations = [
        migrations.CreateModel(
            name='PartnerAuditReport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('org_audit', models.CharField(choices=[('Int', 'Internal audit'), ('Fin', 'Financial statement audit'), ('Don', 'Donor audit')], max_length=3)),
                ('link_report', models.URLField(blank=True, null=True)),
                ('partner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='audit_reports', to='partner.Partner')),
                ('most_recent_audit_report', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='partner_audit_reports', to='common.CommonFile')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AlterField(
            model_name='partner',
            name='staff_globally',
            field=models.CharField(blank=True, choices=[('SG1', '1 to 10'), ('SG2', '11 to 25'), ('SG3', '26 to 50'), ('SG4', '51 to 100'), ('SG5', '101 to 250'), ('SG6', '251 to 500'), ('SG7', 'more than 500')], max_length=3, null=True),
        ),
        migrations.AlterField(
            model_name='partner',
            name='staff_in_country',
            field=models.CharField(blank=True, choices=[('SG1', '1 to 10'), ('SG2', '11 to 25'), ('SG3', '26 to 50'), ('SG4', '51 to 100'), ('SG5', '101 to 250'), ('SG6', '251 to 500'), ('SG7', 'more than 500')], max_length=3, null=True),
        ),
    ]
| StarcoderdataPython |
1688669 | from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
# Route the site root to the activities app; serve user-uploaded media files
# (static() is a no-op unless DEBUG is enabled).
urlpatterns = [
    path('', include(('modules.activities.urls', 'activities'), namespace='activities')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
119703 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2020 ICON Foundation Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Any
from ...wallet.wallet import Wallet
BASE_TYPES = {bool, bytes, int, str, Wallet}
TYPE_NAME_TO_TYPE = {_type.__name__: _type for _type in BASE_TYPES}
def is_base_type(value: type) -> bool:
    """Return True if *value* is one of the directly convertible base types.

    Unhashable arguments (e.g. a list instance) cannot be set members; the
    resulting TypeError simply means "not a base type".
    """
    try:
        return value in BASE_TYPES
    # BUGFIX: narrowed from a bare 'except:' (which also swallowed
    # KeyboardInterrupt/SystemExit) to the only error membership can raise.
    except TypeError:
        return False
def name_to_type(type_name: str) -> type:
    # Raises KeyError for names outside BASE_TYPES.
    return TYPE_NAME_TO_TYPE[type_name]
def isinstance_ex(value: Any, _type: type) -> bool:
    """isinstance() variant that refuses bool values unless bool was requested.

    Needed because bool is a subclass of int, so a plain isinstance(True, int)
    would be True.
    """
    if type(value) is bool and _type is not bool:
        return False
    return isinstance(value, _type)
def base_object_to_str(value: Any) -> str:
    """Convert a single base-typed value to its string wire form.

    Note: bool values are covered by the int branch (bool subclasses int,
    and hex(True) == '0x1'), so the previous, unreachable
    ``isinstance(value, bool)`` branch has been removed.
    """
    if isinstance(value, Wallet):
        return value.get_address()
    elif isinstance(value, int):
        return hex(value)
    elif isinstance(value, bytes):
        return bytes_to_hex(value)
    elif isinstance(value, str):
        return value
    raise TypeError(f"Unsupported type: {type(value)}")
def object_to_str(value: Any) -> Union[Any]:
    """Recursively convert a value (base type, list, dict or None) to strings."""
    if is_base_type(type(value)):
        return base_object_to_str(value)
    if isinstance(value, list):
        return [object_to_str(item) for item in value]
    if isinstance(value, dict):
        return {key: object_to_str(item) for key, item in value.items()}
    if value is None:
        return None
    raise TypeError(f"Unsupported type: {type(value)}")
def bytes_to_hex(value: bytes, prefix: str = "0x") -> str:
    """Render *value* as lowercase hex text with *prefix* prepended."""
    return prefix + value.hex()
| StarcoderdataPython |
1656263 | from Database import Database
class Users(object):
    """Data-access object for rows of the ``usuarios`` table.

    Methods return short Portuguese status strings (success/error) instead of
    raising, preserving the original UI-facing contract.
    """
    def __init__(self, idUsuario=0, nome='', telefone='', email='', usuario='', senha=''):
        self.info = {}
        self.idUsuario = idUsuario
        self.nome = nome
        self.telefone = telefone
        self.email = email
        self.usuario = usuario
        self.senha = senha
    def insertUser(self):
        """Insert this user's data as a new row."""
        database = Database()
        try:
            c = database.connection.cursor()
            # SECURITY: parameterised query replaces the previous str.format
            # interpolation, which allowed SQL injection. Assumes a DB-API
            # driver using the 'format' paramstyle (e.g. MySQL drivers);
            # use '?' placeholders for sqlite3.
            c.execute(
                "INSERT INTO usuarios (nome, telefone, email, usuario, senha) "
                "VALUES (%s, %s, %s, %s, %s)",
                (self.nome, self.telefone, self.email, self.usuario, self.senha),
            )
            database.connection.commit()
            c.close()
            return "Usuário cadastrado com sucesso!"
        except Exception:
            return "Ocorreu um erro ao cadastrar o usuário."
    def updateUser(self):
        """Update the row identified by ``self.idUsuario``."""
        database = Database()
        try:
            c = database.connection.cursor()
            c.execute(
                "UPDATE usuarios SET nome=%s, telefone=%s, email=%s, "
                "usuario=%s, senha=%s WHERE idusuario=%s",
                (self.nome, self.telefone, self.email, self.usuario,
                 self.senha, self.idUsuario),
            )
            database.connection.commit()
            c.close()
            return "Usuário atualizado com sucesso!"
        except Exception:
            return "Ocorreu um erro ao atualizar o usuário."
    def deleteUser(self):
        """Delete the row identified by ``self.idUsuario``."""
        database = Database()
        try:
            c = database.connection.cursor()
            c.execute(
                "DELETE FROM usuarios WHERE idusuario=%s",
                (self.idUsuario,),
            )
            database.connection.commit()
            c.close()
            # BUGFIX: the old messages said 'atualizado' (copy/paste from
            # updateUser); they now describe the delete operation.
            return "Usuário excluído com sucesso!"
        except Exception:
            return "Ocorreu um erro ao excluir o usuário."
    def getUser(self, idusuario):
        """Load the row *idusuario* into this instance's attributes."""
        database = Database()
        try:
            c = database.connection.cursor()
            c.execute(
                "SELECT * FROM usuarios WHERE idusuario=%s",
                (idusuario,),
            )
            # At most one row is expected; copy its columns onto self.
            for line in c:
                self.idUsuario = line[0]
                self.nome = line[1]
                self.telefone = line[2]
                self.email = line[3]
                self.usuario = line[4]
                self.senha = line[5]
            c.close()
            return "Usuário buscado com sucesso!"
        except Exception:
            return "Ocorreu um erro ao buscar o usuário."
| StarcoderdataPython |
3318272 | <filename>amqpstorm/management/exception.py
from amqpstorm.exception import AMQPError
from amqpstorm.exception import AMQP_ERROR_MAPPING
class ApiError(AMQPError):
    """Management Api Error"""
    def __init__(self, message=None, *args, **kwargs):
        self._message = message
        # AMQP reply code (e.g. 404), if the caller supplied one.
        self._error_code = kwargs.pop('reply_code', None)
        # NOTE(review): super(AMQPError, self) deliberately skips
        # AMQPError.__init__ and goes straight to its parent; confirm this
        # bypass is intentional and not a typo for super(ApiError, self).
        super(AMQPError, self).__init__(*args, **kwargs)
        if self._error_code not in AMQP_ERROR_MAPPING:
            return
        # Resolve the human-readable error type and documentation string.
        self._error_type = AMQP_ERROR_MAPPING[self._error_code][0]
        self._documentation = AMQP_ERROR_MAPPING[self._error_code][1]
    def __str__(self):
        # Prefer the mapped description when the reply code is known.
        if self._error_code in AMQP_ERROR_MAPPING:
            return '%s - %s' % (self.error_type, self.documentation)
        return self._message
class ApiConnectionError(AMQPError):
    """Management Api Connection Error"""
    # Marker subclass: raised for transport-level failures, no extra state.
    pass
| StarcoderdataPython |
87611 | <filename>_assignments/design-patterns/protocol/protocol_descriptor_b.py
"""
* Assignment: Protocol Descriptor ValueRange
* Complexity: easy
* Lines of code: 9 lines
* Time: 13 min
English:
1. Define descriptor class `ValueRange` with attributes:
a. `name: str`
b. `min: float`
c. `max: float`
d. `value: float`
2. Define class `Astronaut` with attributes:
a. `age = ValueRange('Age', min=28, max=42)`
b. `height = ValueRange('Height', min=150, max=200)`
3. Setting `Astronaut` attribute should invoke boundary check of `ValueRange`
4. Run doctests - all must succeed
Polish:
1. Zdefiniuj klasę-deskryptor `ValueRange` z atrybutami:
a. `name: str`
b. `min: float`
c. `max: float`
d. `value: float`
2. Zdefiniuj klasę `Astronaut` z atrybutami:
a. `age = ValueRange('Age', min=28, max=42)`
b. `height = ValueRange('Height', min=150, max=200)`
3. Ustawianie atrybutu `Astronaut` powinno wywołać sprawdzanie zakresu z `ValueRange`
    4. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> mark = Astronaut('<NAME>', 36, 170)
>>> melissa = Astronaut('<NAME>', 44, 170)
Traceback (most recent call last):
ValueError: Age is not between 28 and 42
>>> alex = Astronaut('<NAME>', 40, 201)
Traceback (most recent call last):
ValueError: Height is not between 150 and 200
"""
class ValueRange:
    """Data descriptor that keeps an attribute's value within [min, max].

    Assigning an out-of-range value raises
    ``ValueError: <name> is not between <min> and <max>``.
    """
    def __init__(self, name, min, max):
        self.name = name
        self.min = min
        self.max = max
    def __set_name__(self, owner, attrname):
        # Per-instance values are stored under a private name so that
        # reading/writing them does not re-trigger this descriptor.
        self.attrname = '_' + attrname
    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        return getattr(instance, self.attrname)
    def __set__(self, instance, value):
        if not self.min <= value <= self.max:
            raise ValueError(f'{self.name} is not between {self.min} and {self.max}')
        setattr(instance, self.attrname, value)
class Astronaut:
    """Astronaut with range-checked age and height (see ValueRange)."""
    age = ValueRange('Age', min=28, max=42)
    height = ValueRange('Height', min=150, max=200)
    def __init__(self, name, age, height):
        self.name = name
        # These assignments go through the descriptors and validate ranges.
        self.age = age
        self.height = height
| StarcoderdataPython |
21168 | <filename>backend/bios/apps.py
from django.apps import AppConfig
class BiosConfig(AppConfig):
    """Django application registry entry for the `bios` app."""
    name = 'bios'
| StarcoderdataPython |
3213058 | <gh_stars>1-10
import pyotp
import yaml
import argparse
import argcomplete
import os
from os.path import expanduser
from argcomplete.completers import ChoicesCompleter
def get_accounts():
    """Load the OTP account map from ~/.otp-accounts.yml.

    Returns the parsed YAML mapping (account name -> TOTP secret).
    Exits the process with status 1 if the file is missing or malformed.
    """
    file_path = os.path.join(expanduser('~'), '.otp-accounts.yml')
    try:
        with open(file_path, 'r') as stream:
            try:
                # safe_load: yaml.load() without an explicit Loader is
                # deprecated and can construct arbitrary Python objects from
                # untrusted input; this file only needs plain mappings.
                return yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                exit(1)
    except FileNotFoundError:
        print('Accounts YAML should be present at {}'.format(file_path))
        exit(1)
def select_account(accounts_list):
    """Parse argv for the requested account name.

    Registers the known account names with argcomplete so shell
    tab-completion offers them.
    """
    arg_parser = argparse.ArgumentParser()
    account_arg = arg_parser.add_argument('account')
    account_arg.completer = ChoicesCompleter(accounts_list.keys())
    argcomplete.autocomplete(arg_parser)
    return arg_parser.parse_args().account
def secret_for_account(accounts_list, account_key):
    """Return the TOTP secret for `account_key`, or None if unknown.

    `accounts_list` is the mapping loaded from the accounts YAML; a plain
    dict lookup replaces the previous linear scan over .items().
    """
    return accounts_list.get(account_key)
def main():
    """Resolve the requested account and print its current TOTP code."""
    accounts = get_accounts()
    chosen = select_account(accounts)
    # 'list' acts as a pseudo-account that dumps all account names --
    # unless the user really has an account literally named 'list'.
    if chosen == 'list' and 'list' not in accounts.keys():
        for name in accounts.keys():
            print(name)
        exit(0)
    secret = secret_for_account(accounts, chosen)
    if secret is None:
        exit(2)
    totp = pyotp.TOTP(secret)
    print(totp.now())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6625176 | import abc
import functools
from uqbar.objects import get_repr
from supriya.system import SupriyaObject
class SessionObject(SupriyaObject):
    """
    A non-realtime session object, analogous to ServerObject.
    """

    ### CLASS VARIABLES ###

    __documentation_section__ = "Session Internals"

    __slots__ = ()

    ### INITIALIZER ###

    @abc.abstractmethod
    def __init__(self, session):
        import supriya.nonrealtime

        # Accept either a live Session or None (a detached object).
        prototype = (supriya.nonrealtime.Session, type(None))
        assert isinstance(session, prototype)
        self._session = session

    ### SPECIAL METHODS ###

    def __repr__(self):
        return get_repr(self, multiline=False)

    ### PUBLIC METHODS ###

    @staticmethod
    def require_offset(function):
        """Decorator: run the wrapped method inside ``session.at(offset)``.

        If no ``offset`` keyword was supplied (or it is None), the offset of
        the innermost active moment is used. For SessionObject instances the
        offset must fall within [start_offset, stop_offset].
        """

        @functools.wraps(function)
        def wrapper(self, *args, **kwargs):
            import supriya.nonrealtime

            if isinstance(self, supriya.nonrealtime.Session):
                session = self
            else:
                session = self.session
            if "offset" not in kwargs or kwargs["offset"] is None:
                if not session._active_moments:
                    raise ValueError("No active moment.")
                kwargs["offset"] = session._active_moments[-1].offset
            # Bind once so the range check and its error message work whether
            # the offset was supplied by the caller or inferred above.
            # (Previously the message used a name that was unbound when the
            # caller passed ``offset=`` explicitly, raising NameError instead
            # of the intended ValueError.)
            offset = kwargs["offset"]
            if isinstance(self, SessionObject):
                if not (self.start_offset <= offset <= self.stop_offset):
                    raise ValueError(
                        "Offset {} must intersect [{}, {}]".format(
                            float(offset), self.start_offset, self.stop_offset
                        )
                    )
            with session.at(offset):
                return function(self, *args, **kwargs)

        return wrapper

    ### PUBLIC PROPERTIES ###

    @property
    def session(self):
        # The owning nonrealtime Session (or None).
        return self._session
| StarcoderdataPython |
5086886 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-10 14:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Answer.user -> Answer.owner (auto-generated by makemigrations)."""

    dependencies = [
        ('forum', '0005_auto_20170610_2010'),
    ]

    operations = [
        migrations.RenameField(
            model_name='answer',
            old_name='user',
            new_name='owner',
        ),
    ]
| StarcoderdataPython |
9663906 | import click
from flask.cli import AppGroup
from friday import storage
from friday.models import Recipe, RecipeImage
from friday.schemas import Recipe as RecipeSchema
recipe_group = AppGroup("recipe")
@recipe_group.command("new")
@click.argument("itemname")
@click.option("--name", "-n", multiple=True)
@click.option("--tag", "-t", multiple=True)
def make_recipe(itemname, name, tag):
    """Create new recipe"""
    new_recipe = Recipe.create(
        name=itemname,
        namesList=name,
        tagsList=tag,
    )
    # Echo the serialized recipe so the caller sees what was stored.
    click.echo(RecipeSchema.jsonify(new_recipe).get_data())
@recipe_group.command("get")
@click.argument("itemid")
def get_recipe(itemid):
    """Get a recipe"""
    found = Recipe.query_list().get(itemid)
    if found:
        click.echo(RecipeSchema.jsonify(found).get_data())
        return
    click.echo("Not found")
@recipe_group.command("update")
@click.argument("itemid")
@click.argument("itemname")
@click.option("--name", "-n", multiple=True)
@click.option("--tag", "-t", multiple=True)
def update_recipe(itemid, itemname, name, tag):
    """Update new recipe"""
    target = Recipe.query_list().get(itemid)
    if not target:
        click.echo("Not found")
        return
    target.update(name=itemname, namesList=name, tagsList=tag)
    click.echo(RecipeSchema.jsonify(target).get_data())
@recipe_group.command("delete")
@click.argument("itemid")
def delete_recipe(itemid):
    """Delete a recipe"""
    # NOTE: the docstring doubles as the click help text; it previously said
    # "Get a recipe" (copy-paste from get_recipe).
    r = Recipe.query_list().get(itemid)
    if not r:
        click.echo("Not found")
        return
    r.delete()
    click.echo("Done")
@recipe_group.command("add-image")
@click.argument("itemid")
@click.argument("imageurl")
def add_image(itemid, imageurl):
    """Add image"""
    recipe = Recipe.query_list().get(itemid)
    if not recipe:
        click.echo("Not found")
        return
    RecipeImage.create(recipe=recipe, url=imageurl)
    click.echo(RecipeSchema.jsonify(recipe).get_data())
@recipe_group.command("remove-image")
@click.argument("itemid")
@click.argument("filename")
def remove_image(itemid, filename):
    """Remove image"""
    recipe = Recipe.query_list().get(itemid)
    if not recipe:
        click.echo("Not found")
        return
    # Find the image record matching the requested filename, if any.
    image = None
    for candidate in recipe.images:
        if candidate.filename == filename:
            image = candidate
            break
    if not image:
        click.echo("Image not found")
        return
    # Remove the stored file first, then the database record.
    storage.delete(filename)
    image.delete()
    click.echo("Done")
| StarcoderdataPython |
6605857 | ## Command line functionality that calls ecg/ecg.py
| StarcoderdataPython |
1711190 | <reponame>waterloop/wcosa
"""
Main Script that calls other scripts to make wcosa work
"""
from __future__ import absolute_import
import argparse
from wcosa.command import handle, monitor, package_manager, use
from wcosa.info import __version__
from wcosa.objects.objects import Board, Fore, Generator, IDE, Path, Port
from wcosa.parsers import board_parser
from wcosa.utils import helper, output
def parse():
    """Build the wcosa command-line interface and return parsed options.

    One sub-parser per action (version/create/update/make/build/upload/
    monitor/boards/clean/package); the `package` action has its own nested
    sub-commands (install/remove/update).
    """
    parser = argparse.ArgumentParser(description='WCosa create, build and upload Cosa AVR projects')
    parser.add_argument(
        '--path',
        default=helper.get_working_directory(),
        help='path to run action on (default: current directory)',
        type=str)
    subparsers = parser.add_subparsers(dest='action', metavar='action')
    subparsers.required = True
    subparsers.add_parser(
        'version',
        help='wcosa version')
    create_parser = subparsers.add_parser(
        'create',
        help='create project')
    create_parser.add_argument(
        '--board',
        help='board to use for wcosa project',
        required=True,
        type=str)
    create_parser.add_argument(
        '--ide',
        help='create project structure for specific ide (default: none)',
        type=str)
    update_parser = subparsers.add_parser(
        'update',
        help='update project')
    update_parser.add_argument(
        '--board',
        help='board to use for wcosa project',
        type=str)
    update_parser.add_argument(
        '--ide',
        help='update project structure for specific ide (default: none)',
        type=str)
    make_parser = subparsers.add_parser(
        'make',
        help='make configured project')
    make_parser.add_argument(
        '--make',
        help='path to make binary',
        type=str)
    build_parser = subparsers.add_parser(
        'build',
        help='build project')
    build_parser.add_argument(
        '--generator',
        help='makefile generator to use for build (default: Unix Makefiles)',
        type=str)
    build_parser.add_argument(
        '--make',
        help='path to make binary',
        type=str)
    build_parser.add_argument(
        '--cmake',
        help='path to cmake binary',
        type=str)
    upload_parser = subparsers.add_parser(
        'upload',
        help='upload project')
    upload_parser.add_argument(
        '--port',
        # fixed help-text typo ("traget")
        help='port to upload the AVR target to (default: automatic)',
        type=str)
    monitor_parser = subparsers.add_parser(
        'monitor',
        help='monitor AVR device')
    monitor_parser.add_argument(
        '--port',
        help='port to monitor the AVR target at (default: automatic)',
        type=str)
    monitor_parser.add_argument(
        '--baud',
        # fixed help-text typo ("buad")
        help='baud rate for serial (default: 9600)',
        type=int)
    subparsers.add_parser('boards', help='print supported boards')
    subparsers.add_parser('clean', help='clean build files')
    package_parser = subparsers.add_parser(
        'package',
        help='manipulate packages')
    package_subparsers = package_parser.add_subparsers(
        dest='package_command',
        metavar='command')
    package_subparsers.required = True
    install_parser = package_subparsers.add_parser(
        'install',
        help='install package(s)')
    install_parser.add_argument(
        'package',
        nargs='*',
        type=str)
    remove_parser = package_subparsers.add_parser(
        'remove',
        help='remove package(s)')
    remove_parser.add_argument(
        'package',
        nargs='*',
        type=str)
    # NOTE: this previously reassigned `update_parser`, shadowing the
    # top-level 'update' sub-parser above; it takes no arguments, so no
    # handle is kept.
    package_subparsers.add_parser(
        'update',
        help='update all packages')
    return parser.parse_args()
def print_boards():
    """Print all the available boards and their display names."""
    # Hoisted: the boards.json path was recomputed for every board.
    boards_json = helper.get_wcosa_path() + '/wcosa/boards.json'
    boards = board_parser.get_all_board(boards_json)
    output.writeln('Boards compatible with this project are: ', Fore.CYAN)
    for curr_board in boards:
        name = board_parser.get_board_properties(curr_board, boards_json)['name']
        output.writeln('{:15s} --->\t{}'.format(curr_board, name))
def main():
    """Entry point: parse the CLI options and dispatch the chosen action."""
    options = parse()
    path = Path(options.path)
    # based on the action call scripts
    # NOTE: 'version' was previously a standalone `if` followed by a second
    # `if/elif` chain; one consistent chain dispatches identically since the
    # actions are mutually exclusive.
    if options.action == 'version':
        output.writeln(__version__)
    elif options.action == 'boards':
        print_boards()
    elif options.action == 'create':
        handle.create_wcosa(path, Board(options.board), IDE(options.ide))
    elif options.action == 'update':
        handle.update_wcosa(path, Board(options.board), IDE(options.ide))
    elif options.action == 'build':
        use.build_wcosa(
            path,
            Generator(options.generator),
            options.make,
            options.cmake)
    elif options.action == 'make':
        use.build_wcosa(
            path,
            make=options.make,
            needs_cmake=False)
    elif options.action == 'upload':
        use.upload_wcosa(path, Port(options.port))
    elif options.action == 'clean':
        use.clean_wcosa(path)
    elif options.action == 'monitor':
        monitor.serial_monitor(options.port, options.baud)
    elif options.action == 'package':
        if options.package_command == 'install':
            if not options.package:
                package_manager.package_install_pkglist(options.path)
            else:
                package_manager.package_install_many(
                    options.path,
                    ' '.join(options.package).split(', '))
        elif options.package_command == 'update':
            package_manager.package_update_all(options.path)
        elif options.package_command == 'remove':
            package_manager.package_uninstall_many(
                options.path,
                ' '.join(options.package).split(', '))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
252680 |
import config
import sys
from subprocess import call
from PyQt5 import QtGui, QtCore
from PyQt5.QtCore import pyqtRemoveInputHook
from PyQt5.QtWidgets import QPushButton, QCheckBox, QWidget, QVBoxLayout, QLabel, QGroupBox, \
QHBoxLayout, QLineEdit, QRadioButton, QGridLayout, QComboBox, QFileDialog, QApplication, \
QTabWidget, QInputDialog, QScrollArea, QMessageBox, QGraphicsDropShadowEffect, QSlider
from PyQt5.QtGui import QCursor
import options
import randomizer
import update
import constants
import time
import traceback
# Fail fast on Python 2: the code below relies on Python-3-only syntax
# (f-strings) and semantics.
if sys.version_info[0] < 3:
    raise Exception("Python 3 or a more recent version is required. Report this to Green Knight")
# Extended QButton widget to hold flag value - NOT USED PRESENTLY
class FlagButton(QPushButton):
    """QPushButton that remembers the flag name it represents."""
    def __init__(self, text, value):
        super(FlagButton, self).__init__()
        self.setText(text)
        # the flag/code name this button toggles
        self.value = value
# Extended QCheckBox widget to hold flag value - CURRENTLY USED
class FlagCheckBox(QCheckBox):
    """QCheckBox that remembers the flag name it represents."""
    def __init__(self, text, value):
        super(FlagCheckBox, self).__init__()
        self.setText(text)
        # the flag/code name this checkbox toggles
        self.value = value
class Window(QWidget):
def __init__(self):
super().__init__()
# window geometry data
self.title = "Beyond Chaos Randomizer"
self.left = 200
self.top = 200
self.width = 1000
self.height = 700
# values to be sent to Randomizer
self.romText = ""
self.version = "4"
self.mode = "normal" # default
self.seed = ""
self.flags = []
# dictionaries to hold flag data
self.aesthetic = {}
self.sprite = {}
self.spriteCategories = {}
self.experimental = {}
self.gamebreaking = {}
self.major = {}
self.flag = {}
self.battle = {}
self.beta = {}
self.dictionaries = [self.flag, self.sprite, self.spriteCategories, self.battle, self.aesthetic, self.major,
self.experimental, self.gamebreaking, self.beta]
#keep a list of all checkboxes
self.checkBoxes = []
# array of supported game modes
self.supportedGameModes = ["normal", "katn", "ancientcave", "speedcave", "racecave", "dragonhunt"]
# dictionary of game modes for drop down
self.GameModes = {}
# array of preset flags and codes
self.supportedPresets = ["newplayer", "intermediateplayer", "advancedplayer", "raceeasy", "racemedium", "raceinsane"]
# dictionay of game presets from drop down
self.GamePresets = {}
#tabs names for the tabs in flags box
self.tabNames = ["Flags", "Sprites", "SpriteCategories", "Battle", "Aesthetic", "Major", "Experimental", "Gamebreaking"]
# ui elements
self.flagString = QLineEdit() #flag text box for displaying the flags
self.comboBox = QComboBox() #flag saved preset dropdown
self.modeBox = QComboBox() #game mode drop down to pick what gamemode
self.presetBox = QComboBox() #official supported preset flags
self.modeDescription = QLabel("Pick a Game Mode!")
self.flagDescription = QLabel("Pick a Flag Set!")
#tabs: Flags, Sprites, Battle, etc...
self.tab1 = QWidget()
self.tab2 = QWidget()
self.tab3 = QWidget()
self.tab4 = QWidget()
self.tab5 = QWidget()
self.tab6 = QWidget()
self.tab7 = QWidget()
self.tab8 = QWidget()
#self.middleLeftGroupBox = QGroupBox() #obsolted
self.tablist = [self.tab1, self.tab2, self.tab3, self.tab4, self.tab5, self.tab6, self.tab7, self.tab8]
#global busy notifications
flagsChanging = False
# ----------- Begin buiding program/window
# ------------------------------
# pull data from files
self.initCodes()
# create window using geometry data
self.InitWindow()
self.romInput.setText(self.romText)
self.updateFlagString()
self.updateFlagCheckboxes()
self.flagButtonClicked()
self.updatePresetDropdown()
self.clearUI() #clear the UI of all selections
def InitWindow(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
# build the UI
self.CreateLayout()
# show program onscreen
self.show() #maximize the randomizer
#self.showMaximized() #maximize the randomizer
index = self.presetBox.currentIndex()
    def CreateLayout(self):
        """Assemble the top-level vertical layout: title, ROM/seed row,
        mode/preset row, and the flag-selection tabs."""
        # Primary Vertical Box Layout
        vbox = QVBoxLayout()
        titleLabel = QLabel("Beyond Chaos Randomizer")
        font = QtGui.QFont("Arial", 24, QtGui.QFont.Black)
        titleLabel.setFont(font)
        titleLabel.setAlignment(QtCore.Qt.AlignCenter)
        titleLabel.setMargin(10)
        vbox.addWidget(titleLabel)
        #select rom, set seed, generate button
        vbox.addWidget(self.GroupBoxOneLayout())
        #game mode, preset flag selections and description
        vbox.addWidget(self.GroupBoxTwoLayout()) # Adding second groupbox to the layout
        #flags box
        vbox.addWidget(self.flagBoxLayout()) # Adding second/middle groupbox
        self.setLayout(vbox)
def update(self):
Update.update()
QMessageBox.information(self, "Update Process", "Checking for updates, if found this will automatically close", QMessageBox.Ok)
    # Top groupbox consisting of ROM selection, and Seed number input
    def GroupBoxOneLayout(self):
        """Build the top row: ROM path + Browse button, seed input +
        Generate Seed button. Returns the populated QGroupBox."""
        topGroupBox = QGroupBox()
        TopHBox = QHBoxLayout()
        width = 250
        height = 60
        romLabel = QLabel("ROM:")
        TopHBox.addWidget(romLabel)
        self.romInput = QLineEdit()
        self.romInput.setPlaceholderText("Required")
        self.romInput.setReadOnly(True)
        TopHBox.addWidget(self.romInput)
        browseButton = QPushButton("Browse")
        browseButton.setMaximumWidth(width)
        browseButton.setMaximumHeight(height)
        browseButton.setStyleSheet("font:bold; font-size:18px; height:24px; background-color: #5A8DBE; color: #E4E4E4;")
        browseButton.clicked.connect(lambda: self.openFileChooser())
        browseButton.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        browseEffect = QGraphicsDropShadowEffect()
        browseEffect.setBlurRadius(3)
        browseButton.setGraphicsEffect(browseEffect)
        TopHBox.addWidget(browseButton)
        # space is a small hack so the S isn't in shadow.
        seedLabel = QLabel(" Seed:")
        TopHBox.addWidget(seedLabel)
        self.seedInput = QLineEdit()
        self.seedInput.setPlaceholderText("Optional")
        TopHBox.addWidget(self.seedInput)
        generateButton = QPushButton("Generate Seed")
        generateButton.setMaximumWidth(width)
        generateButton.setMaximumHeight(height)
        generateButton.setStyleSheet("font:bold; font-size:18px; height:24px; background-color: #5A8DBE; color: #E4E4E4;")
        generateButton.clicked.connect(lambda: self.generateSeed())
        generateButton.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        generateEffect = QGraphicsDropShadowEffect()
        generateEffect.setBlurRadius(3)
        generateButton.setGraphicsEffect(generateEffect)
        TopHBox.addWidget(generateButton)
        topGroupBox.setLayout(TopHBox)
        return topGroupBox
    def GroupBoxTwoLayout(self):
        """Build the second row: game-mode and preset dropdowns, the update
        button, and the mode/flag description labels. Returns the QGroupBox."""
        # Populate self.GameModes / self.GamePresets before wiring dropdowns.
        self.compileModes()
        self.compileSupportedPresets()
        vhbox = QVBoxLayout()
        groupBoxTwo = QGroupBox()
        topGroupBox = QGroupBox()
        bottomGroupBox = QGroupBox()
        topHBox = QHBoxLayout()
        bottomHBox = QHBoxLayout()
        topHBox.addStretch(0)
        bottomHBox.addStretch(0)
        # ---- Game Mode Drop Down ---- #
        gameModeLabel = QLabel("Game Mode")
        gameModeLabel.setMaximumWidth(60)
        topHBox.addWidget(gameModeLabel, alignment = QtCore.Qt.AlignLeft)
        for item in self.GameModes.items():
            self.modeBox.addItem(item[0])
        self.modeBox.currentTextChanged.connect(lambda: self.updateGameDescription())
        topHBox.addWidget(self.modeBox, alignment = QtCore.Qt.AlignLeft)
        # ---- Preset Flags Drop Down ---- #
        presetModeLabel = QLabel("Preset Flags")
        presetModeLabel.setMaximumWidth(60)
        topHBox.addWidget(presetModeLabel, alignment = QtCore.Qt.AlignRight)
        self.presetBox.addItem("Select a flagset")
        self.loadSavedFlags()
        for item in self.GamePresets.items():
            self.presetBox.addItem(item[0])
        self.presetBox.currentTextChanged.connect(lambda: self.updatePresetDropdown())
        topHBox.addWidget(self.presetBox,alignment = QtCore.Qt.AlignLeft)
        # ---- Update Button ---- #
        updateButton = QPushButton("Check for Updates")
        updateButton.setStyleSheet("font:bold; font-size:18px; height:24px; background-color: #5A8DBE; color: #E4E4E4;")
        width = 250
        height = 60
        updateButton.setMaximumWidth(width)
        updateButton.setMaximumHeight(height)
        updateButton.clicked.connect(lambda: self.update())
        updateButton.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        effect = QGraphicsDropShadowEffect()
        effect.setBlurRadius(3)
        updateButton.setGraphicsEffect(effect)
        topHBox.addWidget(updateButton,alignment = QtCore.Qt.AlignLeft)
        # ---- Mode Description ---- #
        gameModeDescriptionLabel = QLabel("Game Mode Description:")
        gameModeDescriptionLabel.setStyleSheet("font-size:14px; height:24px; color:#253340;")
        bottomHBox.addWidget(gameModeDescriptionLabel, alignment = QtCore.Qt.AlignLeft)
        self.modeDescription.setStyleSheet("font-size:14px; height:24px; color:#253340;")
        bottomHBox.addWidget(self.modeDescription, alignment = QtCore.Qt.AlignLeft)
        # ---- Spacer ---- #
        spacerDescriptionLabel = QLabel(" ")
        spacerDescriptionLabel.setStyleSheet("font-size:14px; height:24px; color:#253340;")
        bottomHBox.addWidget(spacerDescriptionLabel, alignment = QtCore.Qt.AlignLeft)
        # ---- Preset Description ---- #
        flagDescriptionLabel = QLabel("Flag Description:")
        flagDescriptionLabel.setStyleSheet("font-size:14px; height:24px; color:#253340;")
        bottomHBox.addWidget(flagDescriptionLabel, alignment = QtCore.Qt.AlignLeft)
        self.flagDescription.setStyleSheet("font-size:14px; height:24px; color:#253340;")
        bottomHBox.addWidget(self.flagDescription, alignment = QtCore.Qt.AlignLeft)
        topGroupBox.setLayout(topHBox)
        bottomGroupBox.setLayout(bottomHBox)
        vhbox.addWidget(topGroupBox)
        vhbox.addWidget(bottomGroupBox)
        groupBoxTwo.setLayout(vhbox)
        return groupBoxTwo
    def flagBoxLayout(self):
        """Build the tabbed flag-selection area: one scrollable tab per flag
        category (checkbox per flag), the flag-string text box, and the
        Save/Reset buttons. Returns the QGroupBox."""
        groupBoxTwo = QGroupBox()
        middleHBox = QHBoxLayout()
        middleRightGroupBox = QGroupBox("Flag Selection")
        tabVBoxLayout = QVBoxLayout()
        tabs = QTabWidget()
        # loop to add tab objects to 'tabs' TabWidget
        for t, d, names in zip(self.tablist, self.dictionaries, self.tabNames):
            tabObj = QScrollArea()
            tabs.addTab(tabObj, names)
            tablayout = QVBoxLayout()
            for flagname, flagdesc in d.items():
                cbox = FlagCheckBox(f"{flagname}  -  {flagdesc['explanation']}", flagname)
                self.checkBoxes.append(cbox)
                tablayout.addWidget(cbox)
                #cbox.setCheckable(True)
                #cbox.setToolTip(flagdesc['explanation'])
                cbox.clicked.connect(lambda checked: self.flagButtonClicked())
            t.setLayout(tablayout)
            #tablayout.addStretch(1)
            tabObj.setWidgetResizable(True)
            tabObj.setWidget(t)
        tabVBoxLayout.addWidget(tabs)
        # this is the line in the layout that displays the string of selected flags
        # and the button to save those flags
        widgetV = QWidget()
        widgetVBoxLayout = QVBoxLayout()
        widgetV.setLayout(widgetVBoxLayout)
        widgetVBoxLayout.addWidget(QLabel("Text-string of selected flags:"))
        self.flagString.textChanged.connect(self.textchanged)
        widgetVBoxLayout.addWidget(self.flagString)
        saveButton = QPushButton("Save flags selection")
        saveButton.clicked.connect(lambda: self.saveFlags())
        widgetVBoxLayout.addWidget(saveButton)
        # This part makes a group box and adds the selected-flags display
        # and a button to clear the UI
        flagTextWidget = QGroupBox()
        flagTextHBox = QHBoxLayout()
        flagTextHBox.addWidget(widgetV)
        clearUiButton = QPushButton("Reset")
        clearUiButton.setStyleSheet("font-size:12px; height:60px")
        clearUiButton.clicked.connect(lambda: self.clearUI())
        flagTextHBox.addWidget(clearUiButton)
        flagTextWidget.setLayout(flagTextHBox)
        tabVBoxLayout.addWidget(flagTextWidget)
        middleRightGroupBox.setLayout(tabVBoxLayout)
        # ------------- Part two (right) end ---------------------------------------
        # add widgets to HBoxLayout and assign to middle groupbox layout
        middleHBox.addWidget(middleRightGroupBox)
        groupBoxTwo.setLayout(middleHBox)
        return groupBoxTwo
    # Middle groupbox of sub-groupboxes. Consists of left section (game mode
    # selection)
    # and right section (flag selection -> tab-sorted)
    def GroupBoxThreeLayout(self):
        """Older variant of flagBoxLayout with a stubbed slider pane for the
        'exp' flag.

        NOTE(review): CreateLayout() uses flagBoxLayout() instead; this method
        appears unreferenced in the visible code -- confirm before removing.
        """
        groupBoxTwo = QGroupBox()
        middleHBox = QHBoxLayout()
        middleRightGroupBox = QGroupBox("Flag Selection")
        tabVBoxLayout = QVBoxLayout()
        tabs = QTabWidget()
        tabNames = ["Flags", "Sprites", "SpriteCategories", "Battle", "Aesthetic", "Major", "Experimental", "Gamebreaking", "Beta"]
        ############## Checkboxes and inline descriptions #####################
        #setStyleSheet("border:none");
        # loop to add tab objects to 'tabs' TabWidget
        for t, d, names in zip(self.tablist, self.dictionaries, tabNames):
            tabObj = QScrollArea()
            tabs.addTab(tabObj, names)
            #we have a horizontal box that can go item1, item 2 in left-right fashion
            itemLayout = QHBoxLayout()
            #we then have two vertical boxes, one for normal flags, one for flags that have sliders or entry boxes.
            boxOneLayout = QVBoxLayout()
            boxTwoVertLayout = QVBoxLayout()
            boxTwoHorzLayout = QVBoxLayout()
            #we then have the group boxes the vertical tayouts get set into
            groupOneBox = QGroupBox()
            groupTwoVertBox = QGroupBox()
            groupTwoHorzBox = QGroupBox()
            flagGroup = QGroupBox()
            for flagname, flagdesc in d.items():
                #TODO: this can probably be done better once I know GUI better...
                if flagname == "exp":
                    cbox = FlagCheckBox(f"{flagname}  -  {flagdesc['explanation']}", flagname)
                    #self.checkBoxes.append(cbox)
                    #cbox.clicked.connect(lambda checked: self.flagButtonClicked())
                    #boxTwoHorzLayout.addWidget(cbox)
                    #slider = QSlider(QtCore.Qt.Horizontal)
                    #boxTwoHorzLayout.addWidget(slider)
                    #groupTwoHorzBox.setLayout(boxTwoHorzLayout)
                    #boxTwoVertLayout.addWidget(groupTwoHorzBox)
                    #groupTwoHorzBox.setLayout(boxTwoVertLayout)
                else:
                    cbox = FlagCheckBox(f"{flagname} - {flagdesc['explanation']}", flagname)
                    self.checkBoxes.append(cbox)
                    cbox.clicked.connect(lambda checked: self.flagButtonClicked())
                    boxOneLayout.addWidget(cbox) #context - adding a second pane to certain tabs for now for sliders.
            groupOneBox.setLayout(boxOneLayout)
            itemLayout.addWidget(groupOneBox)
            #itemLayout.addWidget(groupTwoHorzBox)
            t.setLayout(itemLayout)
            #tablayout.addStretch(1)
            tabObj.setWidgetResizable(True)
            tabObj.setWidget(t)
        tabVBoxLayout.addWidget(tabs)
        #----------- tabs done ----------------------------
        # this is the line in the layout that displays the string of selected
        # flags
        # and the button to save those flags
        widgetV = QWidget()
        widgetVBoxLayout = QVBoxLayout()
        widgetV.setLayout(widgetVBoxLayout)
        widgetVBoxLayout.addWidget(QLabel("Text-string of selected flags:"))
        self.flagString.textChanged.connect(self.textchanged)
        widgetVBoxLayout.addWidget(self.flagString)
        saveButton = QPushButton("Save flags selection")
        saveButton.clicked.connect(lambda: self.saveFlags())
        widgetVBoxLayout.addWidget(saveButton)
        # This part makes a group box and adds the selected-flags display
        # and a button to clear the UI
        flagTextWidget = QGroupBox()
        flagTextHBox = QHBoxLayout()
        flagTextHBox.addWidget(widgetV)
        clearUiButton = QPushButton("Reset")
        clearUiButton.setStyleSheet("font:bold; font-size:16px; height:60px; background-color: #5A8DBE; color: #E4E4E4;")
        clearUiButton.clicked.connect(lambda: self.clearUI())
        clearUiButton.setCursor(QCursor(QtCore.Qt.PointingHandCursor))
        effect = QGraphicsDropShadowEffect()
        effect.setBlurRadius(3)
        clearUiButton.setGraphicsEffect(effect)
        flagTextHBox.addWidget(clearUiButton)
        flagTextWidget.setLayout(flagTextHBox)
        tabVBoxLayout.addWidget(flagTextWidget)
        middleRightGroupBox.setLayout(tabVBoxLayout)
        # ------------- Part two (right) end
        # ---------------------------------------
        # add widgets to HBoxLayout and assign to middle groupbox layout
        middleHBox.addWidget(middleRightGroupBox)
        groupBoxTwo.setLayout(middleHBox)
        return groupBoxTwo
    # Bottom groupbox consisting of saved seeds selection box, and button to
    # generate seed
    def GroupBoxFourLayout(self):
        """Build a bottom row placeholder for saved-flag selection.

        NOTE(review): not referenced by CreateLayout() in the visible code.
        """
        bottomGroupBox = QGroupBox()
        bottomHBox = QHBoxLayout()
        bottomHBox.addWidget(QLabel("Saved flag selection: "))
        #todo: Add amount of seeds to generate here.
        #todo: Add retry on failure checkbox
        bottomHBox.addStretch(1)
        bottomGroupBox.setLayout(bottomHBox)
        return bottomGroupBox
# --------------------------------------------------------------------------------
# -------------- NO MORE LAYOUT DESIGN PAST THIS POINT
# ---------------------------
# --------------------------------------------------------------------------------
def textchanged(self, text):
if (self.flagsChanging):
return
self.flagsChanging = True
for c in self.checkBoxes:
c.setChecked(False)
values = text.split()
self.flags.clear()
self.flagString.clear()
for v in values:
for d in self.dictionaries:
for flagname in d:
if v == flagname:
for c in self.checkBoxes:
if v == c.value:
c.setChecked(True)
self.flags.append(c.value)
self.updateFlagString()
self.flagsChanging = False
# (At startup) Opens reads code flags/descriptions and
# puts data into separate dictionaries
def initCodes(self):
for code in options.NORMAL_CODES + options.MAKEOVER_MODIFIER_CODES:
if code.category == "aesthetic":
d = self.aesthetic
elif code.category == "sprite":
d = self.sprite
elif code.category == "spriteCategories":
d = self.spriteCategories
elif code.category == "experimental":
d = self.experimental
elif code.category == "gamebreaking":
d = self.gamebreaking
elif code.category == "major":
d = self.major
elif code.category == "beta":
d = self.beta
elif code.category == "battle":
d = self.battle
else:
print(f"Code {code.name} does not have a valid category.")
continue
d[code.name] = {'explanation': code.long_description, 'checked': False}
for flag in sorted(options.ALL_FLAGS):
self.flag[flag.name] = {'explanation': flag.description, 'checked': True}
# opens input dialog to get a name to assign a desired seed flagset, then
# saves flags and selected mode to the cfg file
def saveFlags(self):
text, okPressed = QInputDialog.getText(self, "Save Seed", "Enter a name for this flagset", QLineEdit.Normal, "")
if okPressed and text != '':
self.GamePresets[text] = self.flagString.text()
#flagString = self.flagString.text()
#for flag in self.flags:
# flagString += flag + " "
Config.Writeflags(text, self.flagString.text())
index = self.presetBox.findText(text)
if index == -1:
self.presetBox.addItem(text)
else:
self.presetBox.removeItem(index)
self.presetBox.addItem(text)
index = self.presetBox.findText(text)
self.presetBox.setCurrentIndex(index)
def loadSavedFlags(self):
flagset = config.readFlags()
if flagset != None:
for text, flags in flagset.items():
self.GamePresets[text] = flags
# delete preset. Dialog box confirms users choice to delete. check is
# done to ensure file
# exists before deletion is attempted.
def deleteSeed(self):
seed = self.comboBox.currentText()
if not seed == "Select a preset":
response = QMessageBox.question(self, 'Delete confimation', f"Do you want to delete \'{seed}\'?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if response == QMessageBox.Yes:
del self.savedPresets[seed]
self.comboBox.removeItem(self.comboBox.findText(seed))
    def updateGameDescription(self):
        """Set self.mode and the description label from the mode dropdown.

        The dropdown index order must match compileModes()'s insertion order;
        each index is matched back to its display name before applying.
        """
        self.modeDescription.clear()
        index = self.modeBox.currentIndex()
        for item in self.GameModes.items():
            if index == 0:
                if item[0] == "Normal":
                    self.modeDescription.setText(item[1])
                    self.mode = "normal"
            elif index == 1:
                if item[0] == "Race - Kefka @ Narshe":
                    self.modeDescription.setText(item[1])
                    self.mode = "katn"
            elif index == 2:
                if item[0] == "Ancient Cave":
                    self.modeDescription.setText(item[1])
                    self.mode = "ancientcave"
            elif index == 3:
                if item[0] == "Speed Cave":
                    self.modeDescription.setText(item[1])
                    self.mode = "speedcave"
            elif index == 4:
                if item[0] == "Race - Randomized Cave":
                    self.modeDescription.setText(item[1])
                    self.mode = "racecave"
            elif index == 5:
                if item[0] == "Race - Dragon Hunt":
                    self.modeDescription.setText(item[1])
                    self.mode = "dragonhunt"
            else:
                # unknown index: fall back to the prompt text
                self.modeDescription.setText("Pick a Game Mode!")
    def updatePresetDropdown(self, index = -1):
        """Apply the chosen preset: set the description label and load its
        flag string. Indexes 1-6 are the built-in presets (in the order
        added by compileSupportedPresets); anything later is user-saved.

        The `index` parameter is ignored; the current dropdown selection is
        re-resolved here.
        """
        text = self.presetBox.currentText()
        index = self.presetBox.findText(text)
        flags = self.GamePresets.get(text)
        if index ==0:
            # index 0 is the "Select a flagset" placeholder
            self.clearUI()
            self.flagDescription.clear()
            self.flagDescription.setText("Pick a flag set!")
        elif index == 1:
            self.flagDescription.setText("Flags designed for a new player")
            self.flagString.setText(flags)
        elif index == 2:
            self.flagDescription.setText("Flags designed for an intermediate player")
            self.flagString.setText(flags)
        elif index == 3:
            self.flagDescription.setText("Flags designed for an advanced player")
            self.flagString.setText(flags)
        elif index == 4:
            self.flagDescription.setText("Flags designed for easy difficulty races")
            self.flagString.setText(flags)
        elif index == 5:
            self.flagDescription.setText("Flags designed for medium difficulty races")
            self.flagString.setText(flags)
        elif index == 6:
            self.flagDescription.setText("Flags designed for insane difficulty races")
            self.flagString.setText(flags)
        else:
            self.flagDescription.setText("Custom saved flags")
            self.flagString.setText(flags)
def clearUI(self):
self.seed = ""
self.flags.clear
self.seedInput.setText(self.seed)
self.modeBox.setCurrentIndex(0)
self.initCodes()
self.updateFlagCheckboxes()
self.flagButtonClicked()
self.flagString.clear()
self.flags.clear()
self.updateGameDescription()
# when flag UI button is checked, update corresponding dictionary values
def flagButtonClicked(self):
self.flags.clear()
for t, d in zip(self.tablist, self.dictionaries):
children = t.findChildren(FlagCheckBox)
for c in children:
if c.isChecked():
d[c.value]['checked'] = True
flagset = False
for flag in self.flags:
if flag == d[c.value]:
flagset = true
if flagset == False:
self.flags.append(c.value)
else:
d[c.value]['checked'] = False
#self.updateDictionaries()
self.updateFlagString()
# Opens file dialog to select rom file and assigns it to value in
# parent/Window class
def openFileChooser(self):
file_path = QFileDialog.getOpenFileName(self, 'Open File', './',
filter="ROMs (*.smc *.sfc *.fig);;All Files(*.*)")
# display file location in text input field
self.romInput.setText(str(file_path[0]))
def compileModes(self):
for mode in self.supportedGameModes:
if mode == "normal":
self.GameModes['Normal'] = "Play through the normal story with randomized gameplay."
elif mode == "katn":
self.GameModes['Race - Kefka @ Narshe'] = "Race through the story and defeat Kefka at Narshe"
elif mode == "ancientcave":
self.GameModes['Ancient Cave'] = "Play though a long randomized dungeon."
elif mode == "speedcave":
self.GameModes['Speed Cave'] = "Play through a medium randomized dungeon."
elif mode == "racecave":
self.GameModes['Race - Randomized Cave'] = "Race through a short randomized dungeon."
elif mode == "dragonhunt":
self.GameModes['Race - Dragon Hunt'] = "Race to kill all 8 dragons."
def compileSupportedPresets(self):
for mode in self.supportedPresets:
if mode == "newplayer":
self.GamePresets['New Player'] = "b c e g i m n o p q r s t w y z alasdraco capslockoff partyparty makeover johnnydmad"
elif mode == "intermediateplayer":
self.GamePresets['Intermediate Player'] = "b c d e g i j k l m n o p q r s t w y z alasdraco capslockoff makeover partyparty johnnydmad notawaiter mimetime"
elif mode == "advancedplayer":
self.GamePresets['Advanced Player'] = "b c d e f g i j k l m n o p q r s t u w y z alasdraco capslockoff johnnydmad makeover notawaiter partyparty dancingmaduin bsiab mimetime randombosses"
elif mode == "raceeasy":
self.GamePresets['Race - Easy'] = "b c d e f g i j k m n o p q r s t w y z capslockoff johnnydmad makeover notawaiter partyparty madworld"
elif mode == "racemedium":
self.GamePresets['Race - Medium'] = "b c d e f g i j k m n o p q r s t u w y z capslockoff johnnydmad makeover notawaiter partyparty electricboogaloo randombosses madworld"
elif mode == "raceinsane":
self.GamePresets['Race - Insane'] = "b c d e f g i j k m n o p q r s t u w y z capslockoff johnnydmad makeover notawaiter partyparty darkworld madworld bsiab electricboogaloo randombosses"
# Get seed generation parameters from UI to prepare for seed generation
# This will show a confirmation dialog, and call the local Randomizer.py
# file
# and pass arguments to it
    def generateSeed(self):
        """Collect the ROM path, seed and flags from the UI, ask the user to
        confirm, then run the local randomizer and report success or failure
        via message boxes. Does nothing beyond an error dialog when no ROM
        has been selected.
        """
        self.romText = self.romInput.text()
        if self.romText == "": # Checks if user ROM is blank
            QMessageBox.about(self, "Error", "You need to select a FFVI rom!")
        else:
            self.seed = self.seedInput.text()
            displaySeed = self.seed
            if self.seed == "":
                displaySeed = "(none)" # pretty-printing :)
            # Flags concatenated with no separator form the flag segment of the
            # randomizer "bundle" string built below.
            flagMode = ""
            for flag in self.flags:
                flagMode += flag
            flagMsg = ""
            for flag in self.flags:
                if flagMsg != "":
                    flagMsg += "\n----"
                flagMsg += flag
            # This makes the flag string more readable in the confirm dialog
            message = ((f"Rom: {self.romText}\n"
                        f"Seed: {displaySeed}\n"
                        f"Mode: {self.mode}\n"
                        f"Flags: \n----{flagMsg}\n"
                        f"(Hyphens are not actually used in seed generation)"))
            messBox = QMessageBox.question(self, "Confirm Seed Generation?", message, QMessageBox.Yes | QMessageBox.Cancel)
            # NOTE(review): 16384 is QMessageBox.Yes's numeric value in Qt5;
            # comparing against the enum constant would be clearer — confirm.
            if messBox == 16384: # User selects confirm/accept/yes option
                #finalFlags = self.flags.replace(" ", "")
                bundle = f"{self.version}.{self.mode}.{flagMode}.{self.seed}"
                # remove spam if the Randomizer asks for input
                # TODO: guify that stuff
                # Hash check can be moved out to when you pick the file.
                # If you delete the file between picking it and running, just
                # spit out an error, no need to prompt.
                # Randomboost could send a signal ask for a number or whatever,
                # but maybe it's better to just remove it or pick a fixed
                # number?
                QtCore.pyqtRemoveInputHook()
                # TODO: put this in a new thread
                try:
                    result_file = randomizer.randomize(args=['BeyondChaos.py', self.romText, bundle, "test"])
                    #call(["py", "Randomizer.py", self.romText, bundle, "test"])
                    # Running the Randomizer twice in one session doesn't work
                    # because of global state.
                    # Exit so people don't try it.
                    # TODO: fix global state then remove this
                except Exception as e:
                    traceback.print_exc()
                    QMessageBox.critical(self, "Error creating ROM", str(e), QMessageBox.Ok)
                else:
                    QMessageBox.information(self, "Successfully created ROM", f"Result file: {result_file}", QMessageBox.Ok)
                    return
                #sys.exit() Lets no longer sysexit anymore so we don't have to
                #reopen each time. The user can close the gui.
# read each dictionary and update text field showing flag codes based upon
# flags denoted as 'True'
def updateFlagString(self):
self.flagsChanging = True
self.flagString.clear()
temp = ""
for x in range(0, len(self.flags)):
flag = self.flags[x]
temp+= flag
temp+=" "
self.flagString.setText(temp)
self.flagsChanging = False
# read through dictionaries and set flag checkboxes as 'checked'
def updateFlagCheckboxes(self):
for t, d in zip(self.tablist, self.dictionaries):
# create a list of all checkbox objects from the current QTabWidget
children = t.findChildren(FlagCheckBox)
# enumerate checkbox objects and set them to 'checked' if
# corresponding
# flag value is true
for c in children:
value = c.value
#print(value + str(d[value]['checked']))
if d[value]['checked']:
c.setProperty('checked', True)
else:
c.setProperty('checked', False)
if __name__ == "__main__":
    print("Loading GUI, checking for config file, updater file and updates please wait.")
    try:
        # Ensure the updater's config file exists before building the window.
        update.configExists()
        App = QApplication(sys.argv)
        window = Window()
        # NOTE(review): fixed 3s pause before entering the event loop —
        # presumably to let the update check settle; confirm it is still needed.
        time.sleep(3)
        sys.exit(App.exec())
    except Exception:
        # Any startup failure is printed rather than crashing silently.
        traceback.print_exc()
| StarcoderdataPython |
6404026 | """
Tests for system.output module.
"""
import pytest
import test_common.system.output as system_output
@pytest.mark.output
def test_log_function_output():
    """Verify that OutputLogger captures text printed inside its context."""
    with system_output.OutputLogger.get_logger() as logger:
        first_message = "Hello world!"
        second_message = "Bye"
        print(first_message)
        # The first print must be recorded in the captured output.
        assert first_message in logger.output
        print(second_message)
        # The second print lands at index 1 of the captured lines.
        assert logger.output[1] == second_message
| StarcoderdataPython |
11393167 | <reponame>juliawestermayr/schnetpack
import argparse
from argparse import ArgumentParser
from schnetpack.datasets import (
QM9,
ANI1,
MD17,
OrganicMaterialsDatabase,
MaterialsProject,
)
class StoreDictKeyPair(argparse.Action):
    """argparse action that collects repeated ``KEY=VAL`` tokens into a dict.

    Adapted from https://stackoverflow.com/a/42355279
    """

    def __init__(self, option_strings, dest, nargs=None, val_type=str, **kwargs):
        self._nargs = nargs
        self.val_type = val_type
        super(StoreDictKeyPair, self).__init__(
            option_strings, dest, nargs=nargs, **kwargs
        )

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = {}
        for token in values:
            key, raw = token.split("=")
            # typecast: ints may be written as "3.0", so go through float first
            if self.val_type == int:
                parsed[key] = int(float(raw))
            else:
                parsed[key] = self.val_type(raw)
        setattr(namespace, self.dest, parsed)
def get_mode_parsers():
    """Build the parent parsers for the three CLI modes.

    Returns:
        tuple: ``(json_parser, train_parser, eval_parser)`` — parsers created
        with ``add_help=False`` so they can be composed into subparsers via
        ``parents=``.
    """
    # json parser
    json_parser = ArgumentParser(add_help=False)
    json_parser.add_argument(
        "json_path",
        type=str,
        help="Path to argument file. (default: %(default)s)",
        default=None,
    )
    # train parser
    train_parser = ArgumentParser(add_help=False)
    train_parser.add_argument("datapath", help="Path to dataset")
    train_parser.add_argument("modelpath", help="Path of stored model")
    train_parser.add_argument(
        "--cuda", help="Set flag to use GPU(s) for training", action="store_true"
    )
    train_parser.add_argument(
        "--Huber", help="Set flag to use the Huber loss instead of the L2 loss for better handling of outliers.", action="store_true"
    )
    train_parser.add_argument(
        "--parallel",
        help="Run data-parallel on all available GPUs (specify with environment"
        " variable CUDA_VISIBLE_DEVICES)",
        action="store_true",
    )
    train_parser.add_argument(
        "--seed", type=int, default=None, help="Set random seed for torch and numpy."
    )
    train_parser.add_argument(
        "--mlmm", type =str, default = None, help="Enables training of only the QM region for the Delta-Learning approach in QMMM. Requires a file name as argument. "
    )
    train_parser.add_argument(
        "--overwrite", help="Remove previous model directory.", action="store_true"
    )
    # data split
    train_parser.add_argument(
        "--split_path", help="Path / destination of npz with data splits", default=None
    )
    train_parser.add_argument(
        "--split",
        help="Split into [train] [validation] and use remaining for testing",
        type=int,
        nargs=2,
        default=[None, None],
    )
    train_parser.add_argument(
        "--max_epochs",
        type=int,
        help="Maximum number of training epochs (default: %(default)s)",
        default=5000,
    )
    train_parser.add_argument(
        "--max_steps",
        type=int,
        help="Maximum number of training steps (default: %(default)s)",
        default=None,
    )
    train_parser.add_argument(
        "--lr",
        type=float,
        help="Initial learning rate (default: %(default)s)",
        default=1e-4,
    )
    train_parser.add_argument(
        "--lr_patience",
        type=int,
        help="Epochs without improvement before reducing the learning rate "
        "(default: %(default)s)",
        default=25,
    )
    train_parser.add_argument(
        "--lr_decay",
        type=float,
        help="Learning rate decay (default: %(default)s)",
        default=0.8,
    )
    train_parser.add_argument(
        "--lr_min",
        type=float,
        help="Minimal learning rate (default: %(default)s)",
        default=1e-6,
    )
    train_parser.add_argument(
        "--logger",
        help="Choose logger for training process (default: %(default)s)",
        choices=["csv", "tensorboard"],
        default="csv",
    )
    train_parser.add_argument(
        "--log_every_n_epochs",
        type=int,
        help="Log metrics every given number of epochs (default: %(default)s)",
        default=1,
    )
    # NOTE(review): --n_epochs duplicates the --max_epochs help text with a
    # different default; confirm which one the training loop actually honors.
    train_parser.add_argument(
        "--n_epochs",
        type=int,
        help="Maximum number of training epochs (default: %(default)s)",
        default=1000,
    )
    train_parser.add_argument(
        "--checkpoint_interval",
        type=int,
        help="Store checkpoint every n epochs (default: %(default)s)",
        default=1,
    )
    train_parser.add_argument(
        "--keep_n_checkpoints",
        type=int,
        help="Number of checkpoints that will be stored (default: %(default)s)",
        default=3,
    )
    # evaluation parser
    eval_parser = ArgumentParser(add_help=False)
    eval_parser.add_argument("datapath", help="Path to dataset")
    eval_parser.add_argument("modelpath", help="Path of stored model")
    eval_parser.add_argument(
        "--cuda", help="Set flag to use GPU(s) for evaluation", action="store_true"
    )
    eval_parser.add_argument(
        "--parallel",
        help="Run data-parallel on all available GPUs (specify with environment"
        " variable CUDA_VISIBLE_DEVICES)",
        action="store_true",
    )
    eval_parser.add_argument(
        "--mlmm", type =str, default = None, help="Enables training of only the QM region for the Delta-Learning approach in QMMM. Requires a file name as argument. "
    )
    eval_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for evaluation (default: %(default)s)",
        default=100,
    )
    eval_parser.add_argument(
        "--split",
        help="Evaluate trained model on given split",
        choices=["train", "validation", "test"],
        default=["test"],
        nargs="+",
    )
    eval_parser.add_argument(
        "--overwrite", help="Remove previous evaluation files", action="store_true"
    )
    return json_parser, train_parser, eval_parser
def get_model_parsers():
    """Build the model-hyperparameter parent parsers.

    Returns:
        tuple: ``(schnet_parser, wacsf_parser)`` — parent parsers carrying the
        representation settings for SchNet and (w)ACSF models respectively.
    """
    # model parsers
    schnet_parser = ArgumentParser(add_help=False)
    schnet_parser.add_argument(
        "--features", type=int, help="Size of atom-wise representation", default=128
    )
    schnet_parser.add_argument(
        "--interactions", type=int, help="Number of interaction blocks", default=6
    )
    schnet_parser.add_argument(
        "--cutoff_function",
        help="Functional form of the cutoff",
        choices=["hard", "cosine", "mollifier"],
        default="cosine",
    )
    schnet_parser.add_argument(
        "--num_gaussians",
        type=int,
        default=50,
        help="Number of Gaussians to expand distances (default: %(default)s)",
    )
    schnet_parser.add_argument(
        "--normalize_filter",
        action="store_true",
        help="Normalize convolution filters by number of neighbors",
    )
    wacsf_parser = ArgumentParser(add_help=False)
    wacsf_parser.add_argument(
        "--radial",
        type=int,
        default=22,
        help="Number of radial symmetry functions (default: %(default)s)",
    )
    wacsf_parser.add_argument(
        "--angular",
        type=int,
        default=5,
        help="Number of angular symmetry functions (default: %(default)s)",
    )
    wacsf_parser.add_argument(
        "--zetas",
        type=int,
        nargs="+",
        default=[1],
        help="List of zeta exponents used for angle resolution (default: %(default)s)",
    )
    wacsf_parser.add_argument(
        "--standardize",
        action="store_true",
        help="Standardize wACSF before atomistic network.",
    )
    # Atomistic network parameters
    wacsf_parser.add_argument(
        "--n_nodes",
        type=int,
        default=100,
        help="Number of nodes in atomic networks (default: %(default)s)",
    )
    wacsf_parser.add_argument(
        "--n_layers",
        type=int,
        default=2,
        help="Number of layers in atomic networks (default: %(default)s)",
    )
    # Advances wACSF settings
    wacsf_parser.add_argument(
        "--centered",
        action="store_true",
        help="Use centered Gaussians for radial functions",
    )
    wacsf_parser.add_argument(
        "--crossterms", action="store_true", help="Use crossterms in angular functions"
    )
    wacsf_parser.add_argument(
        "--behler", action="store_true", help="Switch to conventional ACSF"
    )
    wacsf_parser.add_argument(
        "--elements",
        default=["H", "C", "N", "O", "F"],
        nargs="+",
        help="List of elements to be used for symmetry functions "
        "(default: %(default)s).",
    )
    return schnet_parser, wacsf_parser
def get_data_parsers():
    """Build one parent parser per supported dataset.

    Each parser carries the dataset-specific options (target property, cutoff,
    batch size, environment provider, ...).

    Returns:
        tuple: ``(qm9_parser, ani1_parser, matproj_parser, md17_parser,
        omdb_parser, custom_data_parser)``.
    """
    # qm9
    qm9_parser = ArgumentParser(add_help=False)
    qm9_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default=QM9.U0,
        choices=[
            QM9.A,
            QM9.B,
            QM9.C,
            QM9.mu,
            QM9.alpha,
            QM9.homo,
            QM9.lumo,
            QM9.gap,
            QM9.r2,
            QM9.zpve,
            QM9.U0,
            QM9.U,
            QM9.H,
            QM9.G,
            QM9.Cv,
        ],
    )
    qm9_parser.add_argument(
        "--cutoff",
        type=float,
        default=10.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    qm9_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    qm9_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    qm9_parser.add_argument(
        "--remove_uncharacterized",
        help="Remove uncharacterized molecules from QM9 (default: %(default)s)",
        action="store_true",
    )
    # ani1
    ani1_parser = ArgumentParser(add_help=False)
    ani1_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default=ANI1.energy,
        choices=[ANI1.energy],
    )
    ani1_parser.add_argument(
        "--cutoff",
        type=float,
        default=10.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    ani1_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    ani1_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    ani1_parser.add_argument(
        "--num_heavy_atoms",
        type=int,
        help="Number of heavy atoms that will be loaded into the database."
        " (default: %(default)s)",
        default=8,
    )
    # materials project
    matproj_parser = ArgumentParser(add_help=False)
    matproj_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted" " (default: %(default)s)",
        default=MaterialsProject.EformationPerAtom,
        choices=[
            MaterialsProject.EformationPerAtom,
            MaterialsProject.EPerAtom,
            MaterialsProject.BandGap,
            MaterialsProject.TotalMagnetization,
        ],
    )
    matproj_parser.add_argument(
        "--cutoff",
        type=float,
        default=5.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    matproj_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=32,
    )
    matproj_parser.add_argument(
        "--environment_provider",
        type=str,
        default="torch",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    matproj_parser.add_argument(
        "--apikey",
        help="API key for Materials Project (see https://materialsproject.org/open)",
        default=None,
    )
    matproj_parser.add_argument(
        "--timestamp",
        help="Timestamp at which to reconstruct the dataset",
        default="2017-12-04 14:20",
    )
    # md17
    md17_parser = ArgumentParser(add_help=False)
    md17_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted" " (default: %(default)s)",
        default=MD17.energy,
        choices=[MD17.energy],
    )
    md17_parser.add_argument(
        "--cutoff",
        type=float,
        default=5.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    md17_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    md17_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    md17_parser.add_argument(
        "--ignore_forces", action="store_true", help="Ignore forces during training."
    )
    md17_parser.add_argument(
        "--molecule",
        type=str,
        help="Choose molecule inside the MD17 dataset. (default: %(default)s)",
        default="ethanol",
        choices=MD17.datasets_dict.keys(),
    )
    md17_parser.add_argument(
        "--rho",
        type=float,
        help="Energy-force trade-off. For rho=0, use forces only. "
        "(default: %(default)s)",
        default=0.1,
    )
    # omdb
    omdb_parser = ArgumentParser(add_help=False)
    omdb_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default=OrganicMaterialsDatabase.BandGap,
        choices=[OrganicMaterialsDatabase.BandGap],
    )
    omdb_parser.add_argument(
        "--cutoff",
        type=float,
        default=5.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    omdb_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=32,
    )
    omdb_parser.add_argument(
        "--environment_provider",
        type=str,
        default="torch",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    # custom dataset
    custom_data_parser = ArgumentParser(add_help=False)
    custom_data_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default="energy",
    )
    custom_data_parser.add_argument(
        "--cutoff",
        type=float,
        default=10.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    custom_data_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    custom_data_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    custom_data_parser.add_argument(
        "--derivative",
        type=str,
        help="Derivative of dataset property to be predicted (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--negative_dr",
        action="store_true",
        help="Multiply derivatives with -1 for training. (default: %(default)s)",
    )
    custom_data_parser.add_argument(
        "--force",
        type=str,
        help="Name of force property in database. Alias for‚ derivative + setting "
        "negative_dr. (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--contributions",
        type=str,
        help="Contributions of dataset property to be predicted (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--stress",
        type=str,
        help="Train on stress tensor if not None (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--aggregation_mode",
        type=str,
        help="Select mode for aggregating atomic properties. (default: %(default)s)",
        default="sum",
    )
    custom_data_parser.add_argument(
        "--output_module",
        type=str,
        help="Select matching output module for selected property. (default: %("
        "default)s)",
        default="atomwise",
        choices=[
            "atomwise",
            "elemental_atomwise",
            "dipole_moment",
            "elemental_dipole_moment",
            "polarizability",
            "isotropic_polarizability",
            "electronic_spatial_extent",
            "charges",
        ],
    )
    custom_data_parser.add_argument(
        "--rho",
        action=StoreDictKeyPair,
        nargs="+",
        metavar="KEY=VAL",
        help="Define loss tradeoff weights with prop=weight. (default: %(default)s)",
        default=dict(),
        val_type=float,
    )
    return (
        qm9_parser,
        ani1_parser,
        matproj_parser,
        md17_parser,
        omdb_parser,
        custom_data_parser,
    )
def build_parser():
    """Assemble the full CLI: mode (train/eval/from_json) -> model
    (schnet/wacsf) -> dataset subcommands, composed from the parent parsers.

    Returns:
        ArgumentParser: the fully wired top-level parser.
    """
    main_parser = ArgumentParser()
    # get parsers
    json_parser, train_parser, eval_parser = get_mode_parsers()
    schnet_parser, wacsf_parser = get_model_parsers()
    (
        qm9_parser,
        ani1_parser,
        matproj_parser,
        md17_parser,
        omdb_parser,
        custom_data_parser,
    ) = get_data_parsers()
    # subparser structure
    # mode
    mode_subparsers = main_parser.add_subparsers(dest="mode", help="main arguments")
    mode_subparsers.required = True
    train_subparser = mode_subparsers.add_parser("train", help="training help")
    eval_subparser = mode_subparsers.add_parser(
        "eval", help="evaluation help", parents=[eval_parser]
    )
    json_subparser = mode_subparsers.add_parser(
        "from_json", help="load from json help", parents=[json_parser]
    )
    # train mode
    train_subparsers = train_subparser.add_subparsers(
        dest="model", help="Model-specific arguments"
    )
    train_subparsers.required = True
    # model
    schnet_subparser = train_subparsers.add_parser("schnet", help="SchNet help")
    wacsf_subparser = train_subparsers.add_parser("wacsf", help="wacsf help")
    # schnet
    schnet_subparsers = schnet_subparser.add_subparsers(
        dest="dataset", help="Dataset specific arguments"
    )
    schnet_subparsers.required = True
    schnet_subparsers.add_parser(
        "ani1",
        help="ANI1 dataset help",
        parents=[train_parser, schnet_parser, ani1_parser],
    )
    schnet_matproj = schnet_subparsers.add_parser(
        "matproj",
        help="Materials Project dataset help",
        parents=[train_parser, schnet_parser, matproj_parser],
    )
    # periodic datasets default to neighbor-normalized filters
    schnet_matproj.set_defaults(normalize_filter=True)
    schnet_subparsers.add_parser(
        "md17",
        help="MD17 dataset help",
        parents=[train_parser, schnet_parser, md17_parser],
    )
    schnet_omdb = schnet_subparsers.add_parser(
        "omdb",
        help="Organic Materials dataset help",
        parents=[train_parser, schnet_parser, omdb_parser],
    )
    schnet_omdb.set_defaults(normalize_filter=True)
    schnet_subparsers.add_parser(
        "qm9",
        help="QM9 dataset help",
        parents=[train_parser, schnet_parser, qm9_parser],
    )
    schnet_subparsers.add_parser(
        "custom",
        help="Custom dataset help",
        parents=[train_parser, schnet_parser, custom_data_parser],
    )
    # wacsf
    wacsf_subparsers = wacsf_subparser.add_subparsers(
        dest="dataset", help="Dataset specific arguments"
    )
    wacsf_subparsers.required = True
    wacsf_subparsers.add_parser(
        "ani1",
        help="ANI1 dataset help",
        parents=[train_parser, wacsf_parser, ani1_parser],
    )
    wacsf_subparsers.add_parser(
        "matproj",
        help="Materials Project dataset help",
        parents=[train_parser, wacsf_parser, matproj_parser],
    )
    wacsf_subparsers.add_parser(
        "md17",
        help="MD17 dataset help",
        parents=[train_parser, wacsf_parser, md17_parser],
    )
    wacsf_subparsers.add_parser(
        "omdb",
        help="Organic Materials dataset help",
        parents=[train_parser, wacsf_parser, omdb_parser],
    )
    wacsf_subparsers.add_parser(
        "qm9", help="QM9 dataset help", parents=[train_parser, wacsf_parser, qm9_parser]
    )
    wacsf_subparsers.add_parser(
        "custom",
        help="Custom dataset help",
        parents=[train_parser, wacsf_parser, custom_data_parser],
    )
    return main_parser
if __name__ == "__main__":
    # Smoke test: build the full CLI and echo the parsed arguments.
    parser = build_parser()
    args = parser.parse_args()
    print(args)
| StarcoderdataPython |
334075 | <reponame>MarvinTeichmann/tensorflow_examples
import tensorflow as tf
import re
import params
# Model dimensions pulled from the shared params module.
IMAGE_SIZE = params.image_size
NUM_CHANNELS = params.num_channels
# NOTE(review): counts pixels per channel only (height * width); confirm that
# consumers do not expect NUM_CHANNELS to be folded in.
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
NUM_CLASSES = params.num_classes
def weight_variable(name, shape, stddev=0.1):
    """Get-or-create a weight variable initialised from a truncated normal."""
    init = tf.truncated_normal_initializer(stddev=stddev)
    return tf.get_variable(name, shape=shape, initializer=init)
def bias_variable(name, shape, constant=0.1):
    """Get-or-create a bias variable filled with a constant value."""
    return tf.get_variable(
        name, shape=shape, initializer=tf.constant_initializer(constant)
    )
def conv2d(x, W):
    # 2-D convolution, stride 1 in every dimension, "SAME" zero padding so the
    # spatial size of `x` is preserved.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x, name):
    # 2x2 max pooling with stride 2: halves height and width ("SAME" padding).
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME', name=name
                          )
def _activation_summary(x):
    """Helper to create summaries for activations.
    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
    Args:
      x: Tensor
    Returns:
      nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on tensorboard.
    tensor_name = x.op.name
    # tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # NOTE(review): tf.histogram_summary/tf.scalar_summary are TF 0.x-era API
    # names; confirm the installed TensorFlow still exposes them.
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def inference(images, keep_prob, train=True, num_filter_1=32, num_filter_2=64):
    """Build the MNIST model up to where it may be used for inference.
    Args:
      images: Images placeholder, from inputs().
      keep_prob: Dropout keep probability placeholder (scalar).
      train: NOTE(review) — accepted but never used in this body; confirm intent.
      num_filter_1: Amount of filters in conv1.
      num_filter_2: Amount of filters in conv2.
    Returns:
      softmax_linear: Output tensor with the computed logits.
    """
    # First Convolutional Layer
    with tf.variable_scope('Conv1') as scope:
        # Adding Convolutional Layers
        W_conv1 = weight_variable('weights', [5, 5, NUM_CHANNELS, num_filter_1])
        b_conv1 = bias_variable('biases', [num_filter_1])
        x_image = tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, name=scope.name)
        _activation_summary(h_conv1)
    # First Pooling Layer
    h_pool1 = max_pool_2x2(h_conv1, name='pool1')
    # Second Convolutional Layer
    with tf.variable_scope('Conv2') as scope:
        W_conv2 = weight_variable('weights', [5, 5, num_filter_1, num_filter_2])
        b_conv2 = bias_variable('biases', [num_filter_2])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name=scope.name)
        _activation_summary(h_conv2)
    # Second Pooling Layer
    h_pool2 = max_pool_2x2(h_conv2, name='pool2')
    # Find correct dimension: flattened size of h_pool2 per example.
    dim = 1
    for d in h_pool2.get_shape()[1:].as_list():
        dim *= d
    # Adding Fully Connected Layers
    with tf.variable_scope('fc1') as scope:
        W_fc1 = weight_variable('weights', [dim, 1024])
        b_fc1 = bias_variable('biases',[1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, dim])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name=scope.name)
        _activation_summary(h_fc1)
    # Adding Dropout (applied unconditionally — also active at eval unless the
    # caller feeds keep_prob=1.0).
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name='dropout')
    with tf.variable_scope('logits') as scope:
        W_fc2 = weight_variable('weights', [1024, NUM_CLASSES])
        b_fc2 = bias_variable('biases', [NUM_CLASSES])
        logits = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2, name=scope.name)
        _activation_summary(logits)
    return logits
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].
    Returns:
      loss: Loss tensor of type float.
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    with tf.variable_scope('loss') as scope:
        batch_size = tf.size(labels)
        labels = tf.expand_dims(labels, 1)
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        # NOTE(review): tf.concat(axis, values) / tf.pack are TF 0.x call
        # conventions; confirm against the installed TensorFlow version.
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                                onehot_labels,
                                                                name='xentropy')
        # Mean cross-entropy over the batch.
        loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
def evaluation(logits, labels):
    """Evaluate the quality of the logits at predicting the label.
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size], with values in the
        range [0, NUM_CLASSES).
    Returns:
      A scalar int32 tensor with the number of examples (out of batch_size)
      that were predicted correctly.
    """
    # For a classifier model, we can use the in_top_k Op.
    # It returns a bool tensor with shape [batch_size] that is true for
    # the examples where the label's is was in the top k (here k=1)
    # of all logits for that example.
    with tf.name_scope('eval'):
        correct = tf.nn.in_top_k(logits, labels, 1)
    # Return the number of true entries.
    return tf.reduce_sum(tf.cast(correct, tf.int32))
| StarcoderdataPython |
1649527 | <reponame>quantastica/qconvert-py
# This code is part of quantastica.qconvert
#
# (C) Copyright Quantastica 2019.
# https://quantastica.com/
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from enum import Enum
from . import qobj_to_pyquil, qobj_to_toaster
from . import qconvert_base
class Format(Enum):
    """Circuit description formats accepted/produced by :func:`convert`."""
    UNDEFINED = 0
    QOBJ = 1
    PYQUIL = 2
    TOASTER = 3
# Options:
# - PYQUIL:
# all_experiments: - True: all experiments form Qobj will be converted and returned as list of strings.
# - False (default): only first experiment will be converted and returned as string
#
# create_exec_code: if True: print(qc.run(ex)) will be created. Default: True
#
# lattice: name of the backend lattice (e.g. "Aspen-7-28Q-A"). If ommited then "Nq-qvm" will be generated
# - special values: - "qasm_simulator" will produce "Nq-qvm" backend
# - "statevector_simulator" will produce WaveFunction simulator code
#
# as_qvm: True/False. if True, QVM will mimic QPU specified by lattice argument. Default: False
# seed: if valid integer set random_seed for qc.qam to given value. Default: None
# - TOASTER: ...
def convert(source_format, source_dict, dest_format, options=None):
    """Convert a circuit description between formats.

    Args:
        source_format: a :class:`Format` member describing ``source_dict``.
        source_dict: the circuit data to convert.
        dest_format: the target :class:`Format`.
        options: optional dict of converter-specific options (see the
            comment block above for the PYQUIL/TOASTER keys).

    Returns:
        The converted circuit (string, or list of strings for
        ``all_experiments``).

    Raises:
        RuntimeError: when the (source, destination) pair is unsupported.
    """
    # Bug fix: the default used to be a single shared `dict()` instance, which
    # downstream converters could mutate across calls.
    if options is None:
        options = dict()
    if source_format == Format.QOBJ:
        if dest_format == Format.PYQUIL:
            return qobj_to_pyquil.qobj_to_pyquil(source_dict, options)
        if dest_format == Format.TOASTER:
            return qobj_to_toaster.qobj_to_toaster(source_dict, options)
    # Single error path for every unsupported combination (previously the same
    # message was duplicated in two branches).
    msg = "Unsuported conversion formats - source: %s destination: %s" % (
        str(source_format), str(dest_format))
    raise RuntimeError(msg)
def supported_gates():
    """Return the names of all gates known to the converter base definitions."""
    return [gate_name for gate_name in qconvert_base.gate_defs]
| StarcoderdataPython |
163526 | <gh_stars>0
from ModelOptions import compute_local_fragility
import Fragility.GlobalRegression
# The local-curve module is only imported when local fragility computation is
# enabled in ModelOptions, keeping it out of the default import path.
if compute_local_fragility:
    import Fragility.LocalCurve
9775068 | # -*- coding: utf-8 -*-
"""
Universal Language Model Fine-tuning for Text Classification (ULMFiT).
Code by <NAME>
https://github.com/cstorm125/thai2fit/
Some pre-processing functions are from fastai (Apache 2.0)
https://github.com/fastai/fastai/blob/master/fastai/text/transform.py
Universal Language Model Fine-tuning for Text Classification
https://arxiv.org/abs/1801.06146
"""
# Public API of the ULMFiT subpackage, re-exported from core/preprocess/tokenizer.
__all__ = [
    "THWIKI_LSTM",
    "ThaiTokenizer",
    "document_vector",
    "merge_wgts",
    "post_rules_th",
    "post_rules_th_sparse",
    "pre_rules_th",
    "pre_rules_th_sparse",
    "process_thai",
    "fix_html",
    "lowercase_all",
    "remove_space",
    "replace_rep_after",
    "replace_rep_nonum",
    "replace_url",
    "replace_wrep_post",
    "replace_wrep_post_nonum",
    "rm_brackets",
    "rm_useless_newlines",
    "rm_useless_spaces",
    "spec_add_spaces",
    "ungroup_emoji"
]
from pythainlp.ulmfit.core import (
THWIKI_LSTM,
document_vector,
merge_wgts,
post_rules_th,
post_rules_th_sparse,
pre_rules_th,
pre_rules_th_sparse,
process_thai,
)
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.ulmfit.tokenizer import ThaiTokenizer
| StarcoderdataPython |
11224606 | from django.urls import path, include
# URL namespace used for reversing, e.g. "shitter:...".
app_name = 'shitter'
# All routes are delegated to the API sub-package under its own namespace.
urlpatterns = [
    path('', include('shitter.api.urls', namespace='shitter_api')),
]
| StarcoderdataPython |
3370986 | <reponame>erykoff/fgcm
import numpy as np
import os
import sys
import esutil
import time
from .fgcmUtilities import retrievalFlagDict
from .fgcmUtilities import MaxFitIterations
from .fgcmUtilities import Cheb2dField
from .fgcmUtilities import objFlagDict
from .fgcmNumbaUtilities import numba_test, add_at_1d, add_at_2d, add_at_3d
import multiprocessing
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmChisq(object):
"""
Class which computes the chi-squared for the fit.
parameters
----------
fgcmConfig: FgcmConfig
Config object
fgcmPars: FgcmParameters
Parameter object
fgcmStars: FgcmStars
Stars object
fgcmLUT: FgcmLUT
LUT object
Config variables
----------------
nCore: int
Number of cores to run in multiprocessing
nStarPerRun: int
Number of stars per run. More can use more memory.
noChromaticCorrections: bool
If set to True, then no chromatic corrections are applied. (bad idea).
"""
    def __init__(self,fgcmConfig,fgcmPars,fgcmStars,fgcmLUT):
        """
        Construct the chi-squared computer, caching configuration values
        and references to the shared parameter/star/LUT objects.

        Parameters
        ----------
        fgcmConfig: FgcmConfig
            Config object (source of all the scalar settings copied below).
        fgcmPars: FgcmParameters
            Parameter object.
        fgcmStars: FgcmStars
            Stars object (shared-memory backed).
        fgcmLUT: FgcmLUT
            Look-up table object (shared-memory backed).
        """
        self.fgcmLog = fgcmConfig.fgcmLog
        self.fgcmLog.debug('Initializing FgcmChisq')
        # does this need to be shm'd?
        self.fgcmPars = fgcmPars
        # this is shm'd
        self.fgcmLUT = fgcmLUT
        # also shm'd
        self.fgcmStars = fgcmStars
        # need to configure
        self.nCore = fgcmConfig.nCore
        self.ccdStartIndex = fgcmConfig.ccdStartIndex
        self.nStarPerRun = fgcmConfig.nStarPerRun
        self.noChromaticCorrections = fgcmConfig.noChromaticCorrections
        self.bandFitIndex = fgcmConfig.bandFitIndex
        self.useQuadraticPwv = fgcmConfig.useQuadraticPwv
        self.freezeStdAtmosphere = fgcmConfig.freezeStdAtmosphere
        self.ccdGraySubCCD = fgcmConfig.ccdGraySubCCD
        self.useRefStarsWithInstrument = fgcmConfig.useRefStarsWithInstrument
        self.instrumentParsPerBand = fgcmConfig.instrumentParsPerBand
        self.saveParsForDebugging = fgcmConfig.saveParsForDebugging
        self.quietMode = fgcmConfig.quietMode
        self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
        # these are the standard *band* I10s
        self.I10StdBand = fgcmConfig.I10StdBand
        self.illegalValue = fgcmConfig.illegalValue
        if (fgcmConfig.useSedLUT and self.fgcmLUT.hasSedLUT):
            self.useSedLUT = True
        else:
            self.useSedLUT = False
        # Filled in later via setDeltaMapperDefault(); needed for sub-CCD gray.
        self.deltaMapperDefault = None
        self.resetFitChisqList()
        # this is the default number of parameters
        self.nActualFitPars = self.fgcmPars.nFitPars
        if not self.quietMode:
            self.fgcmLog.info('Default: fit %d parameters.' % (self.nActualFitPars))
        self.clearMatchCache()
        # -1 means "no limit" (see the maxIterations check in __call__).
        self.maxIterations = -1
        numba_test(0)
def resetFitChisqList(self):
"""
Reset the recorded list of chi-squared values.
"""
self.fitChisqs = []
self._nIterations = 0
    @property
    def maxIterations(self):
        # Maximum number of fit iterations before __call__ raises
        # MaxFitIterations; initialized to -1 in __init__, meaning "no limit"
        # (the limit check in __call__ only fires when this is > 0).
        return self._maxIterations
    @maxIterations.setter
    def maxIterations(self, value):
        # Plain storage; no validation is performed on the supplied value.
        self._maxIterations = value
def clearMatchCache(self):
"""
Clear the pre-match cache. Note that this isn't working right.
"""
self.matchesCached = False
self.goodObs = None
self.goodStarsSub = None
def setDeltaMapperDefault(self, deltaMapperDefault):
"""
Set the deltaMapperDefault array.
Parameters
----------
deltaMapperDefault : `np.recarray`
"""
self.deltaMapperDefault = deltaMapperDefault
    def __call__(self,fitParams,fitterUnits=False,computeDerivatives=False,computeSEDSlopes=False,useMatchCache=False,computeAbsThroughput=False,ignoreRef=False,debug=False,allExposures=False,includeReserve=False,fgcmGray=None):
        """
        Compute the chi-squared for a given set of parameters.

        parameters
        ----------
        fitParams: numpy array of floats
            Array with the numerical values of the parameters (properly formatted).
        fitterUnits: bool, default=False
            Are the units of fitParams normalized for the minimizer?
        computeDerivatives: bool, default=False
            Compute fit derivatives?
        computeSEDSlopes: bool, default=False
            Compute SED slopes from magnitudes?
        useMatchCache: bool, default=False
            Cache observation matches. Do not use!
        computeAbsThroughput: `bool`, default=False
            Compute the absolute throughput after computing mean mags
        ignoreRef: `bool`, default=False
            Ignore reference stars for computation...
        debug: bool, default=False
            Debug mode with no multiprocessing
        allExposures: bool, default=False
            Compute using all exposures, including flagged/non-photometric
        includeReserve: bool, default=False
            Compute using all objects, including those put in reserve.
        fgcmGray: FgcmGray, default=None
            CCD Gray information for computing with "ccd crunch"

        Returns
        -------
        fitChisq (and dChisqdP when computeDerivatives is set).
        """
        # computeDerivatives: do we want to compute the derivatives?
        # computeSEDSlope: compute SED Slope and recompute mean mags?
        # fitterUnits: units of the fitter or "true" units?
        # Stash the call options on self so the worker methods (which only
        # receive (goodStars, goodObs) tuples) can see them.
        self.computeDerivatives = computeDerivatives
        self.computeSEDSlopes = computeSEDSlopes
        self.fitterUnits = fitterUnits
        self.allExposures = allExposures
        self.useMatchCache = useMatchCache
        self.includeReserve = includeReserve
        self.fgcmGray = fgcmGray  # may be None
        self.computeAbsThroughput = computeAbsThroughput
        self.ignoreRef = ignoreRef
        self.fgcmLog.debug('FgcmChisq: computeDerivatives = %d' %
                           (int(computeDerivatives)))
        self.fgcmLog.debug('FgcmChisq: computeSEDSlopes = %d' %
                           (int(computeSEDSlopes)))
        self.fgcmLog.debug('FgcmChisq: fitterUnits = %d' %
                           (int(fitterUnits)))
        self.fgcmLog.debug('FgcmChisq: allExposures = %d' %
                           (int(allExposures)))
        self.fgcmLog.debug('FgcmChisq: includeReserve = %d' %
                           (int(includeReserve)))
        startTime = time.time()
        if (self.allExposures and (self.computeDerivatives or
                                   self.computeSEDSlopes)):
            raise ValueError("Cannot set allExposures and computeDerivatives or computeSEDSlopes")
        # When we're doing the fitting, we want to fill in the missing qe sys values if needed
        self.fgcmPars.reloadParArray(fitParams, fitterUnits=self.fitterUnits)
        self.fgcmPars.parsToExposures()
        if self.saveParsForDebugging:
            # put in saving of the parameters...
            # this will be in both units
            import astropy.io.fits as pyfits
            # First dump: the raw fit parameters (fitter units).
            tempCat = np.zeros(1, dtype=[('o3', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('lnTauIntercept', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('lnTauSlope', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('alpha', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('lnPwvIntercept', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('lnPwvSlope', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('lnPwvQuadratic', 'f8', (self.fgcmPars.nCampaignNights, )),
                                         ('qeSysIntercept', 'f8', (self.fgcmPars.parQESysIntercept.size, ))])
            tempCat['o3'][0][:] = fitParams[self.fgcmPars.parO3Loc:
                                            (self.fgcmPars.parO3Loc +
                                             self.fgcmPars.nCampaignNights)]
            tempCat['lnTauIntercept'][0][:] = fitParams[self.fgcmPars.parLnTauInterceptLoc:
                                                        (self.fgcmPars.parLnTauInterceptLoc +
                                                         self.fgcmPars.nCampaignNights)]
            tempCat['lnTauSlope'][0][:] = fitParams[self.fgcmPars.parLnTauSlopeLoc:
                                                    (self.fgcmPars.parLnTauSlopeLoc +
                                                     self.fgcmPars.nCampaignNights)]
            tempCat['alpha'][0][:] = fitParams[self.fgcmPars.parAlphaLoc:
                                               (self.fgcmPars.parAlphaLoc +
                                                self.fgcmPars.nCampaignNights)]
            tempCat['lnPwvIntercept'][0][:] = fitParams[self.fgcmPars.parLnPwvInterceptLoc:
                                                        (self.fgcmPars.parLnPwvInterceptLoc +
                                                         self.fgcmPars.nCampaignNights)]
            tempCat['lnPwvSlope'][0][:] = fitParams[self.fgcmPars.parLnPwvSlopeLoc:
                                                    (self.fgcmPars.parLnPwvSlopeLoc +
                                                     self.fgcmPars.nCampaignNights)]
            tempCat['lnPwvQuadratic'][0][:] = fitParams[self.fgcmPars.parLnPwvQuadraticLoc:
                                                        (self.fgcmPars.parLnPwvQuadraticLoc +
                                                         self.fgcmPars.nCampaignNights)]
            tempCat['qeSysIntercept'][0][:] = fitParams[self.fgcmPars.parQESysInterceptLoc:
                                                        (self.fgcmPars.parQESysInterceptLoc +
                                                         self.fgcmPars.parQESysIntercept.size)]
            pyfits.writeto('%s_fitParams_%d_fitterunits.fits' % (self.outfileBaseWithCycle, len(self.fitChisqs) + 1), tempCat, overwrite=True)
            # Second dump: the same parameters in "parameter" (physical) units.
            tempCat = np.zeros((1, ), dtype=[('o3', 'f8', self.fgcmPars.nCampaignNights),
                                             ('lnTauIntercept', 'f8', self.fgcmPars.nCampaignNights),
                                             ('lnTauSlope', 'f8', self.fgcmPars.nCampaignNights),
                                             ('alpha', 'f8', self.fgcmPars.nCampaignNights),
                                             ('lnPwvIntercept', 'f8', self.fgcmPars.nCampaignNights),
                                             ('lnPwvSlope', 'f8', self.fgcmPars.nCampaignNights),
                                             ('lnPwvQuadratic', 'f8', self.fgcmPars.nCampaignNights),
                                             ('qeSysIntercept', 'f8', (self.fgcmPars.nBands, self.fgcmPars.nWashIntervals))])
            tempCat['o3'][0][:] = self.fgcmPars.parO3
            tempCat['lnTauIntercept'][0][:] = self.fgcmPars.parLnTauIntercept
            tempCat['lnTauSlope'][0][:] = self.fgcmPars.parLnTauSlope
            tempCat['alpha'][0][:] = self.fgcmPars.parAlpha
            tempCat['lnPwvIntercept'][0][:] = self.fgcmPars.parLnPwvIntercept
            tempCat['lnPwvSlope'][0][:] = self.fgcmPars.parLnPwvSlope
            tempCat['lnPwvQuadratic'][0][:] = self.fgcmPars.parLnPwvQuadratic
            tempCat['qeSysIntercept'][0][:, :] = self.fgcmPars.parQESysIntercept
            # NOTE(review): '%s' is used for the counter here where the
            # companion file above uses '%d'; output is identical for ints.
            pyfits.writeto('%s_fitParams_%s_parunits.fits' % (self.outfileBaseWithCycle, len(self.fitChisqs) + 1), tempCat, overwrite=True)
        #############
        # and reset numbers if necessary
        # 99.0 is the sentinel for "not computed" (see the < 90.0 cuts below).
        if (not self.allExposures):
            snmm.getArray(self.fgcmStars.objMagStdMeanHandle)[:] = 99.0
            snmm.getArray(self.fgcmStars.objMagStdMeanNoChromHandle)[:] = 99.0
            snmm.getArray(self.fgcmStars.objMagStdMeanErrHandle)[:] = 99.0
        goodStars = self.fgcmStars.getGoodStarIndices(includeReserve=self.includeReserve)
        if self._nIterations == 0:
            self.fgcmLog.info('Found %d good stars for chisq' % (goodStars.size))
        if (goodStars.size == 0):
            raise RuntimeError("No good stars to fit!")
        if self.fgcmStars.hasRefstars:
            objRefIDIndex = snmm.getArray(self.fgcmStars.objRefIDIndexHandle)
        # do global pre-matching before giving to workers, because
        # it is faster this way
        obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
        obsFlag = snmm.getArray(self.fgcmStars.obsFlagHandle)
        if (self.useMatchCache and self.matchesCached) :
            # we have already done the matching
            self.fgcmLog.debug('Retrieving cached matches')
            goodObs = self.goodObs
            goodStarsSub = self.goodStarsSub
        else:
            # we need to do matching
            preStartTime=time.time()
            self.fgcmLog.debug('Pre-matching stars and observations...')
            if not self.allExposures:
                expFlag = self.fgcmPars.expFlag
            else:
                expFlag = None
            goodStarsSub, goodObs = self.fgcmStars.getGoodObsIndices(goodStars, expFlag=expFlag)
            self.fgcmLog.debug('Pre-matching done in %.1f sec.' %
                               (time.time() - preStartTime))
            if (self.useMatchCache) :
                self.fgcmLog.debug('Caching matches for next iteration')
                self.matchesCached = True
                self.goodObs = goodObs
                self.goodStarsSub = goodStarsSub
        self.nSums = 4  # chisq, chisq_ref, nobs, nobs_ref
        if self.computeDerivatives:
            # 0: nFitPars -> derivative for regular chisq
            # nFitPars: 2*nFitPars -> parameters which are touched (regular chisq)
            # 2*nFitPars: 3*nFitPars -> derivative for reference stars
            # 3*nFitPars: 4*nFitPars -> parameters that are touched (reference chisq)
            self.nSums += 4 * self.fgcmPars.nFitPars
        self.applyDelta = False
        self.debug = debug
        if (self.debug):
            # debug mode: single core
            self.totalHandleDict = {}
            self.totalHandleDict[0] = snmm.createArray(self.nSums,dtype='f8')
            self._magWorker((goodStars, goodObs))
            if self.computeAbsThroughput:
                self.applyDelta = True
                self.deltaAbsOffset = self.fgcmStars.computeAbsOffset()
                self.fgcmPars.compAbsThroughput *= 10.**(-self.deltaAbsOffset / 2.5)
            if not self.allExposures:
                self._chisqWorker((goodStars, goodObs))
            partialSums = snmm.getArray(self.totalHandleDict[0])[:]
        else:
            # regular multi-core
            mp_ctx = multiprocessing.get_context('fork')
            # make a dummy process to discover starting child number
            proc = mp_ctx.Process()
            workerIndex = proc._identity[0]+1
            proc = None
            self.totalHandleDict = {}
            for thisCore in range(self.nCore):
                self.totalHandleDict[workerIndex + thisCore] = (
                    snmm.createArray(self.nSums,dtype='f8'))
            # split goodStars into a list of arrays of roughly equal size
            prepStartTime = time.time()
            nSections = goodStars.size // self.nStarPerRun + 1
            goodStarsList = np.array_split(goodStars,nSections)
            # is there a better way of getting all the first elements from the list?
            # note that we need to skip the first which should be zero (checked above)
            # see also fgcmBrightObs.py
            # splitValues is the first of the goodStars in each list
            splitValues = np.zeros(nSections-1,dtype='i4')
            for i in range(1,nSections):
                splitValues[i-1] = goodStarsList[i][0]
            # get the indices from the goodStarsSub matched list (matched to goodStars)
            splitIndices = np.searchsorted(goodStars[goodStarsSub], splitValues)
            # and split along the indices
            goodObsList = np.split(goodObs,splitIndices)
            workerList = list(zip(goodStarsList,goodObsList))
            # reverse sort so the longest running go first
            workerList.sort(key=lambda elt:elt[1].size, reverse=True)
            self.fgcmLog.debug('Using %d sections (%.1f seconds)' %
                               (nSections,time.time()-prepStartTime))
            self.fgcmLog.debug('Running chisq on %d cores' % (self.nCore))
            # make a pool
            pool = mp_ctx.Pool(processes=self.nCore)
            # Compute magnitudes
            pool.map(self._magWorker, workerList, chunksize=1)
            # And compute absolute offset if desired...
            if self.computeAbsThroughput:
                self.applyDelta = True
                self.deltaAbsOffset = self.fgcmStars.computeAbsOffset()
                self.fgcmPars.compAbsThroughput *= 10.**(-self.deltaAbsOffset / 2.5)
            # And the follow-up chisq and derivatives
            if not self.allExposures:
                pool.map(self._chisqWorker, workerList, chunksize=1)
            pool.close()
            pool.join()
            # sum up the partial sums from the different jobs
            partialSums = np.zeros(self.nSums,dtype='f8')
            for thisCore in range(self.nCore):
                partialSums[:] += snmm.getArray(
                    self.totalHandleDict[workerIndex + thisCore])[:]
        if (not self.allExposures):
            # we get the number of fit parameters by counting which of the parameters
            # have been touched by the data (number of touches is irrelevant)
            if self.computeDerivatives:
                # Note that the extra partialSums for the reference stars will be zero
                # if there are no reference stars.
                nonZero, = np.where((partialSums[self.fgcmPars.nFitPars:
                                                     2*self.fgcmPars.nFitPars] > 0) |
                                    (partialSums[3*self.fgcmPars.nFitPars:
                                                     4*self.fgcmPars.nFitPars] > 0))
                self.nActualFitPars = nonZero.size
                if self._nIterations == 0:
                    self.fgcmLog.info('Actually fit %d parameters.' % (self.nActualFitPars))
            # partialSums layout at the tail: [-4]=chisq, [-3]=nobs,
            # [-2]=chisq_ref, [-1]=nobs_ref (filled by _chisqWorker).
            fitDOF = partialSums[-3] + partialSums[-1] - float(self.nActualFitPars)
            if (fitDOF <= 0):
                raise ValueError("Number of parameters fitted is more than number of constraints! (%d > %d)" % (self.fgcmPars.nFitPars,partialSums[-1]))
            fitChisq = (partialSums[-4] + partialSums[-2]) / fitDOF
            if self.computeDerivatives:
                dChisqdP = (partialSums[0:self.fgcmPars.nFitPars] +
                            partialSums[2*self.fgcmPars.nFitPars: 3*self.fgcmPars.nFitPars]) / fitDOF
                if self.saveParsForDebugging:
                    import astropy.io.fits as pyfits
                    # Debug dump of the derivatives (fitter units).
                    tempCat = np.zeros((1, ), dtype=[('chisq', 'f8'),
                                                     ('o3', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('lnTauIntercept', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('lnTauSlope', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('alpha', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('lnPwvIntercept', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('lnPwvSlope', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('lnPwvQuadratic', 'f8', self.fgcmPars.nCampaignNights),
                                                     ('qeSysIntercept', 'f8', self.fgcmPars.nWashIntervals)])
                    tempCat['o3'][0][:] = dChisqdP[self.fgcmPars.parO3Loc:
                                                   (self.fgcmPars.parO3Loc +
                                                    self.fgcmPars.nCampaignNights)]
                    tempCat['lnTauIntercept'][0][:] = dChisqdP[self.fgcmPars.parLnTauInterceptLoc:
                                                               (self.fgcmPars.parLnTauInterceptLoc +
                                                                self.fgcmPars.nCampaignNights)]
                    tempCat['lnTauSlope'][0][:] = dChisqdP[self.fgcmPars.parLnTauSlopeLoc:
                                                           (self.fgcmPars.parLnTauSlopeLoc +
                                                            self.fgcmPars.nCampaignNights)]
                    tempCat['alpha'][0][:] = dChisqdP[self.fgcmPars.parAlphaLoc:
                                                      (self.fgcmPars.parAlphaLoc +
                                                       self.fgcmPars.nCampaignNights)]
                    tempCat['lnPwvIntercept'][0][:] = dChisqdP[self.fgcmPars.parLnPwvInterceptLoc:
                                                               (self.fgcmPars.parLnPwvInterceptLoc +
                                                                self.fgcmPars.nCampaignNights)]
                    tempCat['lnPwvSlope'][0][:] = dChisqdP[self.fgcmPars.parLnPwvSlopeLoc:
                                                           (self.fgcmPars.parLnPwvSlopeLoc +
                                                            self.fgcmPars.nCampaignNights)]
                    tempCat['lnPwvQuadratic'][0][:] = dChisqdP[self.fgcmPars.parLnPwvQuadraticLoc:
                                                               (self.fgcmPars.parLnPwvQuadraticLoc +
                                                                self.fgcmPars.nCampaignNights)]
                    tempCat['qeSysIntercept'][0][:] = dChisqdP[self.fgcmPars.parQESysInterceptLoc:
                                                               (self.fgcmPars.parQESysInterceptLoc +
                                                                self.fgcmPars.nWashIntervals)]
                    tempCat['chisq'][0] = fitChisq
                    pyfits.writeto('%s_dChisqdP_%d_fitterunits.fits' % (self.outfileBaseWithCycle, len(self.fitChisqs) + 1), tempCat, overwrite=True)
            # want to append this...
            self.fitChisqs.append(fitChisq)
            self._nIterations += 1
            self.fgcmLog.info('Chisq/dof = %.6f (%d iterations)' %
                              (fitChisq, len(self.fitChisqs)))
            # Make sure the final chisq is at or near the minimum. Otherwise sometimes
            # we cut out at one of the cray-cray points, and that is bad.
            if (self.maxIterations > 0 and self._nIterations > self.maxIterations and
                fitChisq < (np.min(np.array(self.fitChisqs)) + 0.1)):
                self.fgcmLog.info('Ran over maximum number of iterations.')
                raise MaxFitIterations
        else:
            # allExposures mode: no new chisq was computed; reuse the last one.
            try:
                fitChisq = self.fitChisqs[-1]
            except IndexError:
                fitChisq = 0.0
        # free shared arrays
        for key in self.totalHandleDict.keys():
            snmm.freeArray(self.totalHandleDict[key])
        if not self.quietMode:
            self.fgcmLog.info('Chisq computation took %.2f seconds.' %
                              (time.time() - startTime))
        self.fgcmStars.magStdComputed = True
        if (self.allExposures):
            self.fgcmStars.allMagStdComputed = True
        if (self.computeDerivatives):
            return fitChisq, dChisqdP
        else:
            return fitChisq
    def _magWorker(self, goodStarsAndObs):
        """
        Multiprocessing worker to compute standard/mean magnitudes for FgcmChisq.
        Not to be called on its own.

        Parameters
        ----------
        goodStarsAndObs: tuple[2]
            (goodStars, goodObs)
        """
        goodStars = goodStarsAndObs[0]
        goodObs = goodStarsAndObs[1]
        # Attach to the shared-memory arrays for star/observation data.
        objMagStdMean = snmm.getArray(self.fgcmStars.objMagStdMeanHandle)
        objMagStdMeanNoChrom = snmm.getArray(self.fgcmStars.objMagStdMeanNoChromHandle)
        objMagStdMeanErr = snmm.getArray(self.fgcmStars.objMagStdMeanErrHandle)
        objSEDSlope = snmm.getArray(self.fgcmStars.objSEDSlopeHandle)
        obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
        obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
        obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
        obsLUTFilterIndex = snmm.getArray(self.fgcmStars.obsLUTFilterIndexHandle)
        obsCCDIndex = snmm.getArray(self.fgcmStars.obsCCDHandle) - self.ccdStartIndex
        obsFlag = snmm.getArray(self.fgcmStars.obsFlagHandle)
        obsSecZenith = snmm.getArray(self.fgcmStars.obsSecZenithHandle)
        obsMagADU = snmm.getArray(self.fgcmStars.obsMagADUHandle)
        obsMagADUModelErr = snmm.getArray(self.fgcmStars.obsMagADUModelErrHandle)
        obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
        obsDeltaStd = snmm.getArray(self.fgcmStars.obsDeltaStdHandle)
        # and fgcmGray stuff (if desired)
        if (self.fgcmGray is not None):
            ccdGray = snmm.getArray(self.fgcmGray.ccdGrayHandle)
            # this is ccdGray[expIndex, ccdIndex]
            # and we only apply when > self.illegalValue
            # same sign as FGCM_DUST (QESys)
            if np.any(self.ccdGraySubCCD):
                ccdGraySubCCDPars = snmm.getArray(self.fgcmGray.ccdGraySubCCDParsHandle)
        # and the arrays for locking access
        objMagStdMeanLock = snmm.getArrayBase(self.fgcmStars.objMagStdMeanHandle).get_lock()
        obsMagStdLock = snmm.getArrayBase(self.fgcmStars.obsMagStdHandle).get_lock()
        # cut these down now, faster later
        obsObjIDIndexGO = esutil.numpy_util.to_native(obsObjIDIndex[goodObs])
        obsBandIndexGO = esutil.numpy_util.to_native(obsBandIndex[goodObs])
        obsLUTFilterIndexGO = esutil.numpy_util.to_native(obsLUTFilterIndex[goodObs])
        obsExpIndexGO = esutil.numpy_util.to_native(obsExpIndex[goodObs])
        obsCCDIndexGO = esutil.numpy_util.to_native(obsCCDIndex[goodObs])
        obsSecZenithGO = obsSecZenith[goodObs]
        # which observations are actually used in the fit?
        # now refer to obsBandIndex[goodObs]
        # add GO to index names that are cut to goodObs
        # add GOF to index names that are cut to goodObs[obsFitUseGO]
        lutIndicesGO = self.fgcmLUT.getIndices(obsLUTFilterIndexGO,
                                               self.fgcmPars.expLnPwv[obsExpIndexGO],
                                               self.fgcmPars.expO3[obsExpIndexGO],
                                               self.fgcmPars.expLnTau[obsExpIndexGO],
                                               self.fgcmPars.expAlpha[obsExpIndexGO],
                                               obsSecZenithGO,
                                               obsCCDIndexGO,
                                               self.fgcmPars.expPmb[obsExpIndexGO])
        I0GO = self.fgcmLUT.computeI0(self.fgcmPars.expLnPwv[obsExpIndexGO],
                                      self.fgcmPars.expO3[obsExpIndexGO],
                                      self.fgcmPars.expLnTau[obsExpIndexGO],
                                      self.fgcmPars.expAlpha[obsExpIndexGO],
                                      obsSecZenithGO,
                                      self.fgcmPars.expPmb[obsExpIndexGO],
                                      lutIndicesGO)
        I10GO = self.fgcmLUT.computeI1(self.fgcmPars.expLnPwv[obsExpIndexGO],
                                       self.fgcmPars.expO3[obsExpIndexGO],
                                       self.fgcmPars.expLnTau[obsExpIndexGO],
                                       self.fgcmPars.expAlpha[obsExpIndexGO],
                                       obsSecZenithGO,
                                       self.fgcmPars.expPmb[obsExpIndexGO],
                                       lutIndicesGO) / I0GO
        qeSysGO = self.fgcmPars.expQESys[obsExpIndexGO]
        filterOffsetGO = self.fgcmPars.expFilterOffset[obsExpIndexGO]
        # Explicitly update obsMagADU to float64 (internally is 32-bit)
        # I0GO, qeSysGO, filterOffsetGO are 64 bit
        obsMagGO = obsMagADU[goodObs].astype(np.float64) + \
            2.5*np.log10(I0GO) + qeSysGO + filterOffsetGO
        if (self.fgcmGray is not None):
            # We want to apply the "CCD Gray Crunch"
            # make sure we aren't adding something crazy, but this shouldn't happen
            # because we're filtering good observations (I hope!)
            ok,=np.where(ccdGray[obsExpIndexGO, obsCCDIndexGO] > self.illegalValue)
            if np.any(self.ccdGraySubCCD):
                obsXGO = snmm.getArray(self.fgcmStars.obsXHandle)[goodObs]
                obsYGO = snmm.getArray(self.fgcmStars.obsYHandle)[goodObs]
                # Group observations by (exposure, ccd) so each group shares
                # one chebyshev field evaluation.
                expCcdHash = (obsExpIndexGO[ok] * (self.fgcmPars.nCCD + 1) +
                              obsCCDIndexGO[ok])
                h, rev = esutil.stat.histogram(expCcdHash, rev=True)
                use, = np.where(h > 0)
                for i in use:
                    i1a = rev[rev[i]: rev[i + 1]]
                    eInd = obsExpIndexGO[ok[i1a[0]]]
                    cInd = obsCCDIndexGO[ok[i1a[0]]]
                    field = Cheb2dField(self.deltaMapperDefault['x_size'][cInd],
                                        self.deltaMapperDefault['y_size'][cInd],
                                        ccdGraySubCCDPars[eInd, cInd, :])
                    fluxScale = field.evaluate(obsXGO[ok[i1a]], obsYGO[ok[i1a]])
                    # Clip the flux scale at 0.1 to guard against log of
                    # tiny/negative chebyshev evaluations.
                    obsMagGO[ok[i1a]] += -2.5 * np.log10(np.clip(fluxScale, 0.1, None))
            else:
                # Regular non-sub-ccd
                obsMagGO[ok] += ccdGray[obsExpIndexGO[ok], obsCCDIndexGO[ok]]
        # Compute the sub-selected error-squared, using model error when available
        obsMagErr2GO = obsMagADUModelErr[goodObs].astype(np.float64)**2.
        if (self.computeSEDSlopes):
            # first, compute mean mags (code same as below. FIXME: consolidate, but how?)
            # make temp vars. With memory overhead
            wtSum = np.zeros_like(objMagStdMean, dtype='f8')
            objMagStdMeanTemp = np.zeros_like(objMagStdMean, dtype='f8')
            add_at_2d(wtSum,
                      (obsObjIDIndexGO,obsBandIndexGO),
                      1./obsMagErr2GO)
            add_at_2d(objMagStdMeanTemp,
                      (obsObjIDIndexGO,obsBandIndexGO),
                      obsMagGO/obsMagErr2GO)
            # these are good object/bands that were observed
            gd=np.where(wtSum > 0.0)
            # and acquire lock to save the values
            objMagStdMeanLock.acquire()
            objMagStdMean[gd] = objMagStdMeanTemp[gd] / wtSum[gd]
            objMagStdMeanErr[gd] = np.sqrt(1./wtSum[gd])
            # and release the lock.
            objMagStdMeanLock.release()
            if (self.useSedLUT):
                self.fgcmStars.computeObjectSEDSlopesLUT(goodStars,self.fgcmLUT)
            else:
                self.fgcmStars.computeObjectSEDSlopes(goodStars)
        # compute linearized chromatic correction
        deltaStdGO = 2.5 * np.log10((1.0 +
                                     objSEDSlope[obsObjIDIndexGO,
                                                 obsBandIndexGO] * I10GO) /
                                    (1.0 + objSEDSlope[obsObjIDIndexGO,
                                                       obsBandIndexGO] *
                                     self.I10StdBand[obsBandIndexGO]))
        if self.noChromaticCorrections:
            # NOT RECOMMENDED
            deltaStdGO *= 0.0
        # we can only do this for calibration stars.
        # must reference the full array to save
        # acquire lock when we write to and retrieve from full array
        obsMagStdLock.acquire()
        obsMagStd[goodObs] = obsMagGO + deltaStdGO
        obsDeltaStd[goodObs] = deltaStdGO
        # this is cut here
        obsMagStdGO = obsMagStd[goodObs]
        # we now have a local cut copy, so release
        obsMagStdLock.release()
        # kick out if we're just computing magstd for all exposures
        if (self.allExposures) :
            # kick out
            return None
        # compute mean mags
        # we make temporary variables. These are less than ideal because they
        # take up the full memory footprint. MAYBE look at making a smaller
        # array just for the stars under consideration, but this would make the
        # indexing in the np.add.at() more difficult
        wtSum = np.zeros_like(objMagStdMean,dtype='f8')
        objMagStdMeanTemp = np.zeros_like(objMagStdMean, dtype='f8')
        objMagStdMeanNoChromTemp = np.zeros_like(objMagStdMeanNoChrom, dtype='f8')
        add_at_2d(wtSum,
                  (obsObjIDIndexGO,obsBandIndexGO),
                  1./obsMagErr2GO)
        add_at_2d(objMagStdMeanTemp,
                  (obsObjIDIndexGO,obsBandIndexGO),
                  obsMagStdGO/obsMagErr2GO)
        # And the same thing with the non-chromatic corrected values
        add_at_2d(objMagStdMeanNoChromTemp,
                  (obsObjIDIndexGO,obsBandIndexGO),
                  obsMagGO/obsMagErr2GO)
        # which objects/bands have observations?
        gd=np.where(wtSum > 0.0)
        # and acquire lock to save the values
        objMagStdMeanLock.acquire()
        objMagStdMean[gd] = objMagStdMeanTemp[gd] / wtSum[gd]
        objMagStdMeanNoChrom[gd] = objMagStdMeanNoChromTemp[gd] / wtSum[gd]
        objMagStdMeanErr[gd] = np.sqrt(1./wtSum[gd])
        # and release the lock.
        objMagStdMeanLock.release()
        # this is the end of the _magWorker
def _chisqWorker(self, goodStarsAndObs):
"""
Multiprocessing worker to compute chisq and derivatives for FgcmChisq.
Not to be called on its own.
Parameters
----------
goodStarsAndObs: tuple[2]
(goodStars, goodObs)
"""
# kick out if we're just computing magstd for all exposures
if (self.allExposures) :
# kick out
return None
goodStars = goodStarsAndObs[0]
goodObs = goodStarsAndObs[1]
if self.debug:
thisCore = 0
else:
thisCore = multiprocessing.current_process()._identity[0]
# Set things up
objMagStdMean = snmm.getArray(self.fgcmStars.objMagStdMeanHandle)
objMagStdMeanNoChrom = snmm.getArray(self.fgcmStars.objMagStdMeanNoChromHandle)
objMagStdMeanErr = snmm.getArray(self.fgcmStars.objMagStdMeanErrHandle)
objSEDSlope = snmm.getArray(self.fgcmStars.objSEDSlopeHandle)
objFlag = snmm.getArray(self.fgcmStars.objFlagHandle)
obsObjIDIndex = snmm.getArray(self.fgcmStars.obsObjIDIndexHandle)
obsExpIndex = snmm.getArray(self.fgcmStars.obsExpIndexHandle)
obsBandIndex = snmm.getArray(self.fgcmStars.obsBandIndexHandle)
obsLUTFilterIndex = snmm.getArray(self.fgcmStars.obsLUTFilterIndexHandle)
obsCCDIndex = snmm.getArray(self.fgcmStars.obsCCDHandle) - self.ccdStartIndex
obsFlag = snmm.getArray(self.fgcmStars.obsFlagHandle)
obsSecZenith = snmm.getArray(self.fgcmStars.obsSecZenithHandle)
obsMagADU = snmm.getArray(self.fgcmStars.obsMagADUHandle)
obsMagADUModelErr = snmm.getArray(self.fgcmStars.obsMagADUModelErrHandle)
obsMagStd = snmm.getArray(self.fgcmStars.obsMagStdHandle)
# and the arrays for locking access
objMagStdMeanLock = snmm.getArrayBase(self.fgcmStars.objMagStdMeanHandle).get_lock()
obsMagStdLock = snmm.getArrayBase(self.fgcmStars.obsMagStdHandle).get_lock()
# cut these down now, faster later
obsObjIDIndexGO = esutil.numpy_util.to_native(obsObjIDIndex[goodObs])
obsBandIndexGO = esutil.numpy_util.to_native(obsBandIndex[goodObs])
obsLUTFilterIndexGO = esutil.numpy_util.to_native(obsLUTFilterIndex[goodObs])
obsExpIndexGO = esutil.numpy_util.to_native(obsExpIndex[goodObs])
obsSecZenithGO = obsSecZenith[goodObs]
obsCCDIndexGO = esutil.numpy_util.to_native(obsCCDIndex[goodObs])
# now refer to obsBandIndex[goodObs]
# add GO to index names that are cut to goodObs
# add GOF to index names that are cut to goodObs[obsFitUseGO] (see below)
lutIndicesGO = self.fgcmLUT.getIndices(obsLUTFilterIndexGO,
self.fgcmPars.expLnPwv[obsExpIndexGO],
self.fgcmPars.expO3[obsExpIndexGO],
self.fgcmPars.expLnTau[obsExpIndexGO],
self.fgcmPars.expAlpha[obsExpIndexGO],
obsSecZenithGO,
obsCCDIndexGO,
self.fgcmPars.expPmb[obsExpIndexGO])
I0GO = self.fgcmLUT.computeI0(self.fgcmPars.expLnPwv[obsExpIndexGO],
self.fgcmPars.expO3[obsExpIndexGO],
self.fgcmPars.expLnTau[obsExpIndexGO],
self.fgcmPars.expAlpha[obsExpIndexGO],
obsSecZenithGO,
self.fgcmPars.expPmb[obsExpIndexGO],
lutIndicesGO)
I10GO = self.fgcmLUT.computeI1(self.fgcmPars.expLnPwv[obsExpIndexGO],
self.fgcmPars.expO3[obsExpIndexGO],
self.fgcmPars.expLnTau[obsExpIndexGO],
self.fgcmPars.expAlpha[obsExpIndexGO],
obsSecZenithGO,
self.fgcmPars.expPmb[obsExpIndexGO],
lutIndicesGO) / I0GO
# Compute the sub-selected error-squared, using model error when available
obsMagErr2GO = obsMagADUModelErr[goodObs].astype(np.float64)**2.
obsMagStdLock.acquire()
# If we want to apply the deltas, do it here
if self.applyDelta:
obsMagStd[goodObs] -= self.deltaAbsOffset[obsBandIndexGO]
# Make local copy of mags
obsMagStdGO = obsMagStd[goodObs]
obsMagStdLock.release()
# and acquire lock to save the values
objMagStdMeanLock.acquire()
if self.applyDelta:
gdMeanStar, gdMeanBand = np.where(objMagStdMean[goodStars, :] < 90.0)
objMagStdMean[goodStars[gdMeanStar], gdMeanBand] -= self.deltaAbsOffset[gdMeanBand]
objMagStdMeanGO = objMagStdMean[obsObjIDIndexGO,obsBandIndexGO]
objMagStdMeanErr2GO = objMagStdMeanErr[obsObjIDIndexGO,obsBandIndexGO]**2.
objMagStdMeanLock.release()
# New logic:
# Select out reference stars (if desired)
# Select out non-reference stars
# Compute deltas and chisq for reference stars
# Compute deltas and chisq for non-reference stars
# Compute derivatives...
# Default mask is not to mask
maskGO = np.ones(goodObs.size, dtype=bool)
useRefstars = False
if self.fgcmStars.hasRefstars and not self.ignoreRef:
# Prepare arrays
objRefIDIndex = snmm.getArray(self.fgcmStars.objRefIDIndexHandle)
refMag = snmm.getArray(self.fgcmStars.refMagHandle)
refMagErr = snmm.getArray(self.fgcmStars.refMagErrHandle)
# Are there any reference stars in this set of stars?
use, = np.where(objRefIDIndex[goodStars] >= 0)
if use.size == 0:
# There are no reference stars in this list of stars. That's okay!
useRefstars = False
else:
# Get good observations of reference stars
# This must be two steps because we first need the indices to
# avoid out-of-bounds
goodRefObsGO, = np.where((objRefIDIndex[obsObjIDIndexGO] >= 0) &
((objFlag[obsObjIDIndexGO] & objFlagDict['REFSTAR_OUTLIER']) == 0))
# And check that these are all quality observations
tempUse, = np.where((objMagStdMean[obsObjIDIndexGO[goodRefObsGO],
obsBandIndexGO[goodRefObsGO]] < 90.0) &
(refMag[objRefIDIndex[obsObjIDIndexGO[goodRefObsGO]],
obsBandIndexGO[goodRefObsGO]] < 90.0))
if tempUse.size > 0:
useRefstars = True
goodRefObsGO = goodRefObsGO[tempUse]
if useRefstars:
# Down-select to remove reference stars
maskGO[goodRefObsGO] = False
# which observations are actually used in the fit?
useGO, = np.where(maskGO)
_, obsFitUseGO = esutil.numpy_util.match(self.bandFitIndex,
obsBandIndexGO[useGO])
obsFitUseGO = useGO[obsFitUseGO]
# Now we can compute delta and chisq for non-reference stars
deltaMagGO = obsMagStdGO - objMagStdMeanGO
# Note that this is computed from the model error
obsWeightGO = 1. / obsMagErr2GO
deltaMagWeightedGOF = deltaMagGO[obsFitUseGO] * obsWeightGO[obsFitUseGO]
# For correlation tracking
gsGOF, indGOF = esutil.numpy_util.match(goodStars, obsObjIDIndexGO[obsFitUseGO])
partialChisq = 0.0
partialChisqRef = 0.0
partialChisq = np.sum(deltaMagGO[obsFitUseGO]**2. * obsWeightGO[obsFitUseGO])
# And for the reference stars (if we want)
if useRefstars:
# Only use the specific fit bands, for derivatives
_, obsFitUseGRO = esutil.numpy_util.match(self.bandFitIndex,
obsBandIndexGO[goodRefObsGO])
# useful below
goodRefObsGOF = goodRefObsGO[obsFitUseGRO]
deltaMagGRO = obsMagStdGO[goodRefObsGO] - refMag[objRefIDIndex[obsObjIDIndexGO[goodRefObsGO]],
obsBandIndexGO[goodRefObsGO]]
obsWeightGRO = 1. / (obsMagErr2GO[goodRefObsGO] + refMagErr[objRefIDIndex[obsObjIDIndexGO[goodRefObsGO]],
obsBandIndexGO[goodRefObsGO]]**2.)
deltaRefMagWeightedGROF = deltaMagGRO[obsFitUseGRO] * obsWeightGRO[obsFitUseGRO]
partialChisqRef += np.sum(deltaMagGRO[obsFitUseGRO]**2. * obsWeightGRO[obsFitUseGRO])
partialArray = np.zeros(self.nSums, dtype='f8')
partialArray[-4] = partialChisq
partialArray[-3] = obsFitUseGO.size
if useRefstars:
partialArray[-2] = partialChisqRef
partialArray[-1] = obsFitUseGRO.size
if (self.computeDerivatives):
if self.fitterUnits:
units = self.fgcmPars.stepUnits
else:
units = np.ones(self.fgcmPars.nFitPars)
# this is going to be ugly. wow, how many indices and sub-indices?
# or does it simplify since we need all the obs on a night?
# we shall see! And speed up!
(dLdLnPwvGO,dLdO3GO,dLdLnTauGO,dLdAlphaGO) = (
self.fgcmLUT.computeLogDerivatives(lutIndicesGO,
I0GO))
if (self.fgcmLUT.hasI1Derivatives):
(dLdLnPwvI1GO,dLdO3I1GO,dLdLnTauI1GO,dLdAlphaI1GO) = (
self.fgcmLUT.computeLogDerivativesI1(lutIndicesGO,
I0GO,
I10GO,
objSEDSlope[obsObjIDIndexGO,
obsBandIndexGO]))
dLdLnPwvGO += dLdLnPwvI1GO
dLdO3GO += dLdO3I1GO
dLdLnTauGO += dLdLnTauI1GO
dLdAlphaGO += dLdAlphaI1GO
# we have objMagStdMeanErr[objIndex,:] = \Sum_{i"} 1/\sigma^2_{i"j}
# note that this is summed over all observations of an object in a band
# so that this is already done
# note below that objMagStdMeanErr2GO is the the square of the error,
# and already cut to [obsObjIDIndexGO,obsBandIndexGO]
innerTermGOF = np.zeros(obsFitUseGO.size)
obsExpIndexGOF = obsExpIndexGO[obsFitUseGO]
obsBandIndexGOF = obsBandIndexGO[obsFitUseGO]
obsBandIndexGOFI = obsBandIndexGOF[indGOF]
obsFitUseGOI = obsFitUseGO[indGOF]
obsMagErr2GOFI = obsMagErr2GO[obsFitUseGO[indGOF]]
if not self.freezeStdAtmosphere:
# And more initialization for atmosphere terms
sumNumerator = np.zeros((goodStars.size, self.fgcmPars.nBands, self.fgcmPars.nCampaignNights))
expNightIndexGOF = esutil.numpy_util.to_native(self.fgcmPars.expNightIndex[obsExpIndexGOF])
expNightIndexGOFI = expNightIndexGOF[indGOF]
##########
## O3
##########
uNightIndex = np.unique(expNightIndexGOF)
sumNumerator = np.zeros((goodStars.size, self.fgcmPars.nBands, self.fgcmPars.nCampaignNights))
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF, obsBandIndexGOFI, expNightIndexGOFI),
dLdO3GO[obsFitUseGOI] / obsMagErr2GOFI)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF] = (dLdO3GO[obsFitUseGOI] -
sumNumerator[gsGOF, obsBandIndexGOFI, expNightIndexGOFI] * objMagStdMeanErr2GO[obsFitUseGOI])
add_at_1d(partialArray[self.fgcmPars.parO3Loc:
(self.fgcmPars.parO3Loc +
self.fgcmPars.nCampaignNights)],
expNightIndexGOF,
2.0 * deltaMagWeightedGOF * innerTermGOF)
partialArray[self.fgcmPars.parO3Loc +
uNightIndex] /= units[self.fgcmPars.parO3Loc +
uNightIndex]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parO3Loc +
uNightIndex] += 1
if useRefstars:
# We assume that the unique nights must be a subset of those above
expNightIndexGROF = esutil.numpy_util.to_native(self.fgcmPars.expNightIndex[obsExpIndexGO[goodRefObsGO[obsFitUseGRO]]])
uRefNightIndex = np.unique(expNightIndexGROF)
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parO3Loc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parO3Loc +
self.fgcmPars.nCampaignNights)],
expNightIndexGROF,
2.0 * deltaRefMagWeightedGROF * dLdO3GO[goodRefObsGOF])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parO3Loc +
uRefNightIndex] /= units[self.fgcmPars.parO3Loc +
uRefNightIndex]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parO3Loc +
uRefNightIndex] += 1
###########
## Alpha
###########
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF, obsBandIndexGOFI, expNightIndexGOFI),
dLdAlphaGO[obsFitUseGOI] / obsMagErr2GOFI)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF] = (dLdAlphaGO[obsFitUseGOI] -
sumNumerator[gsGOF, obsBandIndexGOFI, expNightIndexGOFI] * objMagStdMeanErr2GO[obsFitUseGOI])
add_at_1d(partialArray[self.fgcmPars.parAlphaLoc:
(self.fgcmPars.parAlphaLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF,
2.0 * deltaMagWeightedGOF * innerTermGOF)
partialArray[self.fgcmPars.parAlphaLoc +
uNightIndex] /= units[self.fgcmPars.parAlphaLoc +
uNightIndex]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parAlphaLoc +
uNightIndex] += 1
if useRefstars:
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parAlphaLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parAlphaLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF,
2.0 * deltaRefMagWeightedGROF * dLdAlphaGO[goodRefObsGOF])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parAlphaLoc +
uRefNightIndex] /= units[self.fgcmPars.parAlphaLoc +
uRefNightIndex]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parAlphaLoc +
uRefNightIndex] += 1
###########
## PWV External
###########
if (self.fgcmPars.hasExternalPwv and not self.fgcmPars.useRetrievedPwv):
hasExtGOF, = np.where(self.fgcmPars.externalPwvFlag[obsExpIndexGOF])
uNightIndexHasExt = np.unique(expNightIndexGOF[hasExtGOF])
hasExtGOFG, = np.where(~self.fgcmPars.externalPwvFlag[obsExpIndexGOF[indGOF]])
# lnPw Nightly Offset
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[hasExtGOFG],
obsBandIndexGOFI[hasExtGOFG],
expNightIndexGOFI[hasExtGOFG]),
dLdLnPwvGO[obsFitUseGOI[hasExtGOFG]] /
obsMagErr2GOFI[hasExtGOFG])
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[hasExtGOFG]] = (dLdLnPwvGO[obsFitUseGOI[hasExtGOFG]] -
sumNumerator[gsGOF[hasExtGOFG],
obsBandIndexGOFI[hasExtGOFG],
expNightIndexGOFI[hasExtGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[hasExtGOFG]])
add_at_1d(partialArray[self.fgcmPars.parExternalLnPwvOffsetLoc:
(self.fgcmPars.parExternalLnPwvOffsetLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF[hasExtGOF],
2.0 * deltaMagWeightedGOF[hasExtGOF] * innerTermGOF[hasExtGOF])
partialArray[self.fgcmPars.parExternalLnPwvOffsetLoc +
uNightIndexHasExt] /= units[self.fgcmPars.parExternalLnPwvOffsetLoc +
uNightIndexHasExt]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvOffsetLoc +
uNightIndexHasExt] += 1
if useRefstars:
hasExtGROF, = np.where(self.fgcmPars.externalPwvFlag[obsExpIndexGO[goodRefObsGOF]])
uRefNightIndexHasExt = np.unique(expNightIndexGROF[hasExtGROF])
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvOffsetLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvOffsetLoc +
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[hasExtGROF],
2.0 * deltaRefMagWeightedGROF[hasExtGROF] *
dLdLnPwvGO[goodRefObsGOF[hasExtGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvOffsetLoc +
uRefNightIndexHasExt] /= units[self.fgcmPars.parExternalLnPwvOffsetLoc +
uRefNightIndexHasExt]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvOffsetLoc +
uRefNightIndexHasExt] += 1
# lnPwv Global Scale
# NOTE: this may be wrong. Needs thought.
partialArray[self.fgcmPars.parExternalLnPwvScaleLoc] = 2.0 * (
np.sum(deltaMagWeightedGOF[hasExtGOF] * (
self.fgcmPars.expLnPwv[obsExpIndexGOF[hasExtGOF]] *
dLdLnPwvGO[obsFitUseGO[hasExtGOF]])))
partialArray[self.fgcmPars.parExternalLnPwvScaleLoc] /= units[self.fgcmPars.parExternalLnPwvScaleLoc]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvScaleLoc] += 1
if useRefstars:
temp = np.sum(2.0 * deltaRefMagWeightedGROF[hasExtGROF] *
dLdLnPwvGO[goodRefObsGOF[hasExtGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvScaleLoc] = temp / units[self.fgcmPars.parExternalLnPwvScaleLoc]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parExternalLnPwvScaleLoc] += 1
################
## PWV Retrieved
################
if (self.fgcmPars.useRetrievedPwv):
hasRetrievedPwvGOF, = np.where((self.fgcmPars.compRetrievedLnPwvFlag[obsExpIndexGOF] &
retrievalFlagDict['EXPOSURE_RETRIEVED']) > 0)
hasRetrievedPWVGOFG, = np.where((self.fgcmPars.compRetrievedLnPwvFlag[obsExpIndexGOF[indGOF]] &
retrievalFlagDict['EXPOSURE_RETRIEVED']) > 0)
if hasRetrievedPwvGOF.size > 0:
# note this might be zero-size on first run
# lnPwv Retrieved Global Scale
# This may be wrong
partialArray[self.fgcmPars.parRetrievedLnPwvScaleLoc] = 2.0 * (
np.sum(deltaMagWeightedGOF[hasRetrievedPwvGOF] * (
self.fgcmPars.expLnPwv[obsExpIndexGOF[hasRetreivedPwvGOF]] *
dLdLnPwvGO[obsFitUseGO[hasRetrievedPwvGOF]])))
partialArray[self.fgcmPars.parRetrievedLnPwvScaleLoc] /= units[self.fgcmPars.parRetrievedLnPwvScaleLoc]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvScaleLoc] += 1
if useRefstars:
hasRetrievedPwvGROF, = np.where((self.fgcmPars.computeRetrievedLnPwvFlag[obsExpIndexGO[goodRefObsGOF]] &
(retrievalFlagDict['EXPOSURE_RETRIEVED']) > 0))
temp = np.sum(2.0 * deltaRefMagWeightedGROF[hasRetrievedPwvGROF] *
dLdLnPwvGO[goodRefObsGROF[hasRetrievedPwvGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvScaleLoc] = temp / units[self.fgcmPars.parRetrievedLnPwvScaleLoc]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvScaleLoc] += 1
if self.fgcmPars.useNightlyRetrievedPwv:
# lnPwv Retrieved Nightly Offset
uNightIndexHasRetrievedPwv = np.unique(expNightIndexGOF[hasRetrievedPwvGOF])
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[hasRetrievedPwvGOFG],
obsBandIndexGOFI[hasRetrievedPwvGOFG],
expNightIndexGOFI[hasRetrievedPwvGOFG]),
dLdLnPwvGO[obsFitUseGOI[hasRetrievedPwvGOFG]] /
obsMagErr2GOFI[hasRetrievedPwvGOFG])
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[hasRetrievedPwvGOFG]] = (dLdLnPwvGO[obsFitUseGOI[hasRetrievedPwvGOFG]] -
sumNumerator[gsGOF[hasRetrievedPwvGOFG],
obsBandIndexGOFI[hasRetrievedPwvGOFG],
expNightIndexGOFI[hasRetrievedPwvGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[hasRetrievedPwvGOFG]])
add_at_1d(partialArray[self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc:
(self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc+
self.fgcmPars.nCampaignNights)],
2.0 * deltaMagWeightedGOF[hasRetrievedPwvGOF] * innerTermGOF[hasRetrievedPwvGOF])
partialArray[self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc +
uNightIndexHasRetrievedPwv] /= units[self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc +
uNightIndexHasRetrievedPwv]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc +
uNightIndexHasRetrievedPwv] += 1
if useRefstars:
uRefNightIndexHasRetrievedPWV = np.unique(expNightIndexGROF[hasRetrievedPwvGROF])
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[hasRetrievedPwvGROF],
2.0 * deltaRefMagWeightedGROF *
dLdLnPwvGO[goodRefObsGOF[hasRetrievedPwvGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc +
uRefNightIndexHasRetrievedPwv] /= units[self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc +
uRefNightIndexHasRetrievedPwv]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvNightlyOffsetLoc +
uRefNightIndexHasRetrievedPwv] += 1
else:
# lnPwv Retrieved Global Offset
# This may be wrong
partialArray[self.fgcmPars.parRetrievedLnPwvOffsetLoc] = 2.0 * (
np.sum(deltaMagWeightedGOF[hasRetrievedPwvGOF] * (
dLdLnPwvGO[obsFitUseGO[hasRetrievedPwvGOF]])))
partialArray[self.fgcmPars.parRetrievedLnPwvOffsetLoc] /= units[self.fgcmPars.parRetrievedLnPwvOffsetLoc]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvOffsetLoc] += 1
if useRefstars:
temp = np.sum(2.0 * deltaRefMagWeightedGROF *
dLdLnPwvGO[goodRefObsGOF[hasRetrievedPwvGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvOffsetLoc] = temp / units[self.fgcmPars.parRetrievedLnPwvOffsetLoc]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parRetrievedLnPwvOffsetLoc] += 1
else:
###########
## Pwv No External
###########
noExtGOF, = np.where(~self.fgcmPars.externalPwvFlag[obsExpIndexGOF])
uNightIndexNoExt = np.unique(expNightIndexGOF[noExtGOF])
noExtGOFG, = np.where(~self.fgcmPars.externalPwvFlag[obsExpIndexGOF[indGOF]])
# lnPwv Nightly Intercept
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]),
dLdLnPwvGO[obsFitUseGOI[noExtGOFG]] /
obsMagErr2GOFI[noExtGOFG])
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[noExtGOFG]] = (dLdLnPwvGO[obsFitUseGOI[noExtGOFG]] -
sumNumerator[gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[noExtGOFG]])
add_at_1d(partialArray[self.fgcmPars.parLnPwvInterceptLoc:
(self.fgcmPars.parLnPwvInterceptLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF[noExtGOF],
2.0 * deltaMagWeightedGOF[noExtGOF] * innerTermGOF[noExtGOF])
partialArray[self.fgcmPars.parLnPwvInterceptLoc +
uNightIndexNoExt] /= units[self.fgcmPars.parLnPwvInterceptLoc +
uNightIndexNoExt]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvInterceptLoc +
uNightIndexNoExt] += 1
if useRefstars:
noExtGROF, = np.where(~self.fgcmPars.externalPwvFlag[obsExpIndexGO[goodRefObsGOF]])
uRefNightIndexNoExt = np.unique(expNightIndexGROF[noExtGROF])
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvInterceptLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvInterceptLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[noExtGROF],
2.0 * deltaRefMagWeightedGROF[noExtGROF] *
dLdLnPwvGO[goodRefObsGOF[noExtGROF]])
partialArray[2*self.fgcmPars.nFitPars + self.fgcmPars.parLnPwvInterceptLoc + uRefNightIndexNoExt] = units[self.fgcmPars.parLnPwvInterceptLoc + uRefNightIndexNoExt]
partialArray[3*self.fgcmPars.nFitPars + self.fgcmPars.parLnPwvInterceptLoc + uRefNightIndexNoExt] += 1
# lnPwv Nightly Slope
dLdLnPwvSlopeGOFI = self.fgcmPars.expDeltaUT[obsExpIndexGOF[indGOF]] * dLdLnPwvGO[obsFitUseGOI]
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]),
(dLdLnPwvSlopeGOFI[noExtGOFG] /
obsMagErr2GOFI[noExtGOFG]))
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[noExtGOFG]] = (dLdLnPwvSlopeGOFI[noExtGOFG] -
sumNumerator[gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[noExtGOFG]])
add_at_1d(partialArray[self.fgcmPars.parLnPwvSlopeLoc:
(self.fgcmPars.parLnPwvSlopeLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF[noExtGOF],
2.0 * deltaMagWeightedGOF[noExtGOF] * innerTermGOF[noExtGOF])
partialArray[self.fgcmPars.parLnPwvSlopeLoc +
uNightIndexNoExt] /= units[self.fgcmPars.parLnPwvSlopeLoc +
uNightIndexNoExt]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvSlopeLoc +
uNightIndexNoExt] += 1
if useRefstars:
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvSlopeLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvSlopeLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[noExtGROF],
2.0 * deltaRefMagWeightedGROF[noExtGROF] *
self.fgcmPars.expDeltaUT[obsExpIndexGO[goodRefObsGOF[noExtGROF]]] *
dLdLnPwvGO[goodRefObsGOF[noExtGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvSlopeLoc +
uRefNightIndexNoExt] /= units[self.fgcmPars.parLnPwvSlopeLoc +
uRefNightIndexNoExt]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvSlopeLoc +
uRefNightIndexNoExt] += 1
# lnPwv Nightly Quadratic
if self.useQuadraticPwv:
dLdLnPwvQuadraticGOFI = self.fgcmPars.expDeltaUT[obsExpIndexGOF[indGOF]]**2. * dLdLnPwvGO[obsFitUseGOI]
sumNumerator = np.zeros((goodStars.size, self.fgcmPars.nBands, self.fgcmPars.nCampaignNights))
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]),
(dLdLnPwvQuadraticGOFI[noExtGOFG] /
obsMagErr2GOFI[noExtGOFG]))
innerTermGOF = np.zeros(obsFitUseGO.size)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[noExtGOFG]] = (dLdLnPwvQuadraticGOFI[noExtGOFG] -
sumNumerator[gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[noExtGOFG]])
add_at_1d(partialArray[self.fgcmPars.parLnPwvQuadraticLoc:
(self.fgcmPars.parLnPwvQuadraticLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF[noExtGOF],
2.0 * deltaMagWeightedGOF[noExtGOF] * innerTermGOF[noExtGOF])
partialArray[self.fgcmPars.parLnPwvQuadraticLoc +
uNightIndexNoExt] /= units[self.fgcmPars.parLnPwvQuadraticLoc +
uNightIndexNoExt]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvQuadraticLoc +
uNightIndexNoExt] += 1
if useRefstars:
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvQuadraticLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvQuadraticLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[noExtGROF],
2.0 * deltaRefMagWeightedGROF[noExtGROF] *
self.fgcmPars.expDeltaUT[obsExpIndexGO[goodRefObsGOF[noExtGROF]]]**2. *
dLdLnPwvGO[goodRefObsGOF[noExtGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvQuadraticLoc +
uRefNightIndexNoExt] /= units[self.fgcmPars.parLnPwvQuadraticLoc +
uRefNightIndexNoExt]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parLnPwvQuadraticLoc +
uRefNightIndexNoExt] += 1
#############
## Tau External
#############
if (self.fgcmPars.hasExternalTau):
# NOT IMPLEMENTED PROPERLY YET
raise NotImplementedError("external tau not implemented.")
###########
## Tau No External
###########
noExtGOF, = np.where(~self.fgcmPars.externalTauFlag[obsExpIndexGOF])
uNightIndexNoExt = np.unique(expNightIndexGOF[noExtGOF])
noExtGOFG, = np.where(~self.fgcmPars.externalTauFlag[obsExpIndexGOF[indGOF]])
# lnTau Nightly Intercept
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]),
dLdLnTauGO[obsFitUseGOI[noExtGOFG]] /
obsMagErr2GOFI[noExtGOFG])
innerTermGOF = np.zeros(obsFitUseGO.size)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[noExtGOFG]] = (dLdLnTauGO[obsFitUseGOI[noExtGOFG]] -
sumNumerator[gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[noExtGOFG]])
add_at_1d(partialArray[self.fgcmPars.parLnTauInterceptLoc:
(self.fgcmPars.parLnTauInterceptLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF[noExtGOF],
2.0 * deltaMagWeightedGOF[noExtGOF] * innerTermGOF[noExtGOF])
partialArray[self.fgcmPars.parLnTauInterceptLoc +
uNightIndexNoExt] /= units[self.fgcmPars.parLnTauInterceptLoc +
uNightIndexNoExt]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauInterceptLoc +
uNightIndexNoExt] += 1
if useRefstars:
noExtGROF, = np.where(~self.fgcmPars.externalTauFlag[obsExpIndexGO[goodRefObsGOF]])
uRefNightIndexNoExt = np.unique(expNightIndexGROF[noExtGROF])
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauInterceptLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauInterceptLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[noExtGROF],
2.0 * deltaRefMagWeightedGROF[noExtGROF] *
dLdLnTauGO[goodRefObsGOF[noExtGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauInterceptLoc +
uRefNightIndexNoExt] /= units[self.fgcmPars.parLnTauInterceptLoc +
uRefNightIndexNoExt]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauInterceptLoc +
uRefNightIndexNoExt] += 1
# lnTau nightly slope
dLdLnTauSlopeGOFI = self.fgcmPars.expDeltaUT[obsExpIndexGOF[indGOF]] * dLdLnTauGO[obsFitUseGOI]
sumNumerator[:, :, :] = 0.0
add_at_3d(sumNumerator,
(gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]),
(dLdLnTauSlopeGOFI[noExtGOFG] /
obsMagErr2GOFI[noExtGOFG]))
innerTermGOF[:] = 0.0
innerTermGOF[indGOF[noExtGOFG]] = (dLdLnTauSlopeGOFI[noExtGOFG] -
sumNumerator[gsGOF[noExtGOFG],
obsBandIndexGOFI[noExtGOFG],
expNightIndexGOFI[noExtGOFG]] *
objMagStdMeanErr2GO[obsFitUseGOI[noExtGOFG]])
add_at_1d(partialArray[self.fgcmPars.parLnTauSlopeLoc:
(self.fgcmPars.parLnTauSlopeLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGOF[noExtGOF],
2.0 * deltaMagWeightedGOF[noExtGOF] * innerTermGOF[noExtGOF])
partialArray[self.fgcmPars.parLnTauSlopeLoc +
uNightIndexNoExt] /= units[self.fgcmPars.parLnTauSlopeLoc +
uNightIndexNoExt]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauSlopeLoc +
uNightIndexNoExt] += 1
if useRefstars:
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauSlopeLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauSlopeLoc+
self.fgcmPars.nCampaignNights)],
expNightIndexGROF[noExtGROF],
2.0 * deltaRefMagWeightedGROF[noExtGROF] *
self.fgcmPars.expDeltaUT[obsExpIndexGO[goodRefObsGOF[noExtGROF]]] *
dLdLnTauGO[goodRefObsGOF[noExtGROF]])
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauSlopeLoc +
uRefNightIndexNoExt] /= units[self.fgcmPars.parLnTauSlopeLoc +
uRefNightIndexNoExt]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parLnTauSlopeLoc +
uRefNightIndexNoExt] += 1
##################
## Washes (QE Sys)
##################
# Note that we do this derivative even if we've frozen the atmosphere.
expWashIndexGOF = self.fgcmPars.expWashIndex[obsExpIndexGOF]
# Wash Intercept
if self.instrumentParsPerBand:
# We have per-band intercepts
# Non-fit bands will be given the mean of the others (in fgcmParameters),
# because they aren't in the chi2.
uWashBandIndex = np.unique(np.ravel_multi_index((obsBandIndexGOF,
expWashIndexGOF),
self.fgcmPars.parQESysIntercept.shape))
ravelIndexGOF = np.ravel_multi_index((obsBandIndexGOF,
expWashIndexGOF),
self.fgcmPars.parQESysIntercept.shape)
sumNumerator = np.zeros((goodStars.size, self.fgcmPars.nBands, self.fgcmPars.parQESysIntercept.size))
add_at_3d(sumNumerator,
(gsGOF, obsBandIndexGOFI, ravelIndexGOF[indGOF]),
1.0 / obsMagErr2GOFI)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF] = (1.0 - sumNumerator[gsGOF,
obsBandIndexGOFI,
ravelIndexGOF[indGOF]] *
objMagStdMeanErr2GO[obsFitUseGOI])
add_at_1d(partialArray[self.fgcmPars.parQESysInterceptLoc:
(self.fgcmPars.parQESysInterceptLoc +
self.fgcmPars.parQESysIntercept.size)],
ravelIndexGOF,
2.0 * deltaMagWeightedGOF * innerTermGOF)
partialArray[self.fgcmPars.parQESysInterceptLoc +
uWashBandIndex] /= units[self.fgcmPars.parQESysInterceptLoc +
uWashBandIndex]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
uWashBandIndex] += 1
# And reference stars
if useRefstars and self.useRefStarsWithInstrument:
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
self.fgcmPars.parQESysIntercept.size)],
np.ravel_multi_index((obsBandIndexGO[goodRefObsGOF],
esutil.numpy_util.to_native(self.fgcmPars.expWashIndex[obsExpIndexGO[goodRefObsGOF]])),
self.fgcmPars.parQESysIntercept.shape),
2.0 * deltaRefMagWeightedGROF)
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
uWashBandIndex] /= units[self.fgcmPars.parQESysInterceptLoc +
uWashBandIndex]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
uWashBandIndex] += 1
else:
# We have one gray mirror term for all bands
uWashIndex = np.unique(expWashIndexGOF)
sumNumerator = np.zeros((goodStars.size, self.fgcmPars.nBands, self.fgcmPars.nWashIntervals))
add_at_3d(sumNumerator,
(gsGOF, obsBandIndexGOFI, expWashIndexGOF[indGOF]),
1.0 / obsMagErr2GOFI)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF] = (1.0 - sumNumerator[gsGOF,
obsBandIndexGOFI,
expWashIndexGOF[indGOF]] *
objMagStdMeanErr2GO[obsFitUseGOI])
add_at_1d(partialArray[self.fgcmPars.parQESysInterceptLoc:
(self.fgcmPars.parQESysInterceptLoc +
self.fgcmPars.nWashIntervals)],
expWashIndexGOF,
2.0 * deltaMagWeightedGOF * innerTermGOF)
partialArray[self.fgcmPars.parQESysInterceptLoc +
uWashIndex] /= units[self.fgcmPars.parQESysInterceptLoc +
uWashIndex]
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
uWashIndex] += 1
# We don't want to use the reference stars for the wash intercept
# or slope by default because if they aren't evenly sampled it can
# cause the fitter to go CRAZY. Though improvements in slope computation
# means this should be revisited.
if useRefstars and self.useRefStarsWithInstrument:
add_at_1d(partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc:
(2*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
self.fgcmPars.nWashIntervals)],
esutil.numpy_util.to_native(self.fgcmPars.expWashIndex[obsExpIndexGO[goodRefObsGOF]]),
2.0 * deltaRefMagWeightedGROF)
partialArray[2*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
uWashIndex] /= units[self.fgcmPars.parQESysInterceptLoc +
uWashIndex]
partialArray[3*self.fgcmPars.nFitPars +
self.fgcmPars.parQESysInterceptLoc +
uWashIndex] += 1
#################
## Filter offset
#################
sumNumerator = np.zeros((goodStars.size, self.fgcmPars.nBands, self.fgcmPars.nLUTFilter))
add_at_3d(sumNumerator,
(gsGOF, obsBandIndexGOFI, obsLUTFilterIndexGO[obsFitUseGOI]),
1.0 / obsMagErr2GOFI)
innerTermGOF[:] = 0.0
innerTermGOF[indGOF] = (1.0 - sumNumerator[gsGOF,
obsBandIndexGOFI,
obsLUTFilterIndexGO[obsFitUseGOI]] *
objMagStdMeanErr2GO[obsFitUseGOI])
add_at_1d(partialArray[self.fgcmPars.parFilterOffsetLoc:
(self.fgcmPars.parFilterOffsetLoc +
self.fgcmPars.nLUTFilter)],
obsLUTFilterIndexGO[obsFitUseGO],
2.0 * deltaMagWeightedGOF * innerTermGOF)
partialArray[self.fgcmPars.parFilterOffsetLoc:
(self.fgcmPars.parFilterOffsetLoc +
self.fgcmPars.nLUTFilter)] /= units[self.fgcmPars.parFilterOffsetLoc:
(self.fgcmPars.parFilterOffsetLoc +
self.fgcmPars.nLUTFilter)]
# Note that using the refstars with the filter offset derivative
# seems to make things go haywire, so don't do that.
# Now set those to zero the derivatives we aren't using
partialArray[self.fgcmPars.parFilterOffsetLoc:
(self.fgcmPars.parFilterOffsetLoc +
self.fgcmPars.nLUTFilter)][~self.fgcmPars.parFilterOffsetFitFlag] = 0.0
uOffsetIndex, = np.where(self.fgcmPars.parFilterOffsetFitFlag)
partialArray[self.fgcmPars.nFitPars +
self.fgcmPars.parFilterOffsetLoc +
uOffsetIndex] += 1
# note that this store doesn't need locking because we only access
# a given array from a single process
totalArr = snmm.getArray(self.totalHandleDict[thisCore])
totalArr[:] = totalArr[:] + partialArray
# and we're done
return None
def __getstate__(self):
# Don't try to pickle the logger.
state = self.__dict__.copy()
del state['fgcmLog']
return state
| StarcoderdataPython |
11396585 | <gh_stars>1-10
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
.. _dbus-fusion:
Fusioninventory module
~~~~~~~~~~~~~~~~~~~~~~
To. Do.
"""
import os
import re
import shutil
import subprocess
import dbus.service
import hashlib
import logging
from pkg_resources import resource_filename #@UnresolvedImport
from lxml import etree, objectify
from clacks.common import Environment
from clacks.common.components import Plugin
from clacks.dbus import get_system_bus
from base64 import b64encode
class InventoryException(Exception):
    """Raised when collecting or transforming client inventory data fails."""
class DBusInventoryHandler(dbus.service.Object, Plugin):
    """This handler collects client inventory data and exposes it on D-Bus."""

    def __init__(self):
        conn = get_system_bus()
        dbus.service.Object.__init__(self, conn, '/org/clacks/inventory')
        self.env = Environment.getInstance()

    @dbus.service.method('org.clacks', in_signature='', out_signature='s')
    def inventory(self):
        """
        Start the inventory client and transform the results into a clacks
        usable way.

        We should support other inventory clients, later.
        """
        # Add other report types here.
        result = self.load_from_fusion_agent()
        return result

    def load_from_fusion_agent(self):
        """
        Run the fusioninventory-agent and return its report transformed into
        the clacks event-style XML, or None when the agent could not be
        started.

        Unfortunately we cannot specify a target file for the generated
        xml-report directly, we can only name a path where the results should
        go to. So we have to detect the generated file by ourselves later.
        """
        # Create a clean target directory for the result.
        path = '/tmp/fusion_tmp'
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)

        # Execute the inventory agent.
        try:
            process = subprocess.Popen(
                ["fusioninventory-agent", "--local=" + path],
                shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            process.communicate()
        except OSError as e:
            log = logging.getLogger(__name__)
            log.error("failed to invoke fusion-inventory agent: %s" % str(e))
            return None

        # Try to read the generated xml result.
        flist = os.listdir(path)
        result = None
        if flist:
            report = os.path.join(path, flist[0])

            # Try to extract the HardwareUUID from the raw report.
            with open(report) as f:
                tmp = objectify.fromstring(f.read())
            huuid = tmp.xpath('/REQUEST/CONTENT/HARDWARE/UUID/text()')[0]

            # Open the first found result file and transform it into a clacks
            # usable event-style xml.
            try:
                xml_doc = etree.parse(report)
                xslt_doc = etree.parse(resource_filename("clacks.dbus.plugins.inventory", "data/fusionToClacks.xsl"))
                transform = etree.XSLT(xslt_doc)
                result = etree.tostring(transform(xml_doc))
            except Exception as e:
                # Include the underlying error so the failure cause is not lost.
                raise InventoryException("Failed to read and transform fusion-inventory-agent results (%s): %s" % (report, str(e)))
        else:
            raise InventoryException("No report files could be found in '%s'" % (path,))

        # Add the ClientUUID and the hashed HardwareUUID to the result.
        result = re.sub(r"%%CUUID%%", self.env.uuid, result)
        result = re.sub(r"%%HWUUID%%", self.hash_hardware_uuid(huuid), result)

        # Remove temporary files created by the inventory agent.
        shutil.rmtree(path)
        return result

    def hash_hardware_uuid(self, huuid):
        """
        Hash the hardware uuid; it is not secure to send it as clear text.

        Returns the base64-encoded SHA-256 digest of *huuid*.
        """
        # NOTE(review): assumes `huuid` is a byte string as under Python 2;
        # on Python 3 it would need to be encoded first -- confirm.
        sha = hashlib.sha256()
        sha.update(huuid)
        return b64encode(sha.digest())
| StarcoderdataPython |
9774388 | <reponame>yaniv-aknin/flask-s3upload
#!/usr/bin/env python
from base64 import b64encode
from datetime import datetime, timedelta
from json import dumps
from os import environ
from uuid import uuid4
import hmac
import sha
from flask import Flask, render_template, jsonify
app = Flask(__name__)
# Required AWS settings come from the environment; a missing variable raises
# KeyError here at startup rather than failing later at request time.
for key in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_S3_BUCKET_URL'):
    app.config[key] = environ[key]
@app.route('/')
def index():
    """Serve the upload form page; S3 parameters are fetched via /params."""
    return render_template('index.html')
@app.route('/params')
def params():
    """Return the signed S3 POST policy parameters for a browser upload.

    The response carries the base64-encoded policy document, its HMAC-SHA1
    signature, a fresh object key under ``uploads/``, and the redirect URL
    the browser should use after a successful upload.
    """
    import hashlib

    def make_policy():
        """Build the base64-encoded S3 POST policy document."""
        # The trailing "Z" in the strftime format marks the timestamp as UTC,
        # so the expiration must be computed from UTC time, not local time.
        policy_object = {
            "expiration": (datetime.utcnow() + timedelta(hours=24)).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            "conditions": [
                # NOTE(review): the bucket name is hard-coded here while the
                # app config also carries AWS_S3_BUCKET_URL -- confirm the
                # two agree.
                { "bucket": "ajax-cors-s3-upload" },
                { "acl": "public-read" },
                ["starts-with", "$key", "uploads/"],
                { "success_action_status": "201" }
            ]
        }
        return b64encode(dumps(policy_object).replace('\n', '').replace('\r', ''))

    def sign_policy(policy):
        """Sign the policy with the AWS secret key (HMAC-SHA1, base64)."""
        # hashlib.sha1 replaces the long-deprecated `sha` module as the
        # digestmod; the resulting digest is identical.
        return b64encode(hmac.new(app.config['AWS_SECRET_ACCESS_KEY'], policy, hashlib.sha1).digest())

    policy = make_policy()
    return jsonify({
        "policy": policy,
        "signature": sign_policy(policy),
        "key": "uploads/" + uuid4().hex + ".bin",
        "success_action_redirect": "/"
    })
# Run the development server when executed directly (not under a WSGI host).
if __name__ == '__main__':
    app.run(debug=True)
| StarcoderdataPython |
3219309 | from __future__ import annotations
from dataclasses import dataclass
from logging import Logger, getLogger
from pathlib import PosixPath
from typing import Optional, Union
import h5py
import numpy as np
from .application_info import get_application_info
from .fast5s import (
FAST5_ROOT,
KEY,
BaseFile,
BulkFile,
ChannelCalibration,
ContextTagsBase,
ContextTagsBulk,
RawSignal,
TrackingId,
channel_path_for_read_id,
signal_path_for_read_id,
)
from .hdf5 import (
HasAttrs,
HDF5_Attributes,
HDF5_Dataset,
HDF5_DatasetSerialableDataclass,
HDF5_DatasetSerializable,
HDF5_DatasetSerializationException,
HDF5_Group,
HDF5_GroupSerialableDataclass,
HDF5_GroupSerializable,
HDF5_GroupSerializationException,
HDF5_GroupSerializing,
HDF5_SerializationException,
HDF5_Type,
NumpyArrayLike,
get_class_for_name,
hdf5_dtype,
)
from .signals import Signal, HDF5_Signal
from .utils.classify import CLASSIFICATION_PATH
from .utils.configuration import SegmentConfiguration
from .utils.core import PathLikeOrString, ReadId, Window, WindowsByChannel, dataclass_fieldnames
from .utils.filtering import (
FILTER_PATH,
FilterPlugin,
Filters,
FilterSet,
HDF5_FilterSet,
RangeFilter,
)
@dataclass(frozen=True)
class CAPTURE_PATH:
    """Well-known HDF5 group/dataset paths inside a capture Fast5 file."""

    ROOT = "/"
    CONTEXT_TAGS = "/Meta/context_tags"
    TRACKING_ID = "/Meta/tracking_id"
    SUB_RUN = "/Meta/tracking_id/sub_run"
    SEGMENTATION = "/Meta/Segmentation"
    CAPTURE_WINDOWS = "/Meta/Segmentation/capture_windows"
    CONTEXT_ID = "/Meta/Segmentation/context_id"
    CAPTURE_CRITERIA = "/Meta/Segmentation/capture_criteria"

    @classmethod
    def FOR_READ_ID(cls, read_id: ReadId) -> str:
        # Each read's group lives directly under the file root.
        return str(PosixPath(CAPTURE_PATH.ROOT, read_id))

    @classmethod
    def CHANNEL_FOR_READ_ID(cls, read_id: ReadId) -> str:
        # Channel metadata is a subgroup of the read's group.
        return str(PosixPath(CAPTURE_PATH.FOR_READ_ID(read_id), KEY.CHANNEL_ID))

    @classmethod
    def SIGNAL_FOR_READ_ID(cls, read_id: ReadId) -> str:
        # The raw signal dataset is a subgroup of the read's group.
        return str(PosixPath(CAPTURE_PATH.FOR_READ_ID(read_id), KEY.SIGNAL))
@dataclass(frozen=True)
class ContextTagsCapture(ContextTagsBase):
    """A Context Tags group in a Capture Fast5 file.

    We have separate classes for Bulk files `ContextTagsBulk`,
    and Capture files `ContextTagsCapture` because their fields
    vary slightly.
    """

    bulk_filename: str

    @classmethod
    def from_context_tags_bulk_group(cls, bulk_tags_group: HDF5_Group) -> ContextTagsCapture:
        """Build capture context tags from a bulk-file context tags group.

        The bulk file's "filename" attribute is renamed to "bulk_filename";
        every other attribute is copied through verbatim.
        """
        context_tags = {}
        for key, value in bulk_tags_group.objects_from_attrs().items():
            if key == "filename":
                # "Filename" in Bulk is named "bulk_filename" in Capture.
                context_tags["bulk_filename"] = value
            else:
                context_tags[key] = value
        # Equivalent to the __new__/__init__ two-step, but lets the dataclass
        # validate its own fields in one call.
        return cls(**context_tags)
@dataclass(frozen=True)
class Channel(HDF5_GroupSerialableDataclass):
    """Channel-specific information saved for each read."""

    channel_number: int
    calibration: ChannelCalibration
    open_channel_pA: int
    sampling_rate: int

    def name(self) -> str:
        """Group name used when this object is serialized to HDF5."""
        # BUGFIX(style): was f"channel_id" -- an f-string with no placeholders.
        return "channel_id"

    def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
        """Returns this object as an HDF5 Group.

        Serializable field values become their own subgroups; the
        ChannelCalibration is flattened into attributes on this group;
        everything else is stored as a plain attribute.
        """
        log = log if log is not None else getLogger()
        my_group: HDF5_Group = super().as_group(parent_group)
        for field_name, field_value in vars(self).items():
            if isinstance(field_value, HDF5_GroupSerializable):
                # This value is actually its own group.
                # So we create a new group rooted at our dataclass's group
                # And assign it the value of whatever the group of the value is.
                my_group.require_group(field_name)
                # NOTE: the returned group was previously bound to an unused
                # local; only the serialization side effect is needed.
                field_value.as_group(my_group, log=log)
            elif isinstance(field_value, ChannelCalibration):
                # Calibration is stored as flat attributes, not a subgroup.
                for calibration_key, value in vars(field_value).items():
                    my_group.create_attr(calibration_key, value)
            else:
                my_group.create_attr(field_name, field_value)
        return my_group

    @classmethod
    def from_group(
        cls, group: HDF5_Group, log: Optional[Logger] = None
    ) -> HDF5_GroupSerialableDataclass:
        """Reconstruct an instance from an HDF5 group (without calling __init__).

        Attributes are copied directly, datasets are read into numpy buffers,
        and subgroups are deserialized by looking up a class with the same name.

        Raises
        ------
        HDF5_GroupSerializationException
            If a subgroup's class cannot be found, or that class does not
            implement the group-deserialization protocol.
        """
        # BUGFIX(redundancy): a second "if not log: log = getLogger()" check
        # duplicated this default and was removed.
        log = log if log is not None else getLogger()
        my_instance = cls.__new__(cls)

        # First, copy over attrs:
        for name, value in group.attrs.items():
            # object.__setattr__ because the dataclass is frozen.
            object.__setattr__(my_instance, name, value)

        # Then, copy over any datasets or groups.
        for name, value in group.items():
            if isinstance(value, h5py.Dataset):
                # Assuming we're storing a numpy array as this dataset
                buffer = np.empty(value.shape, dtype=value.dtype)
                # Copies the values into our buffer
                value.read_direct(buffer)
                object.__setattr__(my_instance, name, buffer)
            elif isinstance(value, h5py.Group):
                # If it's a group, we have to do a little more work
                # 1) Find the class described by the group
                # 1.1) Verify that we actually know a class by that name. Raise an exception if we don't.
                # 1.2) Verify that that class has a method to create an instance group a group.
                # 2) Create a new class instance from that group
                # 3) Set this object's 'name' field to the object we just created.
                try:
                    ThisClass = get_class_for_name(name)
                except AttributeError as e:
                    serial_exception = HDF5_GroupSerializationException(
                        f"We couldn't serialize group named {name} (group is attached in the exception.",
                        e,
                        group=value,
                    )
                    log.exception(serial_exception.msg, serial_exception)
                    raise serial_exception
                try:
                    this_instance = ThisClass.from_group(value, log=log)
                except AttributeError as e:
                    serial_exception = HDF5_GroupSerializationException(
                        f"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.",
                        e,
                        group=value,
                    )
                    log.exception(serial_exception.msg, serial_exception)
                    raise serial_exception
                object.__setattr__(my_instance, name, this_instance)
        return my_instance
@dataclass(frozen=True)
class SubRun(HDF5_GroupSerialableDataclass):
    """If the bulk fast5 contains multiple runs (shorter sub-runs throughout
    the data collection process), this can be used to record additional
    context about the sub run: (id : str, offset : int, and
    duration : int). `id` is the identifier for the sub run,
    `offset` is the time the sub run starts in the bulk fast5,
    measured in #/time series points.
    """

    # Identifier for this sub run.
    id: str
    # Start of the sub run within the bulk fast5, in # of time-series points.
    offset: int
    # Length of the sub run -- presumably also in time-series points (TODO confirm).
    duration: int
@dataclass(frozen=True)
class CaptureTrackingId(TrackingId):
    """Tracking-id metadata for a Capture file: a TrackingId plus its sub run."""

    sub_run: SubRun
@dataclass(frozen=True, init=False)
class CaptureWindows(WindowsByChannel):
    """Per-channel capture windows, as stored in a Capture file."""

    def name(self) -> str:
        # Group name used when serializing to HDF5.
        return "capture_windows"
@dataclass(init=False)
class Read:
    """A single read: its id, channel metadata, and raw signal."""

    read_id: str
    channel_id: Channel
    # This is the raw signal, fresh from the Fast5.
    # BUGFIX: the field was previously declared as `Signal` (capitalized),
    # but __init__ always assigned `self.signal`; the declaration now
    # matches the attribute actually set at runtime.
    signal: Signal

    def __init__(self, read_id: ReadId, channel_id: Channel, signal: Signal):
        self.read_id = read_id
        self.channel_id = channel_id
        self.signal = signal
@dataclass(init=False)
class HDF5_Read(Read, HDF5_GroupSerialableDataclass):
    """A Read that serializes itself into an HDF5 group on construction."""

    def __init__(self, read_id: ReadId, channel_id: Channel, signal: Signal, parent_group: HDF5_Group):
        super().__init__(read_id, channel_id, signal)
        # Create (or reuse) the group for this read under the parent group.
        read_group = parent_group.require_group(self.name())
        # Serialize the channel metadata as a subgroup of the read.
        channel_id.as_group(read_group)
        # Store the raw signal as a dataset named "signal".
        # BUGFIX: a leftover debug print of the dataset repr was removed.
        read_group.require_dataset(
            "signal", data=signal, dtype=signal.dtype, shape=signal.shape
        )

    def name(self) -> str:
        """The read's group name is its read id."""
        return self.read_id
@dataclass
class SegmentationMeta(HDF5_GroupSerialableDataclass):
    """Metadata describing one segmentation pass (serialized under /Meta/Segmentation)."""

    # NOTE(review): "segementer" is a typo for "segmenter", but these field
    # names become serialized HDF5 attribute names, so renaming them would
    # change the on-disk format -- confirm before fixing.
    segementer: str
    segementer_version: str
    capture_criteria: FilterSet
    terminal_captures_only: bool
    open_channel_prior_mean: float
    open_channel_prior_stdv: float
    good_channels: HDF5_DatasetSerializable
    capture_windows: CaptureWindows
class CaptureFile(BaseFile):
    """A Capture (segmented) Fast5 file -- the result of `poretitioner segment`."""

    def __init__(
        self,
        capture_filepath: PathLikeOrString,
        mode: str = "r",
        # NOTE(review): this default is evaluated once at import time, so all
        # callers share a single default logger instance.
        logger: Logger = getLogger(),
    ):
        # TODO: Black doesn't like the formatting here, can't figure out why.
        # fmt: off
        """Capture file.

        Parameters
        ----------
        capture_filepath : PathLikeOrString
            Path to the capture file. Capture files are the result of running
            `poretitioner segment` on a bulk file.
        mode : str, optional
            File mode, valid modes are:
            - "r"        Readonly, file must exist (default)
            - "r+"       Read/write, file must exist
            - "w"        Create file, truncate if exists
            - "w-" or "x" Create file, fail if exists
            - "a"        Read/write if exists, create otherwise
        logger : Logger, optional
            Logger to use, by default getLogger()

        Raises
        ------
        OSError
            Capture file couldn't be opened
            (e.g. didn't exist, OS Resource temporarily unavailable).
            Details in message.
        ValueError
            Capture file had validation errors, details in message.
        """
        # TODO: Black doesn't like the formatting here, can't figure out why.
        # fmt: on
        logger.debug(f"Creating capture file at {capture_filepath} in mode ({mode})")
        super().__init__(capture_filepath, mode=mode, logger=logger)
        # Creates /Filters
        self.f5.require_group(FILTER_PATH.ROOT)
        # Creates /Classification
        self.f5.require_group(CLASSIFICATION_PATH.ROOT)
        # NOTE(review): this check runs after the file has already been opened
        # and groups created above, so in write modes the path will exist by
        # now; confirm whether the guard is still reachable as intended.
        if not self.filepath.exists():
            error_msg = f"Capture fast5 file does not exist at path: {self.filepath}. Make sure the capture file is in this location."
            raise OSError(error_msg)

    # @property
    # TODO: get the sub run info stored from initialization
    # def sub_run(self):
    #     self.f5[CAPTURE_PATH.CONTEXT_TAGS].attrs
def initialize_from_bulk(
    self,
    bulk_f5: BulkFile,
    segment_config: SegmentConfiguration,
    capture_criteria: Optional[Filters] = None,
    sub_run: Optional[SubRun] = None,
    log: Logger = None,
):
    """Initializes the skeleton of a Capture fast5 file from the metadata of a Bulk fast5 file.

    This should be done before writing any captures.

    Parameters
    ----------
    bulk_f5 : BulkFile
        Bulk Fast5 file, generated from an Oxford Nanopore experiment.
    segment_config : SegmentConfiguration
        General configuration for the segmenter.
    capture_criteria : Optional[Filters], optional
        Set of filters that define what signals could even potentially be a capture, by default None.
        NOTE(review): currently unused in this method body -- confirm intent.
    sub_run : Optional[SubRun], optional
        Sub-run context to record, by default None.
    log : Logger, optional
        Logger passed through to group serialization, by default None.

    Raises
    ------
    ValueError
        If no segment configuration is provided.
    """
    # Only supporting range capture_criteria for now (MVP). This should be expanded to all FilterPlugins: https://github.com/uwmisl/poretitioner/issues/67
    # Referencing spec v0.1.1

    # /Meta/context_tags -- copied from the bulk file, with "filename"
    # renamed to "bulk_filename" by ContextTagsCapture.
    capture_context_tags_group = self.f5.require_group(CAPTURE_PATH.CONTEXT_TAGS)
    bulk_context_tags = bulk_f5.context_tags_group
    context_tags_capture = ContextTagsCapture.from_context_tags_bulk_group(bulk_context_tags)
    # NOTE(review): serialization is rooted at the group's *parent* -- confirm
    # this is intentional (as_group presumably creates its own child group).
    capture_context_tags_group = context_tags_capture.as_group(
        capture_context_tags_group.parent, log=log
    )
    # capture_context_tags_group.attrs.create(key, value, dtype=hdf5_dtype(value))
    # bulk_f5_fname = bulk_f5.filename
    # capture_context_tags_group.attrs.create(
    #     "bulk_filename", bulk_f5_fname, dtype=hdf5_dtype(bulk_f5_fname)
    # )
    # sampling_frequency = bulk_f5.sampling_rate
    # capture_context_tags_group.attrs.create("sample_frequency", sampling_frequency, dtype=hdf5_dtype(sampling_frequency))

    # /Meta/tracking_id -- attributes copied verbatim from the bulk file.
    capture_tracking_id_group = self.f5.require_group(CAPTURE_PATH.TRACKING_ID)
    for key, value in bulk_f5.tracking_id_group.get_attrs().items():
        capture_tracking_id_group.create_attr(key, value)

    # Sub-run context, when provided.
    if sub_run is not None:
        subrun_group = HDF5_Group(self.f5.require_group(CAPTURE_PATH.SUB_RUN))
        sub_run.as_group(subrun_group.parent, log=log)
        # id = sub_run.sub_run_id
        # offset = sub_run.sub_run_offset
        # duration = sub_run.sub_run_duration
        # capture_tracking_id_group.attrs.create(
        #     "sub_run_id", id, dtype=hdf5_dtype(id)
        # )
        # capture_tracking_id_group.attrs.create("sub_run_offset", offset)
        # capture_tracking_id_group.attrs.create("sub_run_duration", duration)

    # /Meta/Segmentation
    # TODO: define config param structure : https://github.com/uwmisl/poretitioner/issues/27
    # config = {"param": "value",
    #           "capture_criteria": {"f1": (min, max), "f2: (min, max)"}}
    capture_segmentation_group = self.f5.require_group(CAPTURE_PATH.SEGMENTATION)
    capture_windows_group = self.f5.require_group(CAPTURE_PATH.CAPTURE_WINDOWS)
    segmenter_name = get_application_info().name
    version = get_application_info().data_schema_version
    good_channels = [1]  # TODO: Pass in good channels
    filters = HDF5_FilterSet(segment_config.capture_criteria)
    SEGMENT_METADATA = SegmentationMeta(
        segmenter_name,
        version,
        filters,
        segment_config.terminal_capture_only,
        segment_config.open_channel_prior_mean,
        segment_config.open_channel_prior_stdv,
        good_channels,
        capture_windows_group,
    )
    SEGMENT_METADATA.as_group(capture_segmentation_group, log=log)
    # print(__name__)
    # capture_segmentation_group.attrs.create(
    #     "segmenter", __name__, dtype=hdf5_dtype(segmenter_name)
    # )
    # capture_segmentation_group.attrs.create(
    #     "segmenter_version", version, dtype=hdf5_dtype(version)
    # )
    # filter_group = self.f5.require_group(CAPTURE_PATH.CAPTURE_CRITERIA)
    # context_id_group = self.f5.require_group(CAPTURE_PATH.CONTEXT_ID)
    # capture_windows_group = self.f5.require_group(CAPTURE_PATH.CAPTURE_WINDOWS)

    # Only supporting range capture_criteria for now (MVP): https://github.com/uwmisl/poretitioner/issues/67
    # NOTE(review): this guard runs after segment_config has already been
    # dereferenced above, so it can only trigger too late -- confirm ordering.
    if segment_config is None:
        raise ValueError("No segment configuration provided.")
    else:
        # self.log.info(f"Saving Segment config: {segment_config!s}")
        # for key, value in vars(segment_config).items():
        #     try:
        #         save_value = json.dumps(value)
        #     except TypeError:
        #         # In case the object isn't easily serializable
        #         save_value = json.dumps({k: v.__dict__ for k, v in value.items()})
        #     context_id_group.create_dataset(key, data=save_value)
        # for name, my_filter in capture_criteria.items():
        #     filter_plugin = my_filter.plugin
        #     # `isinstance` is an anti-pattern, pls don't use in production.
        #     if isinstance(filter_plugin, RangeFilter):
        #         maximum = filter_plugin.maximum
        #         minimum = filter_plugin.minimum
        #         self.log.debug(
        #             f"Setting capture criteria for {name}: ({minimum}, {maximum})"
        #         )
        #         filter_group.create_dataset(name, data=(minimum, maximum))
        pass
# Based on the example code, it doesn't seem like we write anything for ejected filter?
def validate(self, capture_filepath: PathLikeOrString, log: Logger = getLogger()):
    """Make sure this represents a valid capture/segmented poretitioner file.

    Raises
    ------
    ValueError
        Capture/segmented file had some validation issues, details in message.
    """
    # NOTE(review): validation is not implemented yet; this never raises.
    pass
@property
def sampling_rate(self) -> int:
    """Retrieve the sampling rate from a capture fast5 file. Units: Hz.

    Also referred to as the sample rate, sample frequency, or sampling
    frequency. Checked first at /Meta/context_tags ("sample_frequency"),
    then falls back to /Meta ("sample_rate").

    Returns
    -------
    int
        Sampling rate

    Raises
    ------
    ValueError
        If neither location contains a sampling rate.
    """
    context_tag_path = "/Meta/context_tags"
    sample_frequency_key = "sample_frequency"
    sample_rate_path = "/Meta"
    sample_rate_key = "sample_rate"

    try:
        sample_frequency = int(self.f5[context_tag_path].attrs[sample_frequency_key])
    except KeyError:
        # Try checking the Meta group as a fallback.
        try:
            sample_frequency = int(self.f5[sample_rate_path].attrs[sample_rate_key])
        except KeyError as e:
            # BUGFIX: the original message said "bulk file" (this is a capture
            # file) and pointed the primary location at the fallback path
            # instead of the context_tags path.
            error_msg = f"Sampling rate not present in capture file '{self.f5.filename}'. Make sure a sampling frequency is specified either at '{context_tag_path}' with attribute '{sample_frequency_key}', or as a fallback, '{sample_rate_path}' with attribute '{sample_rate_key}'"
            self.log.error(error_msg)
            # Chain the original KeyError for debuggability.
            raise ValueError(error_msg) from e
    return sample_frequency
@property
def reads(self, root: str = FAST5_ROOT) -> list[ReadId]:
    """All read ids found under the fast5 root that look like capture reads."""
    # NOTE(review): as a @property this can only ever be accessed without
    # arguments, so `root` is always FAST5_ROOT -- consider dropping the
    # parameter or demoting this to a regular method.
    root_group = self.f5.require_group(root)
    # A falsy/empty root group yields no candidate reads.
    potential_reads = [] if not root_group else root_group.keys()
    reads = [read for read in potential_reads if self.is_read(read)]
    return reads
def filter(self, filter_set: FilterSet) -> set[ReadId]:
    """Apply a filter set to this file's reads.

    NOTE(review): this method is an unfinished stub -- it performs the
    lookup but never writes or returns a result (implicitly returns None
    despite the annotation).
    """
    # First, check whether this exact filter set exists in the file already
    # BUGFIX: the file handle is stored as `self.f5` everywhere else in this
    # class; `self.fast5` would raise AttributeError.
    filter_path = self.f5.get(FILTER_PATH.ROOT)
    # If not, create it and write the result
    # If so, log that it was a found and return the result
@property
def filtered_reads(self):
    """Reads that pass the current filter criteria (currently: all reads)."""
    # TODO: Implement filtering here to only return reads that pass a filter. https://github.com/uwmisl/poretitioner/issues/67
    return self.reads

def filter_sets(self):
    # Placeholder: filter-set enumeration is not implemented yet.
    pass
def is_read(self, group: HDF5_Group) -> bool:
    """Whether this group points to a capture read.

    Parameters
    ----------
    group : HDF5_Group
        Group that may represent a read.

    Returns
    -------
    bool
        True if and only if the group represents a read.
    """
    # Reads should have Signal and channelId groups.
    # If a group has both of these, we consider it a read.
    has_channel_path_group = issubclass(group.get(KEY.CHANNEL_ID, getclass=True), h5py.Group)
    has_signal_group = issubclass(group.get(KEY.SIGNAL, getclass=True), h5py.Dataset)
    return has_channel_path_group and has_signal_group
def get_fractionalized_read(
    self, read_id: ReadId, start: Optional[int] = None, end: Optional[int] = None
) -> FractionalizedSignal:
    """Gets the fractionalized signal from this read.

    Parameters
    ----------
    read_id : ReadId
        Read to get the fractionalized signal from.
    start : Optional[int], optional
        Where to start the read, by default None
    end : Optional[int], optional
        Where to end the read, by default None

    Returns
    -------
    FractionalizedSignal
        Fractionalized signal from times `start` to `end`.
    """
    # BUGFIX: the signal path was previously built as
    # `ChannelCalibration(read_id)` -- a calibration object, not an HDF5
    # path. Use the signal-path helper, matching
    # get_capture_metadata_for_read() and write_capture().
    signal_path = signal_path_for_read_id(read_id)
    channel_path = channel_path_for_read_id(read_id)
    open_channel_pA = self.f5[str(signal_path)].attrs[KEY.OPEN_CHANNEL_PA]
    calibration = self.get_channel_calibration_for_read(read_id)
    raw_signal: RawSignal = self.f5.get(str(signal_path))[start:end]
    channel_number = self.f5.get(str(channel_path)).attrs["channel_number"]
    fractionalized = FractionalizedSignal(
        raw_signal,
        channel_number,
        calibration,
        open_channel_pA,
        read_id=read_id,
        do_conversion=True,
    )
    return fractionalized
def get_channel_calibration_for_read(self, read_id: ReadId) -> ChannelCalibration:
    """Retrieve the channel calibration for a specific read in a segmented
    fast5 file (i.e. CaptureFile).

    This is used for properly scaling values when converting raw signal to
    actual units.

    Note: using UK spelling of digitization for consistency w/ file format

    Parameters
    ----------
    read_id : ReadId
        Read id to retrieve raw signal. Can be formatted as a path
        ("read_xxx...") or just the read id
        ("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx").

    Returns
    -------
    ChannelCalibration
        Channel calibration Offset, range, and digitisation values.
    """
    # Resolve the channel path for the read, then delegate.
    return self.get_channel_calibration_for_path(channel_path_for_read_id(read_id))
def get_capture_metadata_for_read(self, read_id: ReadId) -> CaptureMetadata:
    """Retrieve the capture metadata for given read.

    Parameters
    ----------
    read_id : ReadId
        Which read to fetch the metadata for.

    Returns
    -------
    CaptureMetadata
        Metadata around the captures in this read.
    """
    # Attribute handles for the read's channel group and signal dataset.
    channel_attrs = self.f5[channel_path_for_read_id(read_id)].attrs
    signal_attrs = self.f5[signal_path_for_read_id(read_id)].attrs
    return CaptureMetadata(
        read_id,
        signal_attrs["start_time_bulk"],
        signal_attrs["start_time_local"],
        signal_attrs["duration"],
        signal_attrs["ejected"],
        signal_attrs["voltage"],
        signal_attrs[KEY.OPEN_CHANNEL_PA],
        channel_attrs["channel_number"],
        self.get_channel_calibration_for_read(read_id),
        channel_attrs["sampling_rate"],
    )
def write_capture(self, raw_signal: RawSignal, metadata: CaptureMetadata, log: Logger = None):
    """Write a single capture to the specified capture fast5 file (which has
    already been created via create_capture_fast5()).

    Parameters
    ----------
    raw_signal : RawSignal
        Time series of nanopore current values (in units of pA).
    metadata: CaptureMetadata
        Details about this capture.
    log : Logger, optional
        Currently unused in this method body.
    """
    read_id = metadata.read_id
    f5 = self.f5
    # NOTE(review): `channel_id` and `signal` below are constructed but never
    # used -- the serialization calls that consumed them are commented out.
    channel_id = Channel(
        metadata.channel_number,
        metadata.calibration,
        metadata.open_channel_pA,
        metadata.sampling_rate,
    )
    # NOTE(review): these assignments mirror the capture metadata onto the
    # file object itself; confirm whether any caller relies on them.
    self.start_time_bulk = metadata.start_time_bulk
    self.start_time_local = metadata.start_time_local
    self.duration = metadata.duration
    self.voltage = metadata.voltage_threshold
    self.open_channel_pA = metadata.open_channel_pA
    self.read_id = read_id
    self.ejected = metadata.ejected
    # signal = HDF5_Signal.__new__(HDF5_Signal, raw_signal)
    signal = Signal(
        raw_signal,
        metadata.start_time_bulk,
        metadata.start_time_local,
        metadata.duration,
        metadata.voltage_threshold,
        metadata.open_channel_pA,
        read_id,
        metadata.ejected,
        metadata.channel_number,
    )
    # root = HDF5_Group(f5.require_group(read_id))
    # read = Read(read_id, channel_id, signal)
    # my_read = HDF5_Read(read_id, channel_id, signal, root)
    # my_read.as_group(f5, log=log)

    # Write the raw signal dataset and its per-capture attributes.
    signal_path = str(signal_path_for_read_id(read_id))
    f5[signal_path] = raw_signal
    f5[signal_path].attrs["read_id"] = read_id
    f5[signal_path].attrs["start_time_bulk"] = metadata.start_time_bulk
    f5[signal_path].attrs["start_time_local"] = metadata.start_time_local
    f5[signal_path].attrs["duration"] = metadata.duration
    f5[signal_path].attrs["ejected"] = metadata.ejected
    f5[signal_path].attrs["voltage"] = metadata.voltage_threshold
    f5[signal_path].attrs[KEY.OPEN_CHANNEL_PA] = metadata.open_channel_pA

    # Write the channel group and its calibration attributes.
    channel_path = str(channel_path_for_read_id(read_id))
    f5.require_group(channel_path)
    f5[channel_path].attrs["channel_number"] = metadata.channel_number
    f5[channel_path].attrs["digitisation"] = metadata.calibration.digitisation
    f5[channel_path].attrs["range"] = metadata.calibration.range
    f5[channel_path].attrs["offset"] = metadata.calibration.offset
    f5[channel_path].attrs["sampling_rate"] = metadata.sampling_rate
    f5[channel_path].attrs[KEY.OPEN_CHANNEL_PA] = metadata.open_channel_pA
def write_capture_windows(self, capture_windows: WindowsByChannel):
    # Placeholder: capture-window serialization is not implemented yet.
    pass
| StarcoderdataPython |
1608486 | #!/usr/local/bin/pyleabra -i
# Copyright (c) 2019, The Emergent Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# use:
# pyleabra -i ra25.py
# to run in gui interactive mode from the command line (or pyleabra, import ra25)
# see main function at the end for startup args
# stroop illustrates how the PFC can produce top-down biasing for executive control,
# in the context of the widely-studied Stroop task.
from leabra import go, leabra, emer, relpos, eplot, env, agg, patgen, prjn, etable, efile, split, etensor, params, netview, rand, erand, gi, giv, pygiv, pyparams, mat32, metric, simat, pca, clust
import importlib as il #il.reload(ra25) -- doesn't seem to work for reasons unknown
import io, sys, getopt
from datetime import datetime, timezone
from enum import Enum
import numpy as np
# import matplotlib
# matplotlib.use('SVG')
# import matplotlib.pyplot as plt
# plt.rcParams['svg.fonttype'] = 'none' # essential for not rendering fonts as paths
# note: pandas, xarray or pytorch TensorDataSet can be used for input / output
# patterns and recording of "log" data for plotting. However, the etable.Table
# has better GUI and API support, and handles tensor columns directly unlike
# pandas. Support for easy migration between these is forthcoming.
# import pandas as pd
# this will become Sim later.. (placeholder integer, replaced with the Sim
# instance at startup)
TheSim = 1

# LogPrec is precision for saving float values in logs
LogPrec = 4
# note: we cannot use methods for callbacks from Go -- must be separate functions
# so below are all the callbacks from the GUI toolbar actions
def InitCB(recv, send, sig, data):
    """Toolbar: re-initialize the simulation and refresh the GUI."""
    TheSim.Init()
    TheSim.UpdateClassView()
    TheSim.vp.SetNeedsFullRender()

def TrainCB(recv, send, sig, data):
    """Toolbar: start training unless something is already running."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.ToolBar.UpdateActions()
    TheSim.Train()

def StopCB(recv, send, sig, data):
    """Toolbar: request that the current task stop."""
    TheSim.Stop()

def StepTrialCB(recv, send, sig, data):
    """Toolbar: run exactly one training trial, then refresh the GUI."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.TrainTrial()
    TheSim.IsRunning = False
    TheSim.UpdateClassView()
    TheSim.vp.SetNeedsFullRender()

def StepEpochCB(recv, send, sig, data):
    """Toolbar: run one epoch of training."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.ToolBar.UpdateActions()
    TheSim.TrainEpoch()

def StepRunCB(recv, send, sig, data):
    """Toolbar: run one complete training run."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.ToolBar.UpdateActions()
    TheSim.TrainRun()
def TestTrialCB(recv, send, sig, data):
    """Toolbar: run a single standard test trial, then refresh the GUI."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.TestTrial(False)
    TheSim.IsRunning = False
    TheSim.UpdateClassView()
    TheSim.vp.SetNeedsFullRender()

def TestAllCB(recv, send, sig, data):
    """Toolbar: run the full set of standard test patterns."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.ToolBar.UpdateActions()
    TheSim.RunTestAll()

def SOATestTrialCB(recv, send, sig, data):
    """Toolbar: run a single SOA test trial, then refresh the GUI."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.SOATestTrial(False)
    TheSim.IsRunning = False
    TheSim.UpdateClassView()
    TheSim.vp.SetNeedsFullRender()

def SOATestAllCB(recv, send, sig, data):
    """Toolbar: run the full set of SOA test patterns."""
    if TheSim.IsRunning:
        return
    TheSim.IsRunning = True
    TheSim.ToolBar.UpdateActions()
    TheSim.RunSOATestAll()
def ResetTstTrlLogCB(recv, send, sig, data):
    """Toolbar: clear the test-trial log and redraw its plot."""
    TheSim.TstTrlLog.SetNumRows(0)
    TheSim.TstTrlPlot.Update()

def DefaultsCB(recv, send, sig, data):
    """Toolbar: restore default parameters, re-initialize, refresh the GUI."""
    TheSim.Defaults()
    TheSim.Init()
    TheSim.UpdateClassView()
    TheSim.vp.SetNeedsFullRender()

def NewRndSeedCB(recv, send, sig, data):
    """Toolbar: pick a new random seed for subsequent runs."""
    TheSim.NewRndSeed()

def ReadmeCB(recv, send, sig, data):
    """Toolbar: open this model's README in a browser."""
    gi.OpenURL("https://github.com/CompCogNeuro/sims/blob/master/ch10/stroop/README.md")

def UpdtFuncNotRunning(act):
    """Enable `act` only while the simulation is NOT running."""
    act.SetActiveStateUpdt(not TheSim.IsRunning)

def UpdtFuncRunning(act):
    """Enable `act` only while the simulation IS running."""
    act.SetActiveStateUpdt(TheSim.IsRunning)
#####################################################
# Sim
class Sim(pygiv.ClassViewObj):
    """
    Sim encapsulates the entire simulation model, and we define all the
    functionality as methods on this struct. This structure keeps all relevant
    state information organized and available without having to pass everything around
    as arguments to methods, and provides the core GUI interface (note the view tags
    for the fields which provide hints to how things should be displayed).
    """

    def __init__(self):
        """Declare all model state; SetTags attaches GUI view hints per field."""
        super(Sim, self).__init__()
        # User-tunable parameters.
        self.FmPFC = float(0.3)
        self.SetTags("FmPFC", 'def:"0.3" step:"0.01" desc:"strength of projection from PFC to Hidden -- reduce to simulate PFC damage"')
        self.DtVmTau = float(30)
        self.SetTags("DtVmTau", 'def:"30" step:"5" desc:"time constant for updating the network "')
        # Network, patterns, and log tables.
        self.Net = leabra.Network()
        self.SetTags("Net", 'view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"')
        self.TrainPats = etable.Table()
        self.SetTags("TrainPats", 'view:"no-inline" desc:"training patterns"')
        self.TestPats = etable.Table()
        self.SetTags("TestPats", 'view:"no-inline" desc:"testing patterns"')
        self.SOAPats = etable.Table()
        self.SetTags("SOAPats", 'view:"no-inline" desc:"SOA testing patterns"')
        self.TrnEpcLog = etable.Table()
        self.SetTags("TrnEpcLog", 'view:"no-inline" desc:"training epoch-level log data"')
        self.TstEpcLog = etable.Table()
        self.SetTags("TstEpcLog", 'view:"no-inline" desc:"testing epoch-level log data"')
        self.TstTrlLog = etable.Table()
        self.SetTags("TstTrlLog", 'view:"no-inline" desc:"testing trial-level log data"')
        self.SOATrlLog = etable.Table()
        self.SetTags("SOATrlLog", 'view:"no-inline" desc:"SOA testing trial-level log data"')
        self.RunLog = etable.Table()
        self.SetTags("RunLog", 'view:"no-inline" desc:"summary log of each run"')
        self.RunStats = etable.Table()
        self.SetTags("RunStats", 'view:"no-inline" desc:"aggregate stats on all runs"')
        # Parameters and run control.
        self.Params = params.Sets()
        self.SetTags("Params", 'view:"no-inline" desc:"full collection of param sets"')
        self.ParamSet = str()
        self.SetTags("ParamSet", 'view:"-" desc:"which set of *additional* parameters to use -- always applies Base and optionaly this next if set -- can use multiple names separated by spaces (don\'t put spaces in ParamSet names!)"')
        self.MaxRuns = int(1)
        self.SetTags("MaxRuns", 'desc:"maximum number of model runs to perform"')
        self.MaxEpcs = int(55)
        self.SetTags("MaxEpcs", 'desc:"maximum number of epochs to run per model run"')
        # Environments (training, standard testing, SOA testing).
        self.TrainEnv = env.FreqTable()
        self.SetTags("TrainEnv", 'desc:"Training environment -- contains everything about iterating over input / output patterns over training"')
        self.TestEnv = env.FixedTable()
        self.SetTags("TestEnv", 'desc:"Testing environment for std strooop -- manages iterating over testing"')
        self.SOATestEnv = env.FixedTable()
        self.SetTags("SOATestEnv", 'desc:"Testing environment for SOA tests -- manages iterating over testing"')
        self.Time = leabra.Time()
        self.SetTags("Time", 'desc:"leabra timing parameters and state"')
        # Display / update settings.
        self.ViewOn = True
        self.SetTags("ViewOn", 'desc:"whether to update the network view while running"')
        self.TrainUpdt = leabra.TimeScales.Quarter
        self.SetTags("TrainUpdt", 'desc:"at what time scale to update the display during training?  Anything longer than Epoch updates at Epoch in this model"')
        self.TestUpdt = leabra.TimeScales.Cycle
        self.SetTags("TestUpdt", 'desc:"at what time scale to update the display during testing?  Anything longer than Epoch updates at Epoch in this model"')
        self.TestInterval = int(5)
        self.SetTags("TestInterval", 'desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"')
        self.TstRecLays = go.Slice_string(["Colors", "Words", "PFC", "Hidden", "Output"])
        self.SetTags("TstRecLays", 'desc:"names of layers to record activations etc of during testing"')

        # statistics: note use float64 as that is best for etable.Table
        self.TrlErr = float()
        self.SetTags("TrlErr", 'inactive:"+" desc:"1 if trial was error, 0 if correct -- based on SSE = 0 (subject to .5 unit-wise tolerance)"')
        self.TrlSSE = float()
        self.SetTags("TrlSSE", 'inactive:"+" desc:"current trial\'s sum squared error"')
        self.TrlAvgSSE = float()
        self.SetTags("TrlAvgSSE", 'inactive:"+" desc:"current trial\'s average sum squared error"')
        self.TrlCosDiff = float()
        self.SetTags("TrlCosDiff", 'inactive:"+" desc:"current trial\'s cosine difference"')
        self.SOA = int()
        self.SetTags("SOA", 'inactive:"+" desc:"current SOA value"')
        self.SOAMaxCyc = int()
        self.SetTags("SOAMaxCyc", 'inactive:"+" desc:"current max cycles value for SOA"')
        self.SOATrlTyp = int()
        self.SetTags("SOATrlTyp", 'inactive:"+" desc:"current trial type for SOA"')
        self.EpcSSE = float()
        self.SetTags("EpcSSE", 'inactive:"+" desc:"last epoch\'s total sum squared error"')
        self.EpcAvgSSE = float()
        self.SetTags("EpcAvgSSE", 'inactive:"+" desc:"last epoch\'s average sum squared error (average over trials, and over units within layer)"')
        self.EpcPctErr = float()
        self.SetTags("EpcPctErr", 'inactive:"+" desc:"last epoch\'s percent of trials that had SSE > 0 (subject to .5 unit-wise tolerance)"')
        self.EpcPctCor = float()
        self.SetTags("EpcPctCor", 'inactive:"+" desc:"last epoch\'s percent of trials that had SSE == 0 (subject to .5 unit-wise tolerance)"')
        self.EpcCosDiff = float()
        self.SetTags("EpcCosDiff", 'inactive:"+" desc:"last epoch\'s average cosine difference for output layer (a normalized error measure, maximum of 1 when the minus phase exactly matches the plus)"')

        # internal state - view:"-"
        self.SumErr = float()
        self.SetTags("SumErr", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
        self.SumSSE = float()
        self.SetTags("SumSSE", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
        self.SumAvgSSE = float()
        self.SetTags("SumAvgSSE", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
        self.SumCosDiff = float()
        self.SetTags("SumCosDiff", 'view:"-" inactive:"+" desc:"sum to increment as we go through epoch"')
        # GUI handles (0 until the window is built).
        self.Win = 0
        self.SetTags("Win", 'view:"-" desc:"main GUI window"')
        self.NetView = 0
        self.SetTags("NetView", 'view:"-" desc:"the network viewer"')
        self.ToolBar = 0
        self.SetTags("ToolBar", 'view:"-" desc:"the master toolbar"')
        self.TrnEpcPlot = 0
        self.SetTags("TrnEpcPlot", 'view:"-" desc:"the training epoch plot"')
        self.TstEpcPlot = 0
        self.SetTags("TstEpcPlot", 'view:"-" desc:"the testing epoch plot"')
        self.TstTrlPlot = 0
        self.SetTags("TstTrlPlot", 'view:"-" desc:"the test-trial plot"')
        self.SOATrlPlot = 0
        self.SetTags("SOATrlPlot", 'view:"-" desc:"the SOA test-trial plot"')
        self.RunPlot = 0
        self.SetTags("RunPlot", 'view:"-" desc:"the run plot"')
        self.TrnEpcFile = 0
        self.SetTags("TrnEpcFile", 'view:"-" desc:"log file"')
        self.RunFile = 0
        self.SetTags("RunFile", 'view:"-" desc:"log file"')
        self.ValsTsrs = {}
        self.SetTags("ValsTsrs", 'view:"-" desc:"for holding layer values"')
        # Run-control flags and seed.
        self.IsRunning = False
        self.SetTags("IsRunning", 'view:"-" desc:"true if sim is running"')
        self.StopNow = False
        self.SetTags("StopNow", 'view:"-" desc:"flag to stop running"')
        self.NeedsNewRun = False
        self.SetTags("NeedsNewRun", 'view:"-" desc:"flag to initialize NewRun if last one finished"')
        self.RndSeed = int(1)
        self.SetTags("RndSeed", 'view:"-" desc:"the current random seed"')
        self.vp = 0
        self.SetTags("vp", 'view:"-" desc:"viewport"')
def InitParams(ss):
    """
    Sets the default set of parameters -- Base is always applied, and others can be optionally
    selected to apply on top of that
    """
    ss.Params.OpenJSON("stroop.params")
    # Restore the user-tunable defaults (FmPFC, DtVmTau).
    ss.Defaults()
def Defaults(ss):
    """Restore default values for the user-tunable parameters."""
    ss.FmPFC = 0.3
    ss.DtVmTau = 30
def Config(ss):
    """
    Config configures all the elements using the standard functions
    """
    ss.InitParams()
    # Patterns must be loaded before the envs that index them.
    ss.OpenPats()
    ss.ConfigEnv()
    ss.ConfigNet(ss.Net)
    ss.ConfigTrnEpcLog(ss.TrnEpcLog)
    ss.ConfigTstEpcLog(ss.TstEpcLog)
    ss.ConfigTstTrlLog(ss.TstTrlLog)
    ss.ConfigSOATrlLog(ss.SOATrlLog)
    ss.ConfigRunLog(ss.RunLog)
def ConfigEnv(ss):
    """Configure the training, testing, and SOA-testing environments."""
    if ss.MaxRuns == 0:
        ss.MaxRuns = 1
    if ss.MaxEpcs == 0:  # allow user override
        ss.MaxEpcs = 55
    # Training iterates over TrainPats by frequency.
    ss.TrainEnv.Nm = "TrainEnv"
    ss.TrainEnv.Dsc = "training params and state"
    ss.TrainEnv.Table = etable.NewIdxView(ss.TrainPats)
    ss.TrainEnv.NSamples = 1
    # ss.TrainEnv.Validate()
    ss.TrainEnv.Run.Max = ss.MaxRuns  # note: we are not setting epoch max -- do that manually
    # Standard stroop testing iterates TestPats sequentially.
    ss.TestEnv.Nm = "TestEnv"
    ss.TestEnv.Dsc = "testing params and state"
    ss.TestEnv.Table = etable.NewIdxView(ss.TestPats)
    ss.TestEnv.Sequential = True
    # ss.TestEnv.Validate()
    # SOA testing iterates SOAPats sequentially.
    ss.SOATestEnv.Nm = "SOATestEnv"
    ss.SOATestEnv.Dsc = "test all params and state"
    ss.SOATestEnv.Table = etable.NewIdxView(ss.SOAPats)
    ss.SOATestEnv.Sequential = True
    # ss.SOATestEnv.Validate()
    ss.TrainEnv.Init(0)
    ss.TestEnv.Init(0)
    ss.SOATestEnv.Init(0)
def ConfigNet(ss, net):
    """Build the Stroop network: Colors/Words/PFC inputs, Hidden, Output."""
    net.InitName(net, "Stroop")
    clr = net.AddLayer2D("Colors", 1, 2, emer.Input)
    wrd = net.AddLayer2D("Words", 1, 2, emer.Input)
    hid = net.AddLayer4D("Hidden", 1, 2, 1, 2, emer.Hidden)
    pfc = net.AddLayer2D("PFC", 1, 2, emer.Input)
    out = net.AddLayer2D("Output", 1, 2, emer.Target)
    full = prjn.NewFull()
    clr2hid = prjn.NewOneToOne()
    wrd2hid = prjn.NewOneToOne()
    # Words map onto the second pair of hidden units (offset by 2).
    wrd2hid.RecvStart = 2
    # PFC provides top-down biasing via a rect projection.
    pfc2hid = prjn.NewRect()
    pfc2hid.Scale.Set(0.5, 0.5)
    pfc2hid.Size.Set(1, 1)
    net.ConnectLayers(clr, hid, clr2hid, emer.Forward)
    net.ConnectLayers(wrd, hid, wrd2hid, emer.Forward)
    net.ConnectLayers(pfc, hid, pfc2hid, emer.Back)
    net.BidirConnectLayersPy(hid, out, full)
    # Layout for the netview display.
    wrd.SetRelPos(relpos.Rel(Rel= relpos.RightOf, Other= "Colors", YAlign= relpos.Front, Space= 1))
    out.SetRelPos(relpos.Rel(Rel= relpos.RightOf, Other= "Words", YAlign= relpos.Front, Space= 1))
    hid.SetRelPos(relpos.Rel(Rel= relpos.Above, Other= "Colors", YAlign= relpos.Front, XAlign= relpos.Left, YOffset= 1))
    pfc.SetRelPos(relpos.Rel(Rel= relpos.RightOf, Other= "Hidden", YAlign= relpos.Front, Space= 1))
    net.Defaults()
    ss.SetParams("Network", False)  # only set Network params
    net.Build()
    net.InitWts()
def Init(ss):
    """
    Init restarts the run, and initializes everything, including network
    weights, and resets the epoch log table.
    """
    rand.Seed(ss.RndSeed)
    ss.StopNow = False
    # "" => apply all param sheets.
    ss.SetParams("", False)
    ss.NewRun()
    ss.UpdateView(True)
def NewRndSeed(ss):
    """
    NewRndSeed gets a new random seed based on current time -- otherwise uses
    the same random seed for every run
    """
    # Seconds-resolution UTC timestamp is sufficient uniqueness here.
    ss.RndSeed = int(datetime.now(timezone.utc).timestamp())
def Counters(ss, train):
    """
    Counters returns a string of the current counter state
    use tabs to achieve a reasonable formatting overall
    and add a few tabs at the end to allow for expansion..
    """
    # Run/Epoch always come from the training env; Trial/Name come from
    # whichever env is currently driving.
    active_env = ss.TrainEnv if train else ss.TestEnv
    return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\tCycle:\t%d\tName:\t%s\t\t\t" % (
        ss.TrainEnv.Run.Cur,
        ss.TrainEnv.Epoch.Cur,
        active_env.Trial.Cur,
        ss.Time.Cycle,
        active_env.TrialName.Cur,
    )
def UpdateView(ss, train):
    """Record current counter state into the netview and refresh it, if visible."""
    if ss.NetView != 0 and ss.NetView.IsVisible():
        ss.NetView.Record(ss.Counters(train))
        # note: essential to use Go version of update when called from another goroutine
        ss.NetView.GoUpdate()
def AlphaCyc(ss, train):
    """
    AlphaCyc runs one alpha-cycle (100 msec, 4 quarters) of processing.
    External inputs must have already been applied prior to calling,
    using ApplyExt method on relevant layers (see TrainTrial, TestTrial).
    If train is true, then learning DWt or WtFmDWt calls are made.
    Handles netview updating within scope of AlphaCycle
    """
    if ss.Win != 0:
        ss.Win.PollEvents() # this is essential for GUI responsiveness while running
    # view-update granularity depends on train vs. test mode
    viewUpdt = ss.TrainUpdt.value
    if not train:
        viewUpdt = ss.TestUpdt.value
    if train:
        ss.Net.WtFmDWt()  # apply previously-computed weight changes at trial start
    ss.Net.AlphaCycInit()
    ss.Time.AlphaCycStart()
    for qtr in range(4):
        for cyc in range(ss.Time.CycPerQtr):
            ss.Net.Cycle(ss.Time)
            ss.Time.CycleInc()
            if ss.ViewOn:
                if viewUpdt == leabra.Cycle:
                    if cyc != ss.Time.CycPerQtr-1: # will be updated by quarter
                        ss.UpdateView(train)
                if viewUpdt == leabra.FastSpike:
                    if (cyc+1)%10 == 0:
                        ss.UpdateView(train)
        ss.Net.QuarterFinal(ss.Time)
        ss.Time.QuarterInc()
        if ss.ViewOn:
            if viewUpdt <= leabra.Quarter:
                ss.UpdateView(train)
            if viewUpdt == leabra.Phase:
                if qtr >= 2:  # qtr 2 = end of minus phase, qtr 3 = end of plus phase
                    ss.UpdateView(train)
    if train:
        ss.Net.DWt()  # compute (but do not apply) weight changes at cycle end
    if ss.ViewOn and viewUpdt == leabra.AlphaCycle:
        ss.UpdateView(train)
def AlphaCycTest(ss):
    """
    AlphaCycTest is for testing -- uses threshold stopping and longer quarters.
    Runs until the Output layer's max activity exceeds threshold, or all
    quarters complete.
    """
    viewUpdt = ss.TestUpdt.value
    train = False
    out = leabra.Layer(ss.Net.LayerByName("Output"))
    ss.Net.AlphaCycInit()
    ss.Time.AlphaCycStart()
    overThresh = False
    for qtr in range(4):
        for cyc in range(75): # note: fixed 75 per quarter = 300 total max; stops early at threshold
            ss.Net.Cycle(ss.Time)
            ss.Time.CycleInc()
            if ss.ViewOn:
                if viewUpdt == leabra.Cycle:
                    ss.UpdateView(train)
                if viewUpdt == leabra.FastSpike:
                    if (cyc+1)%10 == 0:
                        ss.UpdateView(train)
            # response threshold: stop as soon as output is clearly active
            outact = out.Pools[0].Inhib.Act.Max
            if outact > 0.51:
                overThresh = True
                break
        ss.Net.QuarterFinal(ss.Time)
        ss.Time.QuarterInc()
        if ss.ViewOn:
            if viewUpdt <= leabra.Quarter:
                ss.UpdateView(train)
            if viewUpdt == leabra.Phase:
                if qtr >= 2:
                    ss.UpdateView(train)
        if overThresh:
            break
    ss.UpdateView(False)
def AlphaCycTestCyc(ss, cycs):
    """
    AlphaCycTestCyc tests with a specified number of cycles (no quarter
    structure) -- used for SOA testing with per-trial cycle limits.
    """
    viewUpdt = ss.TestUpdt.value
    train = False
    out = leabra.Layer(ss.Net.LayerByName("Output"))
    ss.Net.AlphaCycInit()
    ss.Time.AlphaCycStart()
    for cyc in range(cycs): # just fixed cycles, no quarters
        ss.Net.Cycle(ss.Time)
        ss.Time.CycleInc()
        if ss.ViewOn:
            if viewUpdt == leabra.Cycle:
                ss.UpdateView(train)
            if viewUpdt == leabra.FastSpike:
                if (cyc+1)%10 == 0:
                    ss.UpdateView(train)
        # threshold stopping, like AlphaCycTest, but only on long trials
        outact = out.Pools[0].Inhib.Act.Max
        if cycs > 100 and outact > 0.51: # only for long trials
            break
    # single QuarterFinal at the end (no per-quarter processing in this mode)
    ss.Net.QuarterFinal(ss.Time)
    ss.Time.QuarterInc()
    if ss.ViewOn:
        if viewUpdt <= leabra.Quarter:
            ss.UpdateView(train)
        if viewUpdt == leabra.Phase:
            ss.UpdateView(train)
    ss.UpdateView(False)
def ApplyInputs(ss, en):
    """
    ApplyInputs applies input patterns from given environment.
    It is good practice to have this be a separate method with appropriate
    args so that it can be used for various different contexts
    (training, testing, etc).
    """
    # InitExt clears any existing inputs -- not strictly necessary if always
    # going to the same layers, but good practice and cheap anyway
    ss.Net.InitExt()
    lays = go.Slice_string(["Colors", "Words", "Output", "PFC"])
    for lnm in lays :
        ly = leabra.Layer(ss.Net.LayerByName(lnm))
        pats = en.State(ly.Nm)
        if pats != 0:  # env may not provide a state for every layer
            ly.ApplyExt(pats)
def TrainTrial(ss):
    """
    TrainTrial runs one trial of training using TrainEnv.
    Handles epoch rollover (logging, periodic testing) and run rollover.
    """
    if ss.NeedsNewRun:
        ss.NewRun()
    ss.TrainEnv.Step()
    # Key to query counters FIRST because current state is in NEXT epoch
    # if epoch counter has changed
    epc = env.CounterCur(ss.TrainEnv, env.Epoch)
    chg = env.CounterChg(ss.TrainEnv, env.Epoch)
    if chg:
        ss.LogTrnEpc(ss.TrnEpcLog)
        if ss.ViewOn and ss.TrainUpdt.value > leabra.AlphaCycle:
            ss.UpdateView(True)
        if ss.TestInterval > 0 and epc%ss.TestInterval == 0: # note: epc is *next* so won't trigger first time
            ss.TestAll()
        if epc >= ss.MaxEpcs:
            # done with training..
            ss.RunEnd()
            if ss.TrainEnv.Run.Incr(): # we are done!
                ss.StopNow = True
                return
            else:
                ss.NeedsNewRun = True  # defer NewRun to start of next trial
                return
    ss.SetParamsSet("Training", "Network", False)
    out = leabra.Layer(ss.Net.LayerByName("Output"))
    out.SetType(emer.Target)  # output is a clamped target during training
    ss.ApplyInputs(ss.TrainEnv)
    ss.AlphaCyc(True)   # train
    ss.TrialStats(True) # accumulate
def RunEnd(ss):
    """
    RunEnd is called at the end of a run -- save weights, record final log, etc here
    """
    ss.LogRun(ss.RunLog)
def NewRun(ss):
    """
    NewRun initializes a new run of the model, using the TrainEnv.Run counter
    for the new run value
    """
    run = ss.TrainEnv.Run.Cur
    ss.TrainEnv.Init(run)
    ss.TestEnv.Init(run)
    ss.SOATestEnv.Init(run)
    ss.Time.Reset()
    ss.Net.InitWts()  # fresh starting weights each run
    ss.InitStats()
    # epoch logs start over per run; trial logs accumulate across params
    ss.TrnEpcLog.SetNumRows(0)
    ss.TstEpcLog.SetNumRows(0)
    ss.NeedsNewRun = False
def InitStats(ss):
    """
    InitStats initializes all the statistics, especially important for the
    cumulative epoch stats -- called at start of new run.
    Zeroes the epoch accumulators (Sum*), per-trial stats (Trl*), and
    epoch summaries (Epc*).
    """
    for attr in (
        "SumErr", "SumSSE", "SumAvgSSE", "SumCosDiff",   # epoch accumulators
        "TrlErr", "TrlSSE", "TrlAvgSSE",                 # per-trial stats
        "EpcSSE", "EpcAvgSSE", "EpcPctErr", "EpcCosDiff" # epoch summaries
    ):
        setattr(ss, attr, 0)
def TrialStats(ss, accum):
    """
    TrialStats computes the trial-level statistics and adds them to the epoch
    accumulators if accum is true. Stats are accumulated here on the Sim side
    so the core algorithm side remains as simple as possible and doesn't need
    to worry about different time-scales of accumulation; testing stats are
    instead aggregated directly from log data.
    """
    out = leabra.Layer(ss.Net.LayerByName("Output"))
    ss.TrlCosDiff = float(out.CosDiff.Cos)
    ss.TrlSSE = out.SSE(0.5)  # 0.5 = per-unit tolerance -- right side of .5
    ss.TrlAvgSSE = ss.TrlSSE / len(out.Neurons)
    # any residual error counts the whole trial as wrong
    ss.TrlErr = 1 if ss.TrlSSE > 0 else 0
    if accum:
        ss.SumErr += ss.TrlErr
        ss.SumSSE += ss.TrlSSE
        ss.SumAvgSSE += ss.TrlAvgSSE
        ss.SumCosDiff += ss.TrlCosDiff
def TrainEpoch(ss):
    """
    TrainEpoch runs training trials for the remainder of this epoch,
    stopping when interrupted (StopNow) or when the epoch counter advances.
    """
    ss.StopNow = False
    start_epc = ss.TrainEnv.Epoch.Cur
    done = False
    while not done:
        ss.TrainTrial()
        done = ss.StopNow or ss.TrainEnv.Epoch.Cur != start_epc
    ss.Stopped()
def TrainRun(ss):
    """
    TrainRun runs training trials for the remainder of the current run,
    stopping when interrupted (StopNow) or when the run counter advances.
    """
    ss.StopNow = False
    start_run = ss.TrainEnv.Run.Cur
    done = False
    while not done:
        ss.TrainTrial()
        done = ss.StopNow or ss.TrainEnv.Run.Cur != start_run
    ss.Stopped()
def Train(ss):
    """
    Train runs the full training from this point onward, until StopNow is
    set (by Stop, or when all runs are complete).
    """
    ss.StopNow = False
    keep_going = True
    while keep_going:
        ss.TrainTrial()
        keep_going = not ss.StopNow
    ss.Stopped()
def Stop(ss):
    """
    Stop tells the sim to stop running
    """
    # polled by the Train*/Test* loops, which exit at the next trial boundary
    ss.StopNow = True
def Stopped(ss):
    """
    Stopped is called when a run method stops running -- updates the IsRunning flag and toolbar
    """
    ss.IsRunning = False
    if ss.Win != 0:
        vp = ss.Win.WinViewport2D()
        if ss.ToolBar != 0:
            ss.ToolBar.UpdateActions()  # re-enable actions gated on not-running
        vp.SetNeedsFullRender()
    ss.UpdateClassView()
def SaveWeights(ss, filename):
    """
    SaveWeights saves the network weights -- when called with giv.CallMethod
    it will auto-prompt for filename
    """
    # weights are saved in the standard leabra JSON weights format
    ss.Net.SaveWtsJSON(filename)
def TestTrial(ss, returnOnChg):
    """
    TestTrial runs one trial of testing -- always sequentially presented inputs.
    If returnOnChg, returns (without presenting a trial) when the epoch
    counter rolls over, so callers can detect end of the test set.
    """
    ss.TestEnv.Step()
    chg = env.CounterChg(ss.TestEnv, env.Epoch)
    if chg:
        if ss.ViewOn and ss.TestUpdt.value > leabra.AlphaCycle:
            ss.UpdateView(False)
        ss.LogTstEpc(ss.TstEpcLog)
        if returnOnChg:
            return
    ss.SetParamsSet("Testing", "Network", False)
    out = leabra.Layer(ss.Net.LayerByName("Output"))
    out.SetType(emer.Compare)  # output is compared, not clamped, during testing
    ss.ApplyInputs(ss.TestEnv)
    ss.AlphaCycTest()
    ss.TrialStats(False)
    ss.LogTstTrl(ss.TstTrlLog, ss.TestEnv.Trial.Cur, ss.TestEnv.TrialName.Cur)
def TestAll(ss):
    """
    TestAll runs through the full set of testing items
    """
    ss.SetParamsSet("Testing", "Network", False)
    ss.TestEnv.Init(ss.TrainEnv.Run.Cur)
    while True:
        ss.TestTrial(True) # return on chg -- don't re-present last trial
        chg = env.CounterChg(ss.TestEnv, env.Epoch)
        if chg or ss.StopNow:
            break
def RunTestAll(ss):
    """
    RunTestAll runs through the full set of testing items, has stop running = false at end -- for gui
    """
    ss.StopNow = False
    ss.TestAll()
    ss.Stopped()  # restores toolbar / IsRunning state for the GUI
def SOATestTrial(ss, returnOnChg):
    """
    SOATestTrial runs one trial of SOA (stimulus onset asynchrony) testing --
    always sequentially presented inputs
    """
    ss.SOATestEnv.Step()
    chg = env.CounterChg(ss.SOATestEnv, env.Epoch)
    if chg:
        if ss.ViewOn and ss.TestUpdt.value > leabra.AlphaCycle:
            ss.UpdateView(False)
        if returnOnChg:
            return
    # per-trial SOA parameters come from the SOA patterns table
    trl = ss.SOATestEnv.Trial.Cur
    ss.SOA = int(ss.SOAPats.CellFloat("SOA", trl))
    ss.SOAMaxCyc = int(ss.SOAPats.CellFloat("MaxCycles", trl))
    ss.SOATrlTyp = int(ss.SOAPats.CellFloat("TrialType", trl))
    ss.SetParamsSet("Testing", "Network", False)
    ss.SetParamsSet("SOATesting", "Network", False)
    out = leabra.Layer(ss.Net.LayerByName("Output"))
    out.SetType(emer.Compare)
    islate = "latestim" in ss.SOATestEnv.TrialName.Cur
    # only reset activations when this is not a late stimulus, or there is no SOA gap
    if not islate or ss.SOA == 0:
        ss.Net.InitActs()
    ss.ApplyInputs(ss.SOATestEnv)
    ss.AlphaCycTestCyc(ss.SOAMaxCyc)
    # stats and logging only occur on the late-stimulus trials
    if "latestim" in ss.SOATestEnv.TrialName.Cur:
        ss.TrialStats(False)
        ss.LogSOATrl(ss.SOATrlLog, ss.SOATestEnv.Trial.Cur)
def SOATestAll(ss):
    """
    SOATestAll runs through the full set of SOA testing items
    """
    ss.SOATestEnv.Init(ss.TrainEnv.Run.Cur)
    ss.SOATrlLog.SetNumRows(0)  # SOA log is rebuilt from scratch each pass
    while True:
        ss.SOATestTrial(True) # return on chg
        chg = env.CounterChg(ss.SOATestEnv, env.Epoch)
        if chg or ss.StopNow:
            break
def RunSOATestAll(ss):
    """
    RunSOATestAll runs through the full set of testing items, has stop running = false at end -- for gui
    """
    ss.StopNow = False
    ss.SOATestAll()
    ss.Stopped()  # restores toolbar / IsRunning state for the GUI
def SetParams(ss, sheet, setMsg):
    """
    SetParams sets the params for "Base" and then current ParamSet.
    If sheet is empty, then it applies all avail sheets (e.g., Network, Sim)
    otherwise just the named sheet
    if setMsg = true then we output a message for each param that was set.
    """
    if sheet == "":
        # validate only when applying everything, to catch sheet-name typos
        ss.Params.ValidateSheets(go.Slice_string(["Network", "Sim"]))
    ss.SetParamsSet("Base", sheet, setMsg)
    if ss.ParamSet != "" and ss.ParamSet != "Base":
        # ParamSet may be a space-separated list of set names, applied in order
        sps = ss.ParamSet.split()
        for ps in sps:
            ss.SetParamsSet(ps, sheet, setMsg)
def SetParamsSet(ss, setNm, sheet, setMsg):
    """
    SetParamsSet sets the params for given params.Set name.
    If sheet is empty, then it applies all avail sheets (e.g., Network, Sim)
    otherwise just the named sheet
    if setMsg = true then we output a message for each param that was set.
    """
    pset = ss.Params.SetByNameTry(setNm)
    if sheet == "" or sheet == "Network":
        # keep the Testing VmTau override in sync with the current GUI value
        spo = ss.Params.SetByName("Testing").SheetByName("Network").SelByName("Layer")
        spo.Params.SetParamByName("Layer.Act.Dt.VmTau", ("%g" % (ss.DtVmTau)))
        if "Network" in pset.Sheets:
            netp = pset.SheetByNameTry("Network")
            ss.Net.ApplyParams(netp, setMsg)
        # re-apply current PFC -> Hidden strength after any param overwrite
        hid = leabra.Layer(ss.Net.LayerByName("Hidden"))
        fmpfc = leabra.Prjn(hid.RcvPrjns.SendName("PFC"))
        fmpfc.WtScale.Rel = ss.FmPFC
    if sheet == "" or sheet == "Sim":
        if "Sim" in pset.Sheets:
            simp = pset.SheetByNameTry("Sim")
            pyparams.ApplyParams(ss, simp, setMsg)
    # Removed a duplicated "Sim" block here that rebound the local `ss` to the
    # sheet object itself (ss = pset.Sheets["Sim"]) and then called
    # simp.Apply(ss, setMsg) -- i.e. it applied the Sim sheet to the sheet
    # object rather than to the Sim, duplicating the pyparams call above.
def OpenPat(ss, dt, fname, name, desc):
    """Load a tab-separated pattern file into dt and tag it with name/desc metadata."""
    dt.OpenCSV(fname, etable.Tab)
    dt.SetMetaData("name", name)
    dt.SetMetaData("desc", desc)
def OpenPats(ss):
    """Load all three pattern tables (train, test, SOA test) from tsv files."""
    ss.OpenPat(ss.TrainPats, "stroop_train.tsv", "Stroop Train", "Stroop Training patterns")
    ss.OpenPat(ss.TestPats, "stroop_test.tsv", "Stroop Test", "Stroop Testing patterns")
    ss.OpenPat(ss.SOAPats, "stroop_soa.tsv", "Stroop SOA", "Stroop SOA Testing patterns")
def ValsTsr(ss, name):
    """
    ValsTsr gets the value tensor of the given name, creating and caching a
    new one on first use.
    """
    if name not in ss.ValsTsrs:
        ss.ValsTsrs[name] = etensor.Float32()
    return ss.ValsTsrs[name]
def LogTrnEpc(ss, dt):
    """
    LogTrnEpc adds data from current epoch to the TrnEpcLog table.
    computes epoch averages prior to logging.
    """
    row = dt.Rows
    dt.SetNumRows(row + 1)
    epc = ss.TrainEnv.Epoch.Prv  # this is triggered by increment so use previous value
    nt = float(len(ss.TrainEnv.Order))  # number of trials presented this epoch
    # convert accumulated sums to per-epoch means / rates, then reset them
    ss.EpcSSE = ss.SumSSE / nt
    ss.SumSSE = 0
    ss.EpcAvgSSE = ss.SumAvgSSE / nt
    ss.SumAvgSSE = 0
    ss.EpcPctErr = float(ss.SumErr) / nt
    ss.SumErr = 0
    ss.EpcPctCor = 1 - ss.EpcPctErr
    ss.EpcCosDiff = ss.SumCosDiff / nt
    ss.SumCosDiff = 0
    dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
    dt.SetCellFloat("Epoch", row, float(epc))
    dt.SetCellFloat("SSE", row, ss.EpcSSE)
    dt.SetCellFloat("AvgSSE", row, ss.EpcAvgSSE)
    dt.SetCellFloat("PctErr", row, ss.EpcPctErr)
    dt.SetCellFloat("PctCor", row, ss.EpcPctCor)
    dt.SetCellFloat("CosDiff", row, ss.EpcCosDiff)
    ss.TrnEpcPlot.GoUpdate()
    if ss.TrnEpcFile != 0:
        # write CSV headers once at the very start of the first run
        if ss.TrainEnv.Run.Cur == 0 and epc == 0:
            dt.WriteCSVHeaders(ss.TrnEpcFile, etable.Tab)
        dt.WriteCSVRow(ss.TrnEpcFile, row, etable.Tab)
def ConfigTrnEpcLog(ss, dt):
    """Configure schema and metadata for the training epoch log table."""
    dt.SetMetaData("name", "TrnEpcLog")
    dt.SetMetaData("desc", "Record of performance over epochs of training")
    dt.SetMetaData("read-only", "true")
    dt.SetMetaData("precision", str(LogPrec))
    sch = etable.Schema(
        [etable.Column("Run", etensor.INT64, go.nil, go.nil),
        etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
        etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
    )
    dt.SetFromSchema(sch, 0)
def ConfigTrnEpcPlot(ss, plt, dt):
    """Configure the training epoch plot; PctErr is shown by default."""
    plt.Params.Title = "Stroop Epoch Plot"
    plt.Params.XAxisCol = "Epoch"
    plt.SetTable(dt)
    # order of params: on, fixMin, min, fixMax, max
    plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
    plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("PctErr", eplot.On, eplot.FixMin, 0, eplot.FixMax, 1)
    plt.SetColParams("PctCor", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    return plt
def LogTstTrl(ss, dt, trl, trlnm):
    """
    LogTstTrl adds data from current trial to the TstTrlLog table.
    log always contains number of testing items
    """
    epc = ss.TrainEnv.Epoch.Prv  # this is triggered by increment so use previous value
    row = dt.Rows
    # NOTE(review): row == dt.Rows makes this condition always true, so a new
    # row is appended on every call -- comparable sims use row = trl here to
    # overwrite in place; verify intent.
    if dt.Rows <= row:
        dt.SetNumRows(row + 1)
    dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
    dt.SetCellFloat("Epoch", row, float(epc))
    dt.SetCellFloat("Trial", row, float(trl%3))  # NOTE(review): %3 folds trial index into 3 values -- confirm
    dt.SetCellString("TrialName", row, trlnm)
    dt.SetCellFloat("Cycle", row, float(ss.Time.Cycle))
    dt.SetCellFloat("Err", row, ss.TrlErr)
    dt.SetCellFloat("SSE", row, ss.TrlSSE)
    dt.SetCellFloat("AvgSSE", row, ss.TrlAvgSSE)
    dt.SetCellFloat("CosDiff", row, ss.TrlCosDiff)
    for lnm in ss.TstRecLays :
        # record minus-phase activations for each monitored layer
        tsr = ss.ValsTsr(lnm)
        ly = leabra.Layer(ss.Net.LayerByName(lnm))
        ly.UnitValsTensor(tsr, "ActM") # get minus phase act
        dt.SetCellTensor(lnm, row, tsr)
    # note: essential to use Go version of update when called from another goroutine
    ss.TstTrlPlot.GoUpdate()
def ConfigTstTrlLog(ss, dt):
    """Configure schema and metadata for the test trial log table."""
    dt.SetMetaData("name", "TstTrlLog")
    dt.SetMetaData("desc", "Record of testing per input pattern")
    dt.SetMetaData("read-only", "true")
    dt.SetMetaData("precision", str(LogPrec))
    nt = ss.TestEnv.Table.Len() # number in view
    sch = etable.Schema(
        [etable.Column("Run", etensor.INT64, go.nil, go.nil),
        etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
        etable.Column("Trial", etensor.INT64, go.nil, go.nil),
        etable.Column("TrialName", etensor.STRING, go.nil, go.nil),
        etable.Column("Cycle", etensor.INT64, go.nil, go.nil),
        etable.Column("Err", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
    )
    # one tensor column per monitored layer, shaped to that layer
    for lnm in ss.TstRecLays :
        ly = leabra.Layer(ss.Net.LayerByName(lnm))
        sch.append( etable.Column(lnm, etensor.FLOAT64, ly.Shp.Shp, go.nil))
    dt.SetFromSchema(sch, nt)
def ConfigTstTrlPlot(ss, plt, dt):
    """Configure the test trial plot; response Cycle is shown by default."""
    plt.Params.Title = "Stroop Test Trial Plot"
    plt.Params.XAxisCol = "Trial"
    plt.SetTable(dt)
    plt.Params.Points = True
    # order of params: on, fixMin, min, fixMax, max
    plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Trial", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("TrialName", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Cycle", eplot.On, eplot.FixMin, 0, eplot.FixMax, 250) # default plot
    plt.SetColParams("Err", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    for lnm in ss.TstRecLays :
        cp = plt.SetColParams(lnm, eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
        cp.TensorIdx = -1 # plot all
    return plt
def LogSOATrl(ss, dt, trl):
    """
    LogSOATrl adds data from current trial to the SOATrlLog table.
    log always contains number of testing items
    """
    epc = ss.TrainEnv.Epoch.Prv  # this is triggered by increment so use previous value
    row = dt.Rows
    # NOTE(review): row == dt.Rows makes this condition always true, so a new
    # row is appended on every call -- verify intent (see LogTstTrl).
    if dt.Rows <= row:
        dt.SetNumRows(row + 1)
    # condition names indexed by SOATrlTyp (from the SOA patterns table)
    conds = go.Slice_string(["Color_Conf", "Color_Cong", "Word_Conf", "Word_Cong"])
    dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
    dt.SetCellFloat("Epoch", row, float(epc))
    dt.SetCellFloat("Trial", row, float(ss.SOATrlTyp))
    dt.SetCellFloat("SOA", row, float(ss.SOA))
    dt.SetCellString("TrialName", row, conds[ss.SOATrlTyp])
    dt.SetCellFloat("Cycle", row, float(ss.Time.Cycle))
    dt.SetCellFloat("Err", row, ss.TrlErr)
    dt.SetCellFloat("SSE", row, ss.TrlSSE)
    dt.SetCellFloat("AvgSSE", row, ss.TrlAvgSSE)
    dt.SetCellFloat("CosDiff", row, ss.TrlCosDiff)
    for lnm in ss.TstRecLays :
        tsr = ss.ValsTsr(lnm)
        ly = leabra.Layer(ss.Net.LayerByName(lnm))
        ly.UnitValsTensor(tsr, "ActM") # get minus phase act
        dt.SetCellTensor(lnm, row, tsr)
    # note: essential to use Go version of update when called from another goroutine
    ss.SOATrlPlot.GoUpdate()
def ConfigSOATrlLog(ss, dt):
    """Configure schema and metadata for the SOA test trial log table."""
    dt.SetMetaData("name", "SOATrlLog")
    dt.SetMetaData("desc", "Record of testing per input pattern")
    dt.SetMetaData("read-only", "true")
    dt.SetMetaData("precision", str(LogPrec))
    nt = ss.SOATestEnv.Table.Len() # number in view
    sch = etable.Schema(
        [etable.Column("Run", etensor.INT64, go.nil, go.nil),
        etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
        etable.Column("Trial", etensor.INT64, go.nil, go.nil),
        etable.Column("SOA", etensor.INT64, go.nil, go.nil),
        etable.Column("TrialName", etensor.STRING, go.nil, go.nil),
        etable.Column("Cycle", etensor.INT64, go.nil, go.nil),
        etable.Column("Err", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
    )
    # one tensor column per monitored layer, shaped to that layer
    for lnm in ss.TstRecLays :
        ly = leabra.Layer(ss.Net.LayerByName(lnm))
        sch.append( etable.Column(lnm, etensor.FLOAT64, ly.Shp.Shp, go.nil))
    dt.SetFromSchema(sch, nt)
def ConfigSOATrlPlot(ss, plt, dt):
    """Configure the SOA test plot: Cycle vs. SOA, one line per condition."""
    plt.Params.Title = "Stroop SOA Test Trial Plot"
    plt.Params.XAxisCol = "SOA"
    plt.Params.LegendCol = "TrialName"
    plt.SetTable(dt)
    plt.Params.Points = True
    # order of params: on, fixMin, min, fixMax, max
    plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Trial", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("SOA", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("TrialName", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Cycle", eplot.On, eplot.FixMin, 0, eplot.FixMax, 220) # default plot
    plt.SetColParams("Err", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("SSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    for lnm in ss.TstRecLays :
        cp = plt.SetColParams(lnm, eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
        cp.TensorIdx = -1 # plot all
    return plt
def LogTstEpc(ss, dt):
    """LogTstEpc aggregates the test trial log into one epoch summary row."""
    row = dt.Rows
    dt.SetNumRows(row + 1)
    trl = ss.TstTrlLog
    tix = etable.NewIdxView(trl)
    epc = ss.TrainEnv.Epoch.Prv # ?
    # note: this shows how to use agg methods to compute summary data from another
    # data table, instead of incrementing on the Sim
    dt.SetCellFloat("Run", row, float(ss.TrainEnv.Run.Cur))
    dt.SetCellFloat("Epoch", row, float(epc))
    dt.SetCellFloat("SSE", row, agg.Sum(tix, "SSE")[0])
    dt.SetCellFloat("AvgSSE", row, agg.Mean(tix, "AvgSSE")[0])
    dt.SetCellFloat("PctErr", row, agg.Mean(tix, "Err")[0])
    dt.SetCellFloat("PctCor", row, 1-agg.Mean(tix, "Err")[0])
    dt.SetCellFloat("CosDiff", row, agg.Mean(tix, "CosDiff")[0])
    # note: essential to use Go version of update when called from another goroutine
    ss.TstEpcPlot.GoUpdate()
def ConfigTstEpcLog(ss, dt):
    """Configure schema and metadata for the testing epoch summary table."""
    dt.SetMetaData("name", "TstEpcLog")
    dt.SetMetaData("desc", "Summary stats for testing trials")
    dt.SetMetaData("read-only", "true")
    dt.SetMetaData("precision", str(LogPrec))
    sch = etable.Schema(
        [etable.Column("Run", etensor.INT64, go.nil, go.nil),
        etable.Column("Epoch", etensor.INT64, go.nil, go.nil),
        etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
    )
    dt.SetFromSchema(sch, 0)
def ConfigTstEpcPlot(ss, plt, dt):
    """Configure the testing epoch plot; SSE is shown by default."""
    plt.Params.Title = "Stroop Testing Epoch Plot"
    plt.Params.XAxisCol = "Epoch"
    plt.SetTable(dt)
    # order of params: on, fixMin, min, fixMax, max
    plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("Epoch", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("SSE", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0) # default plot
    plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    plt.SetColParams("PctCor", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    return plt
def LogRun(ss, dt):
    """
    LogRun adds data from current run to the RunLog table.
    """
    run = ss.TrainEnv.Run.Cur # this is NOT triggered by increment yet -- use Cur
    row = dt.Rows
    dt.SetNumRows(row + 1)
    epclog = ss.TrnEpcLog
    epcix = etable.NewIdxView(epclog)
    # compute mean over last N epochs for run level
    nlast = 5
    if nlast > epcix.Len()-1:
        nlast = epcix.Len() - 1
    epcix.Idxs = epcix.Idxs[epcix.Len()-nlast:]
    params = ""  # no param-search identifier recorded in this sim
    dt.SetCellFloat("Run", row, float(run))
    dt.SetCellString("Params", row, params)
    dt.SetCellFloat("SSE", row, agg.Mean(epcix, "SSE")[0])
    dt.SetCellFloat("AvgSSE", row, agg.Mean(epcix, "AvgSSE")[0])
    dt.SetCellFloat("PctErr", row, agg.Mean(epcix, "PctErr")[0])
    dt.SetCellFloat("PctCor", row, agg.Mean(epcix, "PctCor")[0])
    dt.SetCellFloat("CosDiff", row, agg.Mean(epcix, "CosDiff")[0])
    # summary stats across runs, grouped by Params
    runix = etable.NewIdxView(dt)
    spl = split.GroupBy(runix, go.Slice_string(["Params"]))
    split.Desc(spl, "PctCor")
    ss.RunStats = spl.AggsToTable(etable.AddAggName)
    # note: essential to use Go version of update when called from another goroutine
    ss.RunPlot.GoUpdate()
    if ss.RunFile != 0:
        if row == 0:
            dt.WriteCSVHeaders(ss.RunFile, etable.Tab)
        dt.WriteCSVRow(ss.RunFile, row, etable.Tab)
def ConfigRunLog(ss, dt):
    """Configure schema and metadata for the per-run summary log table."""
    dt.SetMetaData("name", "RunLog")
    dt.SetMetaData("desc", "Record of performance at end of training")
    dt.SetMetaData("read-only", "true")
    dt.SetMetaData("precision", str(LogPrec))
    sch = etable.Schema(
        [etable.Column("Run", etensor.INT64, go.nil, go.nil),
        etable.Column("Params", etensor.STRING, go.nil, go.nil),
        etable.Column("SSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("AvgSSE", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("PctErr", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("PctCor", etensor.FLOAT64, go.nil, go.nil),
        etable.Column("CosDiff", etensor.FLOAT64, go.nil, go.nil)]
    )
    dt.SetFromSchema(sch, 0)
def ConfigRunPlot(ss, plt, dt):
    """Configure the per-run summary plot; SSE is shown by default."""
    plt.Params.Title = "Stroop Run Plot"
    plt.Params.XAxisCol = "Run"
    plt.SetTable(dt)
    # order of params: on, fixMin, min, fixMax, max
    plt.SetColParams("Run", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("SSE", eplot.On, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("AvgSSE", eplot.Off, eplot.FixMin, 0, eplot.FloatMax, 0)
    plt.SetColParams("PctErr", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    plt.SetColParams("PctCor", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    plt.SetColParams("CosDiff", eplot.Off, eplot.FixMin, 0, eplot.FixMax, 1)
    return plt
def ConfigNetView(ss, nv):
    """Set up the netview camera and per-layer unit labels."""
    nv.ViewDefaults()
    nv.Scene().Camera.Pose.Pos.Set(0.1, 1.8, 3.5)
    nv.Scene().Camera.LookAt(mat32.Vec3(0.1, 0.15, 0), mat32.Vec3(0, 1, 0))
    # unit labels, one entry per layer in lays below (same order)
    labs = go.Slice_string([" g r", " G R", " gr rd", " g r G R", " cn wr"])
    nv.ConfigLabels(labs)
    lays = go.Slice_string(["Colors", "Words", "Output", "Hidden", "PFC"])
    for li, lnm in enumerate(lays):
        # position each label just above/in front of its layer
        ly = nv.LayerByName(lnm)
        lbl = nv.LabelByName(labs[li])
        lbl.Pose = ly.Pose
        lbl.Pose.Pos.Y += .2
        lbl.Pose.Pos.Z += .02
        lbl.Pose.Scale.SetMul(mat32.Vec3(0.4, 0.06, 0.5))
def ConfigGui(ss):
    """
    ConfigGui configures the GoGi gui interface for this simulation:
    main window, netview + plot tabs, toolbar actions, and main menu.
    Starts the GUI event loop at the end.
    """
    width = 1600
    height = 1200
    gi.SetAppName("stroop")
    gi.SetAppAbout('illustrates how the PFC can produce top-down biasing for executive control, in the context of the widely-studied Stroop task. See <a href="https://github.com/CompCogNeuro/sims/blob/master/ch10/stroop/README.md">README.md on GitHub</a>.</p>')
    win = gi.NewMainWindow("stroop", "Stroop", width, height)
    ss.Win = win
    vp = win.WinViewport2D()
    ss.vp = vp
    updt = vp.UpdateStart()
    mfr = win.SetMainFrame()
    tbar = gi.AddNewToolBar(mfr, "tbar")
    tbar.SetStretchMaxWidth()
    ss.ToolBar = tbar
    # left: class view of Sim params; right: tab view with netview + plots
    split = gi.AddNewSplitView(mfr, "split")
    split.Dim = mat32.X
    split.SetStretchMax()
    cv = ss.NewClassView("sv")
    cv.AddFrame(split)
    cv.Config()
    tv = gi.AddNewTabView(split, "tv")
    nv = netview.NetView()
    tv.AddTab(nv, "NetView")
    nv.Var = "Act"
    nv.SetNet(ss.Net)
    ss.NetView = nv
    ss.ConfigNetView(nv)
    plt = eplot.Plot2D()
    tv.AddTab(plt, "TrnEpcPlot")
    ss.TrnEpcPlot = ss.ConfigTrnEpcPlot(plt, ss.TrnEpcLog)
    plt = eplot.Plot2D()
    tv.AddTab(plt, "TstTrlPlot")
    ss.TstTrlPlot = ss.ConfigTstTrlPlot(plt, ss.TstTrlLog)
    plt = eplot.Plot2D()
    tv.AddTab(plt, "SOATrlPlot")
    ss.SOATrlPlot = ss.ConfigSOATrlPlot(plt, ss.SOATrlLog)
    plt = eplot.Plot2D()
    tv.AddTab(plt, "TstEpcPlot")
    ss.TstEpcPlot = ss.ConfigTstEpcPlot(plt, ss.TstEpcLog)
    plt = eplot.Plot2D()
    tv.AddTab(plt, "RunPlot")
    ss.RunPlot = ss.ConfigRunPlot(plt, ss.RunLog)
    split.SetSplitsList(go.Slice_float32([.2, .8]))
    # toolbar actions dispatch to the module-level *CB callbacks
    recv = win.This()
    tbar.AddAction(gi.ActOpts(Label="Init", Icon="update", Tooltip="Initialize everything including network weights, and start over. Also applies current params.", UpdateFunc=UpdtFuncNotRunning), recv, InitCB)
    tbar.AddAction(gi.ActOpts(Label="Train", Icon="run", Tooltip="Starts the network training, picking up from wherever it may have left off. If not stopped, training will complete the specified number of Runs through the full number of Epochs of training, with testing automatically occuring at the specified interval.", UpdateFunc=UpdtFuncNotRunning), recv, TrainCB)
    tbar.AddAction(gi.ActOpts(Label="Stop", Icon="stop", Tooltip="Interrupts running. Hitting Train again will pick back up where it left off.", UpdateFunc=UpdtFuncRunning), recv, StopCB)
    tbar.AddAction(gi.ActOpts(Label="Step Trial", Icon="step-fwd", Tooltip="Advances one training trial at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepTrialCB)
    tbar.AddAction(gi.ActOpts(Label="Step Epoch", Icon="fast-fwd", Tooltip="Advances one epoch (complete set of training patterns) at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepEpochCB)
    tbar.AddAction(gi.ActOpts(Label="Step Run", Icon="fast-fwd", Tooltip="Advances one full training Run at a time.", UpdateFunc=UpdtFuncNotRunning), recv, StepRunCB)
    tbar.AddSeparator("test")
    tbar.AddAction(gi.ActOpts(Label="Test Trial", Icon="step-fwd", Tooltip="Runs the next testing trial.", UpdateFunc=UpdtFuncNotRunning), recv, TestTrialCB)
    tbar.AddAction(gi.ActOpts(Label="Test All", Icon="fast-fwd", Tooltip="Tests all of the testing trials.", UpdateFunc=UpdtFuncNotRunning), recv, TestAllCB)
    tbar.AddAction(gi.ActOpts(Label= "SOA Test Trial", Icon= "step-fwd", Tooltip= "Runs the next testing trial.", UpdateFunc=UpdtFuncNotRunning), recv, SOATestTrialCB)
    tbar.AddAction(gi.ActOpts(Label= "SOA Test All", Icon= "fast-fwd", Tooltip= "Tests all of the testing trials.", UpdateFunc=UpdtFuncNotRunning), recv, SOATestAllCB)
    tbar.AddSeparator("misc")
    tbar.AddAction(gi.ActOpts(Label= "Reset TstTrlLog", Icon= "reset", Tooltip= "Reset the test trial log -- otherwise it accumulates to compare across parameters etc."), recv, ResetTstTrlLogCB)
    tbar.AddAction(gi.ActOpts(Label="New Seed", Icon="new", Tooltip="Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time."), recv, NewRndSeedCB)
    tbar.AddAction(gi.ActOpts(Label= "Defaults", Icon= "update", Tooltip= "Restore initial default parameters.", UpdateFunc=UpdtFuncNotRunning), recv, DefaultsCB)
    tbar.AddAction(gi.ActOpts(Label="README", Icon="file-markdown", Tooltip="Opens your browser on the README file that contains instructions for how to run this model."), recv, ReadmeCB)
    # main menu
    appnm = gi.AppName()
    mmen = win.MainMenu
    mmen.ConfigMenus(go.Slice_string([appnm, "File", "Edit", "Window"]))
    amen = gi.Action(win.MainMenu.ChildByName(appnm, 0))
    amen.Menu.AddAppMenu(win)
    emen = gi.Action(win.MainMenu.ChildByName("Edit", 1))
    emen.Menu.AddCopyCutPaste(win)
    # note: Command in shortcuts is automatically translated into Control for
    # Linux, Windows or Meta for MacOS
    # fmen := win.MainMenu.ChildByName("File", 0).(*gi.Action)
    # fmen.Menu.AddAction(gi.ActOpts{Label: "Open", Shortcut: "Command+O"},
    # win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
    # FileViewOpenSVG(vp)
    # })
    # fmen.Menu.AddSeparator("csep")
    # fmen.Menu.AddAction(gi.ActOpts{Label: "Close Window", Shortcut: "Command+W"},
    # win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
    # win.Close()
    # })
    win.MainMenuUpdated()
    vp.UpdateEndNoSig(updt)
    win.GoStartEventLoop()
# TheSim is the overall state for this simulation
TheSim = Sim()
def main(argv):
    """Build the sim state, configure the GUI, and initialize the run."""
    TheSim.Config()
    TheSim.ConfigGui()  # also starts the GUI event loop (win.GoStartEventLoop)
    TheSim.Init()
main(sys.argv[1:])
| StarcoderdataPython |
11260087 | <reponame>nazroll/gilacoolbot
# Minimal Flask-RESTful app exposing the bot's Facebook webhook.
import bot
from flask import Flask
from flask_restful import Api
app = Flask(__name__)
api = Api(app)
# Route Facebook webhook traffic to the bot's handler resource.
api.add_resource(bot.facebook.MainHandler, '/facebook/')
if __name__ == '__main__':
    # Debug server for local development only -- not for production use.
    app.run(debug=True)
| StarcoderdataPython |
11222573 | import warnings
from .defaults import *  # noqa
try:
    # optional machine-local overrides, layered over the defaults
    from .locals import *  # noqa
except ImportError:
    # NOTE(review): ImportWarning is ignored by Python's default warning
    # filters, so this message may never surface -- confirm the category.
    warnings.warn(
        'No settings file found. Did you remember to '
        'copy local-dist.py to local.py?', ImportWarning,
    )
| StarcoderdataPython |
81029 | """
2D–3D Geometric Fusion network using Multi-Neighbourhood Graph Convolution for RGB-D indoor scene classification
2021 <NAME> <<EMAIL>>
"""
import torch
import os
import argparse
from tqdm import tqdm
import twostream_network as models
from pc_img_h5_dataset import PCIMGH5Dataset, custom_collate
from Fusion2D3DMUNEGC.utilities import utils
from Fusion2D3DMUNEGC.utilities import metrics
@torch.no_grad()
def test(model, loader, label_names, cuda=True):
    """
    Run the two-stream model over all batches in loader and return the
    mean per-class accuracy computed from a confusion matrix.
    """
    model.eval()
    progress = tqdm(loader, ncols=100)
    conf_mat = metrics.ConfusionMatrixMeter(label_names, cmap='Blues')
    for batch_a, batch_b in progress:
        if cuda:
            batch_a = batch_a.to('cuda:0')
            batch_b = batch_b.to('cuda:0')
        preds = model(batch_a, batch_b)
        # compare against ground truth on whatever device the output landed on
        targets = batch_a.y.to(preds.device)
        conf_mat.add(targets.detach().cpu().data.numpy(),
                     preds.detach().cpu().data.numpy())
        torch.cuda.empty_cache()
    return conf_mat.mean_acc()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='FusionClassificationStage')
    # --- runtime / hardware options ---
    parser.add_argument('--batch_size', type=int, default=32, help='int, batch size. Default 32')
    parser.add_argument('--cuda', default=False, action='store_true', help='Bool, activate cuda')
    parser.add_argument('--multigpu', default=False, action='store_true', help='Bool, activate multigpu')
    parser.add_argument('--lastgpu', type=int, default=0,
                        help='int, parameter to indicate which is the last gpu in multigpu scenarios')
    parser.add_argument('--nworkers', type=int, default=4,
                        help='int, num subprocesses to use for data loading. 0 means that the data will be loaded in the main process')
    # --- dataset layout options ---
    parser.add_argument('--dataset_path', type=str, default='datasets/nyu_v2',
                        help='str, root path to the dataset folder')
    parser.add_argument('--dataset_folder_b1', type=str, default='h5/h5_feat3d',
                        help="str, folder that contains the h5 files for branch 1")
    parser.add_argument('--dataset_folder_b2', type=str, default='h5/h5_feat2d',
                        help='str, folder that contains the h5 files for branch 2')
    parser.add_argument('--test_split', type=str, default='list/test_list.txt',
                        help="str, path to the txt file that contains the list of files used on testing")
    parser.add_argument('--classname', type=str, default='list/scenes_labels.txt',
                        help='str, path to the file that contains the name of the classes')
    parser.add_argument('--dataset', type=str, default='nyu_v2', help='str, name of the dataset used')
    parser.add_argument('--pos_int16', default=False, action='store_true',
                        help='bool, positions are encoded with int16')
    # --- model architecture options ---
    parser.add_argument('--nfeatures_b1', type=int, default='128',
                        help='int, number of features used as input on branch 1')
    parser.add_argument('--nfeatures_b2', type=int, default='512',
                        help='int, number of features used as input on branch 2')
    parser.add_argument('--proj_b1', default=False, action='store_true',
                        help='bool, activate projection function in branch 1')
    parser.add_argument('--proj_b2', default=False, action='store_true',
                        help='bool, activate projection function in branch 2')
    parser.add_argument('--features_proj_b1', type=int, default='256',
                        help='number of output channels of the projection in branch 1')
    parser.add_argument('--features_proj_b2', type=int, default='256',
                        help='number of output channels of the projection in branch 2')
    parser.add_argument('--rad_fuse_pool', type=float, default=0.35,
                        help='float, radius used to create the voxel used to fuse both branches')
    parser.add_argument('--classification_model', type=str, default='gp_avg, b, r, d_0.5, f_10_cp_1',
                        help='str, defines the model as a sequence of layers')
    parser.add_argument('--pretrain_path', type=str, default='-', help='str, path to the checkpoint to be used')
    args = parser.parse_args()

    features_b1 = args.nfeatures_b1
    features_b2 = args.nfeatures_b2
    features_proj_b1 = args.features_proj_b1
    features_proj_b2 = args.features_proj_b2

    # Build the two-stream fusion network from the layer-spec string.
    model = models.TwoStreamNetwork(args.classification_model, features_b1,
                                    features_b2, args.rad_fuse_pool,
                                    args.multigpu,
                                    features_proj_b1=features_proj_b1,
                                    features_proj_b2=features_proj_b2,
                                    proj_b1=args.proj_b1,
                                    proj_b2=args.proj_b2)

    # A valid pretrained checkpoint is mandatory for evaluation.
    print('loading pretrain')
    if (os.path.isfile(args.pretrain_path)):
        _, _, model_state, _ = utils.load_checkpoint(args.pretrain_path)
        model.load_state_dict(model_state)
    else:
        print('Wrong pretrain path')
        exit()
    print(model)

    if args.cuda is True and args.multigpu is False:
        model.to('cuda:0')

    label_path = os.path.join(args.dataset_path, args.classname)
    if not os.path.isfile(label_path):
        raise RuntimeError("label file does not exist")
    label_names = utils.read_string_list(label_path)

    test_dataset = PCIMGH5Dataset(args.dataset_path, args.dataset_folder_b1,
                                  args.dataset_folder_b2,
                                  args.test_split,
                                  pos_int16=args.pos_int16)

    # No shuffling: evaluation order must be deterministic.
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              num_workers=args.nworkers,
                                              shuffle=False,
                                              pin_memory=True,
                                              collate_fn=custom_collate
                                              )

    mean_acc = test(model, test_loader,
                    label_names,
                    cuda=args.cuda)
    print("Mean acc: ", mean_acc)
| StarcoderdataPython |
1622573 | from typing import Any
from .base import api_function, BaseFunction
from ..request import Request
class Manager(BaseFunction):
    """
    Provides controlling of the gateway/manager servers.

    .. versionadded:: 18.12
    """

    @api_function
    @classmethod
    async def status(cls):
        """
        Returns the current status of the configured API server.
        """
        rqst = Request('GET', '/manager/status')
        # NOTE(review): sending a JSON body with a GET request looks
        # suspicious — confirm the manager API contract.
        rqst.set_json({
            'status': 'running',
        })
        async with rqst.fetch() as resp:
            return await resp.json()

    @api_function
    @classmethod
    async def freeze(cls, force_kill: bool = False):
        """
        Freezes the configured API server.

        Any API clients will no longer be able to create new compute sessions nor
        create and modify vfolders/keypairs/etc.
        This is used to enter the maintenance mode of the server for unobtrusive
        manager and/or agent upgrades.

        :param force_kill: If set ``True``, immediately shuts down all running
            compute sessions forcibly. If not set, clients who have running compute
            session are still able to interact with them though they cannot create
            new compute sessions.
        """
        rqst = Request('PUT', '/manager/status')
        rqst.set_json({
            'status': 'frozen',
            'force_kill': force_kill,
        })
        async with rqst.fetch():
            pass

    @api_function
    @classmethod
    async def unfreeze(cls):
        """
        Unfreezes the configured API server so that it resumes to normal operation.
        """
        rqst = Request('PUT', '/manager/status')
        rqst.set_json({
            'status': 'running',
        })
        async with rqst.fetch():
            pass

    @api_function
    @classmethod
    async def get_announcement(cls):
        '''
        Get current announcement.
        '''
        rqst = Request('GET', '/manager/announcement')
        async with rqst.fetch() as resp:
            return await resp.json()

    @api_function
    @classmethod
    async def update_announcement(cls, enabled: bool = True, message: str = None):
        '''
        Update (create / delete) announcement.

        :param enabled: If set ``False``, delete announcement.
        :param message: Announcement message. Required if ``enabled`` is True.
        '''
        rqst = Request('POST', '/manager/announcement')
        rqst.set_json({
            'enabled': enabled,
            'message': message,
        })
        async with rqst.fetch():
            pass

    @api_function
    @classmethod
    async def scheduler_op(cls, op: str, args: Any):
        '''
        Perform a scheduler operation.

        :param op: The name of scheduler operation.
        :param args: Arguments specific to the given operation.
        '''
        rqst = Request('POST', '/manager/scheduler/operation')
        rqst.set_json({
            'op': op,
            'args': args,
        })
        async with rqst.fetch():
            pass
| StarcoderdataPython |
11264886 | 'yanna'
#!/usr/bin/env python3
import sys
from Crypto.PublicKey import RSA
def egcd(a, b):
    """Extended Euclidean algorithm.

    Return ``(g, x, y)`` such that ``a*x + b*y == g == gcd(a, b)``.
    """
    if a == 0:
        return (b, 0, 1)
    g, y_prev, x_prev = egcd(b % a, a)
    return (g, x_prev - (b // a) * y_prev, y_prev)


def modinv(a, m):
    """Return the modular inverse of *a* modulo *m*.

    Raises ``Exception`` when ``gcd(a, m) != 1`` (no inverse exists).
    """
    g, x, _ = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m
def to_c_string(x):
    """Render integer *x* as 128 big-endian C ``\\xNN`` escapes.

    :param x: non-negative integer up to 2**1024 - 1
    :return: string of 128 ``\\xNN`` escapes, zero-padded on the left
    """
    # Bug fix: the old Python 2 idiom hex(x)[2:-1] stripped a trailing 'L'
    # that no longer exists on Python 3, silently dropping the last hex
    # digit. format(x, 'x') has no prefix/suffix to strip.
    mod = format(x, 'x').rjust(0x100, '0')
    hh = ''.join('\\x' + mod[i:i + 2] for i in range(0, 0x100, 2))
    return hh
def to_c_uint32(x):
    """Split *x* into 32 little-endian 32-bit limbs as a C array literal."""
    limbs = [(x >> (32 * i)) & 0xFFFFFFFF for i in range(0x20)]
    return "{" + ",".join("%dU" % limb for limb in limbs) + "}"
# Emit a C 'RSAPublicKey' initializer for every PEM key given on argv.
for fn in sys.argv[1:]:
    rsa = RSA.importKey(open(fn).read())
    # rr = (2**1024)**2 mod n and n0inv = -n^-1 mod 2**32 — presumably the
    # Montgomery-multiplication constants expected by the consuming C code
    # (verify against that code's RSAPublicKey definition).
    rr = pow(2**1024, 2, rsa.n)
    n0inv = 2**32 - modinv(rsa.n, 2**32)
    # Derive the C symbol name from the key file's base name.
    cname = fn.split("/")[-1].split(".")[0] + "_rsa_key"
    print('RSAPublicKey ' + cname + ' = {.len = 0x20,')
    print(' .n0inv = %dU,' % n0inv)
    print(' .n = %s,' % to_c_uint32(rsa.n))
    print(' .rr = %s,' % to_c_uint32(rr))
    print(' .exponent = %d,' % rsa.e)
    print('};')
| StarcoderdataPython |
3461231 | """Tests for the apns component."""
| StarcoderdataPython |
6568332 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.interfaces.utils import propagateClkRstn, addClkRstn
from hwt.simulator.simTestCase import SimTestCase
from hwt.synthesizer.unit import Unit
from hwtLib.amba.axi4Lite import Axi4Lite
from hwtLib.amba.axiLite_comp.sim.ram import Axi4LiteSimRam
from hwtLib.amba.axi_comp.builder import AxiBuilder
from hwtLib.amba.constants import PROT_DEFAULT, RESP_OKAY
from hwtLib.cesnet.mi32.axi4Lite_to_mi32 import Axi4Lite_to_Mi32
from hwtLib.cesnet.mi32.intf import Mi32
from hwtLib.cesnet.mi32.to_axi4Lite import Mi32_to_Axi4Lite
from hwtSimApi.constants import CLK_PERIOD
from hwtLib.amba.axiLite_comp.sim.utils import axi_randomize_per_channel
class Axi4LiteMi32Bridges(Unit):
    """
    :class:`hwt.synthesizer.unit.Unit` with AxiLiteEndpoint + AxiLiteReg + AxiLite2Mi32 + Mi32_2AxiLite
    """

    def _config(self):
        Mi32._config(self)

    def _declr(self):
        addClkRstn(self)
        with self._paramsShared():
            self.s = Axi4Lite()
            self.toMi32 = Axi4Lite_to_Mi32()
            self.toAxi = Mi32_to_Axi4Lite()
            self.m = Axi4Lite()._m()

    def _impl(self):
        propagateClkRstn(self)
        toMi32 = self.toMi32
        toAxi = self.toAxi
        # Datapath: s -> buffer -> AxiLite->Mi32 -> Mi32->AxiLite -> buffer -> m
        m = AxiBuilder(self, self.s).buff().end
        toMi32.s(m)
        toAxi.s(toMi32.m)
        self.m(AxiBuilder(self, toAxi.m).buff().end)
class Mi32Axi4LiteBrigesTC(SimTestCase):
    """Simulation tests for the AxiLite <-> Mi32 bridge chain."""

    @classmethod
    def setUpClass(cls):
        u = cls.u = Axi4LiteMi32Bridges()
        cls.compileSim(u)

    def randomize_all(self):
        # Randomize handshake timing on every channel of both interfaces.
        u = self.u
        for i in [u.m, u.s]:
            axi_randomize_per_channel(self, i)

    def setUp(self):
        SimTestCase.setUp(self)
        u = self.u
        # Back the master interface with a simulated RAM.
        self.memory = Axi4LiteSimRam(axi=u.m)

    def test_nop(self):
        # Without stimuli no channel may produce any data.
        self.randomize_all()
        self.runSim(10 * CLK_PERIOD)
        u = self.u
        for i in [u.m, u.s]:
            self.assertEmpty(i.ar._ag.data)
            self.assertEmpty(i.aw._ag.data)
            self.assertEmpty(i.r._ag.data)
            self.assertEmpty(i.w._ag.data)
            self.assertEmpty(i.b._ag.data)

    def test_read(self):
        u = self.u
        N = 10
        # Issue N word-aligned read transactions against pre-filled memory.
        a_trans = [(i * 0x4, PROT_DEFAULT) for i in range(N)]
        for i in range(N):
            self.memory.data[i] = i + 1
        u.s.ar._ag.data.extend(a_trans)
        # self.randomize_all()
        self.runSim(N * 10 * CLK_PERIOD)

        u = self.u
        for i in [u.s, u.m]:
            self.assertEmpty(i.aw._ag.data)
            self.assertEmpty(i.w._ag.data)
            self.assertEmpty(i.b._ag.data)
        r_trans = [(i + 1, RESP_OKAY) for i in range(N)]
        self.assertValSequenceEqual(u.s.r._ag.data, r_trans)
if __name__ == "__main__":
    import unittest

    suite = unittest.TestSuite()
    # suite.addTest(Mi32Axi4LiteBrigesTC('test_singleLong'))
    suite.addTest(unittest.makeSuite(Mi32Axi4LiteBrigesTC))
    runner = unittest.TextTestRunner(verbosity=3)
    runner.run(suite)
| StarcoderdataPython |
1749457 | # FCC ULS Callsign Search
# Author: <NAME>
# Repository: https://github.com/cyberphilia/fcc_uls_callsign_search
# Callsign:W4NTS
# Quick Start: python .\fcc_uls_callsign_search.py
import os
import time
from ftplib import FTP
from zipfile import ZipFile
import callsign
import database
# from pathlib import Path
# data_folder = Path("temp/")
# file_to_open = data_folder / "raw_data.txt"
# print(file_to_open)
def get_ULS_Zip():
    """Download the complete amateur license dump (l_amat.zip) from the FCC FTP.

    The archive is written to the current working directory.

    :raises OSError / ftplib.all_errors: on filesystem or network failure
    """
    print('Downloading Zip')
    filename = 'l_amat.zip'
    # Context managers guarantee the FTP connection and the local file are
    # closed even when the transfer fails part-way (the original leaked
    # both on error).
    with FTP('wirelessftp.fcc.gov') as ftp:
        # ftp.login(user='username', passwd = 'password')
        ftp.login()  # anonymous login
        ftp.cwd('/pub/uls/complete/')
        with open(filename, 'wb') as localfile:
            ftp.retrbinary('RETR ' + filename, localfile.write, 1024)
    print('Download complete')
def unzip_ULS(filename):
    """Extract *filename* into the ``temp/`` directory when it exists.

    :param filename: path of the zip archive to extract
    """
    # Bug fix: the existence check previously tested the hard-coded name
    # "l_amat.zip" instead of the archive actually passed in, so any other
    # filename argument was silently ignored.
    if os.path.exists(filename):
        print('Unzipping file')
        with ZipFile(filename, 'r') as zipObj:
            zipObj.extractall('temp')
        print('Unzipping complete')
def ULSZipCheck():
    """Ensure a fresh l_amat.zip exists locally, then extract it.

    Re-downloads the archive when the cached copy is older than one week.
    """
    if os.path.exists("l_amat.zip"):
        file_modified = os.path.getmtime('l_amat.zip')
        current_time = time.time()
        time_difference = current_time - file_modified
        if time_difference > 604800:  # 604800 one week in seconds
            get_ULS_Zip()
    else:
        print('ULS Zip file not found.')
        get_ULS_Zip()
    unzip_ULS('l_amat.zip')
def print_menu():
    """Print the interactive top-level menu to stdout."""
    for line in (
        'FCC ULS Callsign Search',
        ' 0: Exit',
        ' 1: Search for Callsign',
        ' 2: Process callsign_input.txt',
        # ' 3: SQL Query',
        '',
    ):
        print(line)
def search(db, input):
    """Build a printable report (amateur record, comments, history) for one callsign.

    :param db: database.ULSDatabase instance to query
    :param input: callsign string to look up
    :return: formatted multi-line report string
    """
    # NOTE(review): parameter *input* shadows the builtin; consider renaming.
    rtn = ''
    cs = callsign.Callsign(input)
    rtn += 'Callsign: {}\n'.format(cs.callsign)
    rtn += '==================\n'
    rtn += 'Group: {}, Available To: {} \n\n'.format(cs.group, cs.available_to)
    amateur = db.select_amateur(cs.callsign)
    comments = db.select_comments(cs.callsign)
    # entity = db.select_entity(cs.callsign)
    history = db.select_history(cs.callsign)
    if amateur:
        rtn += 'Amateur: \n'
        rtn += '------------------\n'
        rtn += amateur + '\n'
    if comments:
        rtn += 'Comments: \n'
        rtn += '------------------\n'
        rtn += comments + '\n'
    # if entity:
    #     rtn += 'Entity: \n'
    #     rtn += '------------------\n'
    #     rtn += entity + '\n'
    if history:
        rtn += 'History: \n'
        rtn += '------------------\n'
        rtn += history + '\n'
    else:
        # No license history at all usually means the callsign is unassigned.
        rtn += 'History: None, {} Probably Available\n'.format(cs.callsign)
    return rtn
def main():
    """Interactive entry point: refresh the ULS data, then serve the menu loop."""
    ULSZipCheck()
    # db = database.ULSDatabase('uls.db','temp/')
    db = database.ULSDatabase(':memory:', 'temp/')
    menu = True
    while menu is not False:
        print_menu()
        option = input("Option:")
        if option == '0':
            menu = False
        if option == '1':
            callsign_input = input("Callsign?: ")
            print()
            print()
            print(search(db, callsign_input))
        if option == '2':
            # Batch mode: one callsign per line in callsign_input.txt.
            output_file = open('callsign_output.txt', 'w')
            with open('callsign_input.txt', 'r') as reader:
                for line in reader.readlines():
                    search_results = search(db, line.upper().strip())
                    print(search_results)
                    output_file.write(search_results)
            output_file.close()
            # print(line + '>>>>>')
            # output_file.write(select_callsign(db_con,line.upper().strip())+'\n')
            print()
            print()
    db.close_connection()


if __name__ == "__main__":
    main()
5046426 | """
94 Free Cash - https://codeforces.com/problemset/problem/237/A
"""
def main():
    """Read n coordinate pairs from stdin and print the largest multiplicity.

    (Codeforces 237A "Free Cash": the answer is the maximum number of
    clients arriving at the same minute.)
    """
    n = int(input())
    counts = {}
    for _ in range(n):
        pair = tuple(map(int, input().split()))
        counts[pair] = counts.get(pair, 0) + 1
    ranked = sorted(counts.values(), reverse=True)
    print(ranked[0])
main() | StarcoderdataPython |
128099 | <reponame>LovelyA72/ScoreDraft
from .Instrument import Instrument
from .Catalog import Catalog
try:
    from .Extensions import InitializeKarplusStrongInstrument
    from .Extensions import KarplusStrongSetCutFrequency
    from .Extensions import KarplusStrongSetLoopGain
    from .Extensions import KarplusStrongSetSustainGain

    Catalog['Engines'] += ['KarplusStrongInstrument - Instrument']

    class KarplusStrongInstrument(Instrument):
        """Instrument backed by the Karplus-Strong extension engine."""

        def __init__(self):
            # Handle returned by the native extension, used to address
            # this instrument instance.
            self.id = InitializeKarplusStrongInstrument()

        def setCutFrequency(self, cut_freq):
            # This is the cut-frequency of the feedback filter for pitch 261.626Hz
            KarplusStrongSetCutFrequency(self, cut_freq)

        def setLoopGain(self, loop_gain):
            KarplusStrongSetLoopGain(self, loop_gain)

        def setSustainGain(self, sustain_gain):
            KarplusStrongSetSustainGain(self, sustain_gain)
except ImportError:
    # Extension module not built; the instrument is simply unavailable.
    pass
| StarcoderdataPython |
288957 | import matplotlib.pyplot as plt
import numpy as np
import pytest
from numba_celltree import demo
def test_close_polygons():
    """Rings padded with the fill value (-1) are closed on the first vertex."""
    faces = np.array(
        [
            [0, 1, 2, -1, -1],
            [0, 1, 2, 3, -1],
            [0, 1, 2, 3, 4],
        ]
    )
    closed = demo.close_polygons(faces, -1)
    expected = np.array(
        [
            [0, 1, 2, 0, 0, 0],
            [0, 1, 2, 3, 0, 0],
            [0, 1, 2, 3, 4, 0],
        ]
    )
    assert np.array_equal(closed, expected)


def test_edges():
    """Faces are decomposed into unique, sorted undirected edges."""
    faces = np.array(
        [
            [0, 1, 2, -1],
            [1, 3, 4, 2],
        ]
    )
    actual = demo.edges(faces, -1)
    expected = np.array(
        [
            [0, 1],
            [0, 2],
            [1, 2],
            [1, 3],
            [2, 4],
            [3, 4],
        ]
    )
    assert np.array_equal(actual, expected)


def test_plot_edges():
    # Smoke test: drawing the wireframe must not raise.
    _, ax = plt.subplots()
    node_x = np.array([0.0, 1.0, 1.0, 2.0, 2.0])
    node_y = np.array([0.0, 0.0, 1.0, 0.0, 1.0])
    edges = np.array(
        [
            [0, 1],
            [0, 2],
            [1, 2],
            [1, 3],
            [2, 4],
            [3, 4],
        ]
    )
    demo.plot_edges(node_x, node_y, edges, ax)


def test_plot_boxes():
    # Valid (xmin, xmax, ymin, ymax) boxes draw without error ...
    boxes = np.array(
        [
            [0.0, 1.0, 0.0, 1.0],
            [1.0, 2.0, 1.0, 2.0],
        ]
    )
    _, ax = plt.subplots()
    demo.plot_boxes(boxes, ax)
    # ... while a box array without four columns must be rejected.
    boxes = np.array(
        [
            [0.0, 1.0, 0.0],
            [1.0, 2.0, 1.0],
        ]
    )
    _, ax = plt.subplots()
    with pytest.raises(ValueError):
        demo.plot_boxes(boxes, ax)
| StarcoderdataPython |
6703616 | # extract_upstream_regions.py
# script that accepts a 2 column file of locus
# ids and a boolean flag of whether than locus
# might be in an operon. It returns a fasta file
# from 1 base before the ATG through -300bp
# upstream for all genes NOT possibly in an operon.
import argparse
from Bio import SeqIO
import os
import pandas as pd
import genbank_utils as gu
import get_top_gene_set as gtgs
# genbank feature tuple indices
# Each feature is a 6-tuple: (left, right, strand, locus_tag, gene, type).
LEFT_IDX = 0
RIGHT_IDX = 1
STRAND_IDX = 2
LOCUS_IDX = 3
GENE_IDX = 4
TYPE_IDX = 5
def load_loci(filename):
    """Return locus tags from *filename* that are not flagged as operonic.

    :param filename: path to a tab-separated file with columns
        'locus_tag' and 'op?' (boolean operon flag)
    :return: list of locus_tag values whose 'op?' flag is False
    """
    table = pd.read_csv(filename, sep='\t')
    non_operon = table[table['op?'] == False]  # noqa: E712 (elementwise pandas compare)
    return list(non_operon['locus_tag'].values)
def get_all_upstream_regions(gb_file,
                             window_size,
                             min_dist,  # default=20 in argparse
                             no_truncate,  # default=False in argparse
                             avoid_rbs,  # default=None in argpase
                             verbose=False
                             ):
    '''
    Given a genbank file, parse out all its features into 6-tuples
    of (LEFT, RIGHT, STRAND, LOCUS, GENE, TYPE). Then use the sequence
    in the genbank file to extract a window_size number of base pairs
    upstream of each features start coordinate.

    Truncate is on by default and will stop slicing if we run into
    the coding sequence of another feature (so only extract the
    intergenic sequence).

    However some features are too close together (and possibly even
    overlapping). While most of these should have been ignored in
    the operon estimation step of get_top_gene_sets.py, some genes
    are oriented divergently and thus still not in an operon. If
    the distance between two loci is < min_dist, we extract min dist
    anyways, even if it runs into another locus annotation.

    Returns a dict mapping locus_tag -> upstream sequence.
    '''
    # load genbank features and genome
    feats, genome = gu.get_feats_and_genome(gb_file)

    # dictionary to collect feature upstream seqs
    upstream_regions = {}

    # loop through features and get upstream for all
    for i, cur_feat in enumerate(feats):
        # +-----------------+
        # | NEGATIVE STRAND |
        # +-----------------+
        # if we're on the negative strand, go 300bp to the right, reverse compelement
        if cur_feat[STRAND_IDX] == -1:
            # get the range of the promoter region
            p_left = cur_feat[RIGHT_IDX] + 1
            p_right = p_left + window_size
            # extract a slice from the genome at the proper coordinates
            # revcomp because on reverse
            seq = genome[p_left:p_right].reverse_complement()

            # if in truncate mode, check if upstream feat is too close
            if not no_truncate:
                # make sure we're not at the last feat in the list (no
                # rightward gene upstream)
                if i < len(feats) - 1:
                    # get the FOLLOING feature (because on -1 strand)
                    upstream_feat = feats[i+1]
                    # how far is the upstream feat from the current?
                    upstream_dist = upstream_feat[LEFT_IDX] - cur_feat[RIGHT_IDX]
                    # if it's closer than the window we extracted, we need
                    # to truncate
                    if upstream_dist < window_size:
                        if verbose:
                            if upstream_dist < min_dist:
                                print("SHORT DIST!")
                                print("Cur feat:")
                                print(cur_feat)
                                print("Up feat:")
                                print(upstream_feat)
                                print(f"distance:{upstream_dist}\n")
                        # if upstream distance is too small (features are closer than
                        # min_dist), then set the upstream dist to at least min_dist
                        upstream_dist = max(upstream_dist, min_dist)
                        # determine how much of the window to truncate to avoid
                        # running into the next feature
                        trunc_dist = window_size - upstream_dist
                        # truncate from the beginning of the seq (the upstreamer part)
                        # (we've already reverse complemented so we can still take the
                        # upstream part of the promoter seq even though this is the reverse
                        # section)
                        seq = seq[trunc_dist:]

        # +-----------------+
        # | POSITIVE STRAND |
        # +-----------------+
        # if we're on the positive strand, go 300bp to the left
        elif cur_feat[STRAND_IDX] == 1:
            p_right = cur_feat[LEFT_IDX] - 1
            p_left = p_right - window_size
            # extract a slice from the genome at the proper coordinates
            seq = genome[p_left:p_right]

            # if in truncate mode
            if not no_truncate:
                # make sure this isn't the very first feat in the list
                # (no leftward upstream gene)
                if i > 0:
                    # get the PREVIOUS feature (because on +1 strand)
                    upstream_feat = feats[i-1]
                    # how far is the upstream feat from the current?
                    upstream_dist = cur_feat[LEFT_IDX] - upstream_feat[RIGHT_IDX]
                    # if it's closer than the window we extracted, we need
                    # to truncate
                    if upstream_dist < window_size:
                        if verbose:
                            if upstream_dist < min_dist:
                                print("SHORT DIST!")
                                print("Cur feat:")
                                print(cur_feat)
                                print("Up feat:")
                                print(upstream_feat)
                                print(f"distance:{upstream_dist}\n")
                        # if upstream distance is too small (features are closer than
                        # min_dist), then set the upstream dist to at least min_dist
                        upstream_dist = max(upstream_dist, min_dist)
                        # determine how much of the window to truncate to avoid
                        # running into the next feature
                        trunc_dist = window_size - upstream_dist
                        # truncate from the beginning of the seq (the upstreamer part)
                        seq = seq[trunc_dist:]
        else:
            raise ValueError(f"Unknown strand type: {cur_feat[STRAND_IDX]}. Expected 1 or -1.")

        # add the feature and its upstream seq to the dict
        # key : locus_tag, value : upstream sequence string
        if avoid_rbs:
            # if the sequence is already short and the avoid_rbs amount
            # would reduce the seq to shorter than min_dist, only reduce
            # to the min dist.
            bp_to_cut = min(avoid_rbs, len(seq)-min_dist)
            if verbose:
                print(f"Truncating {bp_to_cut} bases from end of seq")
            seq = seq[:-bp_to_cut]

        upstream_regions[cur_feat[LOCUS_IDX]] = seq

    return upstream_regions
def write_fasta_file(args, loci, upstream_regions):
    """Write the extracted upstream sequences for *loci* to a FASTA file.

    :param args: parsed argparse namespace (used for naming and output path)
    :param loci: list of locus tags to emit
    :param upstream_regions: dict locus_tag -> upstream sequence
    :return: path of the FASTA file written
    """
    # load feature meta data
    feat2meta = gu.get_feat2meta_dict(args.gb_file)
    # construct output file name:
    # use same base name as loci file
    base = os.path.basename(args.loci_file).split('.')[0]
    # append "_trunc" if in truncation mode
    trunc_string = "" if args.no_trunc else "_trunc"
    # append rbs string if in rbs_avoidance mode
    rbs_flag = "" if not args.avoid_rbs else f"_RBSminus{args.avoid_rbs}"
    # concat some relevant args
    filename = f"{base}_upstream_regions_w{args.window_size}{rbs_flag}_min{args.min_dist}{trunc_string}.fa"
    # path to outdir
    out_path = os.path.join(args.outdir, filename)
    # write fasta file
    with open(out_path, 'w') as f:
        for loc in loci:
            # some extra metadata for output readability
            gene_symbol = feat2meta[loc]['gene_symbol']
            product = feat2meta[loc]['product']
            header = f">{loc}|{gene_symbol}|{product}"
            f.write(f"{header}\n{upstream_regions[loc]}\n")
    return out_path
# +-------------+
# | MAIN SCRIPT |
# +-------------+
def main():
    """CLI entry point: load loci, extract upstream regions, write FASTA."""
    # +------+
    # | ARGS |
    # +------+
    parser = argparse.ArgumentParser(description='Extract upstream DNA regions from set of locus ids.')
    # Required args
    parser.add_argument('loci_file', help='Two-column tab-delimited file containing list of locus IDs and boolean operon flag')
    parser.add_argument('gb_file', help='Genbank file with feature annotations')
    parser.add_argument('outdir', help='Output directory where results are written')
    # Optional args
    parser.add_argument('-w', '--window_size', default=300, type=int, help='bp length of upstream region to extract')
    parser.add_argument('-m', '--min_dist', default=20, type=int, help='Minimum upstream distance to extract, even if features are too close.')
    parser.add_argument('-t', '--no_trunc', action='store_true', help='Turn OFF truncation mode - so always extract window_size bp, even if it overlaps with other features')
    parser.add_argument('-r', '--avoid_rbs', nargs='?', type=int, const=15, default=None, help='Turn ON RBS avoidance to truncate the end of the extracted sequence by n bases (default n=15). It will not reduce a sequence to be shorter than min_dist')
    args = parser.parse_args()

    # get loci
    print("Loading loci of interest...")
    loci = load_loci(args.loci_file)
    print(loci)

    # get upstream regions
    print("Getting upstream regions from genbank")
    upstream_regions = get_all_upstream_regions(
        args.gb_file,
        args.window_size,
        args.min_dist,
        args.no_trunc,
        args.avoid_rbs)

    # +------+
    # | SAVE |
    # +------+
    # save to a fasta file
    print("Saving fasta of upstream regions for loci of interest...")
    out_path = write_fasta_file(args, loci, upstream_regions)
    print(f"Output written to {out_path}")
    print("Done!")


if __name__ == '__main__':
    main()
12863027 | <gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image,Category,Location
def gallery_today(request):
    """Render today's gallery page with every stored image."""
    gallery = Image.objects.all()
    return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery})


def search_results(request):
    """Search images by category via the 'category' GET parameter."""
    if 'category' in request.GET and request.GET["category"]:
        search_term = request.GET.get("category")
        searched_images = Image.search_by_category(search_term)
        message = f"{search_term}"
        return render(request, 'all-galleries/search.html', {"message": message, "images": searched_images})
    else:
        message = "You haven't searched for any term"
        return render(request, 'all-galleries/search.html', {"message": message})


def filter_results(request):
    """Filter images by location via the 'location' GET parameter."""
    if 'location' in request.GET and request.GET["location"]:
        filter_term = request.GET.get("location")
        filtered_images = Image.filter_by_location(filter_term)
        message = f"{filter_term}"
        return render(request, 'all-galleries/filter.html', {"message": message, "images": filtered_images})
    else:
        message = "You haven't filtered for any term"
        return render(request, 'all-galleries/filter.html', {"message": message})
# def delete_image(request, pk):
# gallery = get_object_or_404(Cat, pk=pk)
# if request.method == 'POST':
# gallery.delete()
# return redirect('/')
# return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery}) | StarcoderdataPython |
8047181 | import json
import requests
from http import client as http_client
import pickle
import os
from bs4 import BeautifulSoup
from factorial.exceptions import AuthenticationTokenNotFound, UserNotLoggedIn, ApiError
import hashlib
import logging
import logging.config
import random
from datetime import date
from constants import BASE_PROJECT, LOGGER
class FactorialClient:
    """Client that automates sign-in and attendance against the Factorial HR API."""

    # Folder to save the session's cookie
    SESSIONS_FOLDER = os.path.join(BASE_PROJECT, "sessions")
    # Default factorial settings file
    DEFAULT_FACTORIAL_SETTINGS = os.path.join(BASE_PROJECT, 'factorial_settings.json')
    # Endpoints
    BASE_NAME = "https://api.factorialhr.com/"
    # Url to be able to login (post: username, password) and logout (delete) on the api
    SESSION_URL = '{}sessions'.format(BASE_NAME)
    # Url to show the form to get the authentication token (get)
    LOGIN_PAGE_URL = '{}users/sign_in'.format(BASE_NAME)
    # Url to get the user info (get)
    USER_INFO_URL = '{}accesses'.format(BASE_NAME)
    # Get employee (get)
    EMPLOYEE_URL = '{}employees'.format(BASE_NAME)
    # Get period (get)
    PERIODS_URL = '{}attendance/periods'.format(BASE_NAME)
    # Shift days (get, post, patch, delete)
    SHIFT_URL = '{}attendance/shifts'.format(BASE_NAME)
    # Calendar (get)
    CALENDAR_URL = '{}attendance/calendar'.format(BASE_NAME)
def __init__(self, email, password, cookie_file=None):
    """Factorial client to automatically sign up the work

    :param email: (required) string, email to login on Factorial
    :param password: (required) string, password to login on Factorial
    :param cookie_file: (optional) string, file to save the cookies
    """
    self.email = email
    self.password = password
    self.current_user = {}
    self.mates = []
    self.session = requests.Session()
    # Be able to save the cookies on a file specified, or save each user on a different email for multi account
    self.cookie_file = cookie_file or hashlib.sha512(email.encode('utf-8')).hexdigest()
    cookie_path = os.path.join(self.SESSIONS_FOLDER, self.cookie_file)
    if os.path.exists(cookie_path):
        with open(cookie_path, "rb") as file:
            # TODO: Watch out the expiration of the cookie
            LOGGER.info('Getting the session from cookies files')
            self.session.cookies.update(pickle.load(file))
def login(self):
    """Log in on the Factorial web API.

    Reuses the cookie-backed session when it is still valid; otherwise
    posts the credentials and persists the new session cookies to disk.

    :return: bool, True when the session is authenticated
    """
    try:
        self.load_user_data()
        # Try to load the user info using the cookie, if can't login again using the username and password
        LOGGER.info('Already logged in, re-login is not needed')
        return True
    except UserNotLoggedIn:
        payload = {
            'utf8': '✓',
            'authenticity_token': self.generate_new_token(),
            'user[email]': self.email,
            # Bug fix: the password field previously held a placeholder
            # token instead of the stored credential.
            'user[password]': self.password,
            'user[remember_me]': "0",
            'commit': 'Iniciar sesión'
        }
        response = self.session.post(url=self.SESSION_URL, data=payload)
        loggedin = response.status_code == http_client.CREATED
        if loggedin:
            LOGGER.info('Login successfully')
            # Load user data
            self.load_user_data()
            # Save the cookies if is logged in
            if not os.path.exists(self.SESSIONS_FOLDER):
                os.mkdir(self.SESSIONS_FOLDER)
            with open(os.path.join(self.SESSIONS_FOLDER, self.cookie_file), "wb") as file:
                pickle.dump(self.session.cookies, file)
            LOGGER.info('Sessions saved')
        return loggedin
@staticmethod
def generate_new_token():
    """Fetch the sign-in page and scrape the CSRF authenticity token.

    :raises AuthenticationTokenNotFound: when the form token is missing/empty
    :return: string token value required by the login form
    """
    response = requests.get(url=FactorialClient.LOGIN_PAGE_URL)
    soup = BeautifulSoup(response.text, 'html.parser')
    auth_token = soup.find('input', attrs={'name': 'authenticity_token'})
    token_value = auth_token.get('value')
    if not token_value:
        raise AuthenticationTokenNotFound()
    return token_value
@staticmethod
def load_from_settings(json_settings=DEFAULT_FACTORIAL_SETTINGS):
    """Build and log in a client from a JSON settings file.

    The saved cookie session is reused when still valid.

    :param json_settings: string config filename with 'email'/'password' keys
    :raises ApiError: when neither the cookie nor the credentials authenticate
    :return: FactorialClient
    """
    with open(json_settings, 'r') as file:
        settings = json.load(file)
    factorial_client = FactorialClient(email=settings.get('email', ''),
                                       password=settings.get('password', ''))
    if not factorial_client.login():
        # Could not authenticate with either the cookie or the credentials.
        raise ApiError('Cannot login with the given credentials')
    return factorial_client
@staticmethod
def split_time(time):
"""Split time to hour and minutes
:param time: string time 7:30
:return: tuple(hours, minutes)
"""
return (int(t) for t in time.split(':'))
@staticmethod
def convert_to_minutes(hours, minutes):
"""Convert time to minutes
:param hours: int
:param minutes: int
:return: int
"""
return hours * 60 + minutes
@staticmethod
def convert_to_time(minutes):
"""Convert minutes to time
:param minutes: int
:return: tuple(hours, minutes)
"""
converted_hours = int(minutes / 60)
converted_minutes = int(minutes - converted_hours * 60)
return converted_hours, converted_minutes
@staticmethod
def get_total_minutes_period(start_hours, start_minutes, end_hours, end_minutes):
"""Get total minutes for a period
:param start_hours: int hours
:param start_minutes: int minutes
:param end_hours: int hours
:param end_minutes: int minutes
:return: total minutes
"""
start_minutes = FactorialClient.convert_to_minutes(start_hours, start_minutes)
end_minutes = FactorialClient.convert_to_minutes(end_hours, end_minutes)
return end_minutes - start_minutes
@staticmethod
def get_random_number(start, end):
"""Get random number between two numbers, both included
Eg:
start = -10
end = 10
1 * (10 - -10) + -10 = 10
0 * (10 - -10) + -10 = -10
:param start: int start
:param end: int end
:return: int random number between start and end
"""
return random.random() * (end - start) + start
@staticmethod
def random_time(hours, minutes, minutes_variation):
"""Variation between minutes
:param hours: int current hour
:param minutes: int current minutes
:param minutes_variation: int minutes to variate
:return: tuple (hours, minutes)
"""
# Minutes variation of 10 will be a random between -10 and 10
random_minutes_variation = FactorialClient.get_random_number(start=-minutes_variation, end=minutes_variation)
# Pass hours and minutes to all minutes
total_minutes = FactorialClient.convert_to_minutes(hours, minutes)
# Remove or add the minutes variation
variated_minutes = total_minutes + random_minutes_variation
# Pass to hours and minutes
return FactorialClient.convert_to_time(variated_minutes)
def check_status_code(self, status_code, status_code_error, message=None):
    """Validate an HTTP status code against the expected success status.

    :param status_code: HttpStatus actually returned by the endpoint
    :param status_code_error: HttpStatus expected on success
    :param message: string detail attached to the ApiError
    :raises UserNotLoggedIn: when the API answered 401 Unauthorized
    :raises ApiError: when the status differs from the expected one
    """
    if status_code == http_client.UNAUTHORIZED:
        raise UserNotLoggedIn()
    if status_code != status_code_error:
        raise ApiError(message)
def generate_period(self, start, end, minutes_variation):
    """Generate a period with a random variation applied to its start.

    The period keeps its original duration: only the starting point is
    shifted randomly, and the end is recomputed from it.

    :param start: string time
    :param end: string time
    :param minutes_variation: int minutes to variate
    :return: tuple (start_hours, start_minutes, end_hours, end_minutes)
    """
    begin_h, begin_m = self.split_time(start)
    finish_h, finish_m = self.split_time(end)
    duration = self.get_total_minutes_period(begin_h, begin_m, finish_h, finish_m)
    # Randomly shift the start, then keep the same duration for the end.
    sign_h, sign_m = FactorialClient.random_time(begin_h, begin_m, minutes_variation)
    out_h, out_m = self.convert_to_time(
        self.convert_to_minutes(sign_h, sign_m) + duration)
    return sign_h, sign_m, out_h, out_m
def add_breaks_to_period(self, start_sign_hour, start_sign_minutes, end_sign_hour, end_sign_minutes, breaks):
    """Split a working period into sub-periods around the given breaks.

    :param start_sign_hour: int hour at which the period starts
    :param start_sign_minutes: int minute at which the period starts
    :param end_sign_hour: int hour at which the period ends
    :param end_sign_minutes: int minute at which the period ends
    :param breaks: list of dicts with start_hour/start_minute/end_hour/end_minute
    :return: list of periods, each a dict(start_hour, start_minute, end_hour, end_minute)
    """
    def break_start(current_break):
        # Chronological sort key: break start expressed in total minutes.
        return self.convert_to_minutes(current_break['start_hour'],
                                       current_break['start_minute'])

    periods = []
    cursor_hour, cursor_minute = start_sign_hour, start_sign_minutes
    # Walk the breaks chronologically; each break closes the running
    # period and the next period starts right after the break ends.
    for pause in sorted(breaks, key=break_start):
        periods.append({
            'start_hour': cursor_hour,
            'start_minute': cursor_minute,
            'end_hour': pause.get('start_hour'),
            'end_minute': pause.get('start_minute')
        })
        cursor_hour = pause.get('end_hour')
        cursor_minute = pause.get('end_minute')
    # Close the final stretch up to the end of the working period.
    periods.append({
        'start_hour': cursor_hour,
        'start_minute': cursor_minute,
        'end_hour': end_sign_hour,
        'end_minute': end_sign_minutes
    })
    return periods
def generate_worked_periods(self, start_work, end_work, work_minutes_variation, breaks):
    """Generate the worked periods of a day, with breaks carved out.

    :param start_work: string time
    :param end_work: string time
    :param work_minutes_variation: int minutes to variate
    :param breaks: list of dictionaries of generate_period kwargs
    :return: list of periods
    """
    work_period = self.generate_period(start_work, end_work, work_minutes_variation)
    # Apply the same random-variation treatment to every break as well.
    varied_breaks = []
    for break_params in breaks:
        b_start_h, b_start_m, b_end_h, b_end_m = self.generate_period(**break_params)
        varied_breaks.append({
            'start_hour': b_start_h,
            'start_minute': b_start_m,
            'end_hour': b_end_h,
            'end_minute': b_end_m
        })
    return self.add_breaks_to_period(*work_period, breaks=varied_breaks)
def worked_day(self, day=None, json_settings=DEFAULT_FACTORIAL_SETTINGS):
    """Mark a day as worked, signing all its worked periods.

    :param day: date to save the worked day; defaults to today
    :param json_settings: string config filename
    """
    # BUG FIX: the default used to be ``day=date.today()``, which is
    # evaluated only once, at function-definition time — a long-running
    # process would keep signing the same stale date. Resolve "today" at
    # call time instead (backward compatible for all callers).
    if day is None:
        day = date.today()
    with open(json_settings, 'r') as file:
        settings = json.load(file)
    work_settings_block = settings.get('work', {})
    start_work = work_settings_block.get('start', '')
    end_work = work_settings_block.get('end', '')
    work_minutes_variation = work_settings_block.get('minutes_variation', 0)
    breaks = work_settings_block.get('breaks', [])
    already_work = self.get_day(year=day.year, month=day.month, day=day.day)
    if already_work:
        if work_settings_block.get('resave'):
            # Drop the existing periods so they can be re-registered below.
            for worked_period in already_work:
                self.delete_worked_period(worked_period.get('id'))
        else:
            LOGGER.info('Day already sign')
            return
    add_worked_period_kwargs = {
        'year': day.year,
        'month': day.month,
        'day': day.day,
        # Dynamic over loop fields
        'start_hour': 0,
        'start_minute': 0,
        'end_hour': 0,
        'end_minute': 0
    }
    worked_periods = self.generate_worked_periods(start_work, end_work, work_minutes_variation, breaks)
    for worked_period in worked_periods:
        start_hour = worked_period.get('start_hour')
        start_minute = worked_period.get('start_minute')
        end_hour = worked_period.get('end_hour')
        end_minute = worked_period.get('end_minute')
        add_worked_period_kwargs.update({
            'start_hour': start_hour,
            'start_minute': start_minute,
            'end_hour': end_hour,
            'end_minute': end_minute,
        })
        if self.add_worked_period(**add_worked_period_kwargs):
            LOGGER.info('Saved worked period for the day {0:s} between {1:02d}:{2:02d} - {3:02d}:{4:02d}'.format(
                day.isoformat(),
                start_hour, start_minute,
                end_hour, end_minute))
def logout(self):
    """Log out, invalidating the session (cookie ``_factorial_session``).

    Also starts a fresh unauthenticated session, removes the persisted
    cookies file if present, and clears the cached users.

    :return: bool True when the server acknowledged the logout (204)
    """
    response = self.session.delete(url=self.SESSION_URL)
    logged_out = response.status_code == http_client.NO_CONTENT
    LOGGER.info('Logout successfully {}'.format(logged_out))
    # Replace the authenticated session with a brand new one.
    self.session = requests.Session()
    path_file = os.path.join(self.SESSIONS_FOLDER, self.cookie_file)
    if os.path.exists(path_file):
        os.remove(path_file)
        # Consistency fix: use the module-level LOGGER (as the rest of
        # this class does) instead of the root ``logging`` module.
        LOGGER.info('Logout: Removed cookies file')
    self.mates.clear()
    self.current_user = {}
    return logged_out
def load_employees(self):
    """Fetch employee records and merge them into the cached users.

    Each employee entry is matched against ``self.mates`` and
    ``self.current_user`` through its ``access_id`` (compared with the
    cached user's ``id``) and merged into the matching dictionary.

    Fields returned per employee (example)::

        access_id, birthday_on, hired_on, job_title, id, manager_id,
        supervised_by_current, terminated_on, is_terminating,
        timeoff_policy_id, timeoff_manager_id,
        timeoff_supervised_by_current, location_id, employee_group_id,
        payroll_hiring_id, is_eligible_for_payroll
    """
    LOGGER.info("Loading employees")
    employee_response = self.session.get(self.EMPLOYEE_URL)
    self.check_status_code(employee_response.status_code, http_client.OK)
    for employee in employee_response.json():
        access_id = employee.get('access_id')
        # Merge employee fields into every cached user that matches.
        for mate in self.mates:
            if mate.get('id') == access_id:
                mate.update(employee)
        if self.current_user.get('id') == access_id:
            self.current_user.update(employee)
def load_user_data(self):
    """Reload information about the company's users.

    Resets the cached state, stores the user flagged ``current`` in
    ``self.current_user`` and every other user in ``self.mates``, then
    enriches both with employee data via ``load_employees``.

    Example response item::

        {
            "id": <integer>, "user_id": <integer>, "company_id": <integer>,
            "invited": true, "invited_on": "YYYY-MM-DD", "role": "basic",
            "current": true/false, "calendar_token": null,
            "first_name": "sss", "last_name": "sss", "email": "sss@sss",
            "unconfirmed_email": null, "joined": true/false,
            "locale": "xx", "avatar": null, "tos": true
        }
    """
    self.mates.clear()
    self.current_user = {}
    response = self.session.get(url=self.USER_INFO_URL)
    self.check_status_code(response.status_code, http_client.OK)
    for user in response.json():
        if user.get('current', False):
            self.current_user = user
        else:
            self.mates.append(user)
    self.load_employees()
def get_period(self, year, month):
    """Fetch the attendance period(s) of the current user for a month.

    Each returned item looks like::

        {
            "id": <integer>,                # period id
            "employee_id": <integer>,
            "year": <integer>,
            "month": <integer>,
            "state": <string>,
            "estimated_minutes": <integer>,
            "worked_minutes": <integer>,
            "distribution": [<integer>, ...],          # minutes per day
            "estimated_hours_in_cents": <integer>,
            "worked_hours_in_cents": <integer>,
            "distribution_in_cents": [<integer>, ...]  # cents per day
        }

    :param year: integer
    :param month: integer
    :return: dictionary
    """
    query = {
        'year': year,
        'month': month,
        'employee_id': self.current_user.get('id', '')
    }
    response = self.session.get(url=self.PERIODS_URL, params=query)
    self.check_status_code(response.status_code, http_client.OK)
    return response.json()
def get_shift(self, year, month):
    """Fetch the calendar of worked days for a given month.

    :param year: integer
    :param month: integer
    :return dictionary
    """
    # The shift endpoint is keyed by the id of the month's first period.
    first_period = self.get_period(year=year, month=month)[0]
    response = self.session.get(self.SHIFT_URL,
                                params={'period_id': first_period['id']})
    self.check_status_code(response.status_code, http_client.OK)
    return response.json()
def get_day(self, year, month, day):
    """Return the worked periods registered for one specific day.

    :param year: integer
    :param month: integer
    :param day: integer
    :return: list of worked-period dictionaries (empty when none)
    """
    calendar = self.get_shift(year=year, month=month)
    return [entry for entry in calendar if entry.get('day') == day]
def get_calendar(self, year, month, **kwargs):
    """Return the month's calendar days (laborable and leave days).

    Any extra keyword argument is used as an equality filter on the
    returned day dictionaries (e.g. ``is_leave=True``).

    :param year: int
    :param month: int
    :return: list of dictionary
    """
    response = self.session.get(self.CALENDAR_URL, params={
        'id': self.current_user.get('id'),
        'year': year,
        'month': month
    })
    self.check_status_code(response.status_code, http_client.OK)
    days = response.json()
    for field, expected in kwargs.items():
        days = [day for day in days if day.get(field) == expected]
    return days
def add_worked_period(self, year, month, day, start_hour, start_minute, end_hour, end_minute):
    """Register a period of the given day as worked.

    Example: to record 2019-07-31 from 7:30 to 15:30 call with
    year=2019, month=7, day=31, start_hour=7, start_minute=30,
    end_hour=15, end_minute=30.

    :param year: integer
    :param month: integer
    :param day: integer
    :param start_hour: integer
    :param start_minute: integer
    :param end_hour: integer
    :param end_minute: integer
    :return bool: correctly saved
    """
    # Refuse to sign on leave days (vacations).
    formatted_date = f'{year:04d}-{month:02d}-{day:02d}'
    for calendar_day in self.get_calendar(year=year, month=month, is_leave=True):
        if calendar_day.get('date') == formatted_date:
            LOGGER.info(f"Can't sign today {formatted_date}, because are vacations")
            return False
    first_period = self.get_period(year=year, month=month)[0]
    # NOTE(review): minutes are not zero-padded here (e.g. "7:5"), unlike
    # the log format used elsewhere — confirm the API accepts this form.
    payload = {
        'clock_in': f'{start_hour}:{start_minute}',
        'clock_out': f'{end_hour}:{end_minute}',
        'day': day,
        'period_id': first_period['id']
    }
    response = self.session.post(self.SHIFT_URL, data=payload)
    self.check_status_code(response.status_code, http_client.CREATED)
    return True
def delete_worked_period(self, shift_id):
    """Delete a previously registered worked period.

    :param shift_id: integer
    """
    response = self.session.delete(f'{self.SHIFT_URL}/{shift_id}')
    self.check_status_code(response.status_code, http_client.NO_CONTENT)
def modify_worked_period(self, shift_id, period_id, start_hour, start_minute, end_hour, end_minute):
    """Change the clock-in and clock-out of a specific worked period.

    :param shift_id: integer
    :param period_id: integer
    :param start_hour: integer
    :param start_minute: integer
    :param end_hour: integer
    :param end_minute: integer
    """
    body = {
        'clock_in': f"{start_hour}:{start_minute}",
        'clock_out': f"{end_hour}:{end_minute}",
        'period_id': period_id,
    }
    response = self.session.patch(f'{self.SHIFT_URL}/{shift_id}', data=body)
    self.check_status_code(response.status_code, http_client.OK)
def add_observation(self, shift_id, observation=None):
    """Attach an observation text to a worked period.

    :param shift_id: integer
    :param observation: string
    """
    response = self.session.patch(
        url=f'{self.SHIFT_URL}/{shift_id}',
        data={'observations': observation},
    )
    self.check_status_code(response.status_code, http_client.OK)
| StarcoderdataPython |
6609012 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-16 01:03
from __future__ import unicode_literals
import common.models
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration (Django 1.11.7, see file header).
# Introduces the AddressAtLocation join table and exposes
# Location.addresses as a ManyToMany "through" it. Kept in generated
# form: the migration graph depends on these operations and dependencies.
class Migration(migrations.Migration):
    # Must be applied after the initial addresses migration and the
    # referenced locations migration.
    dependencies = [
        ('addresses', '0001_initial'),
        ('locations', '0004_auto_20171221_0046'),
    ]
    operations = [
        migrations.CreateModel(
            name='AddressAtLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addresses.Address')),
                ('address_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addresses.AddressType')),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location')),
            ],
            # ValidateOnSaveMixin comes from common.models (behavior
            # defined there — not visible in this file).
            bases=(common.models.ValidateOnSaveMixin, models.Model),
        ),
        migrations.AddField(
            model_name='location',
            name='addresses',
            field=models.ManyToManyField(related_name='locations', through='locations.AddressAtLocation', to='addresses.Address'),
        ),
    ]
| StarcoderdataPython |
1758256 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.init as init
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
def set_dropout_prob(p):
    """Set the module-level dropout probability ``dropout_p``."""
    global dropout_p
    dropout_p = p
def set_seq_dropout(option): # option = True or False
    """Set the global flag that makes ``dropout()`` use sequence dropout."""
    global do_seq_dropout
    do_seq_dropout = option
def seq_dropout(x, p=0, training=False):
    """Apply dropout with one mask shared across the sequence dimension.

    A single Bernoulli keep-mask of shape (batch, input_size) is sampled
    and broadcast over dim 1, so every position of a sequence drops the
    same feature channels. Kept values are scaled by 1/(1-p)
    (inverted dropout), so no rescaling is needed at eval time.

    :param x: tensor of shape batch * len * input_size
    :param p: float drop probability
    :param training: bool; the input is returned unchanged when False
    :return: tensor with the same shape as ``x``
    """
    # Idiom fix: test truthiness instead of comparing with ``== False``.
    if not training or p == 0:
        return x
    keep = 1 - p
    dropout_mask = Variable(
        # bernoulli(keep * ones) gives 1 with prob keep; dividing by keep
        # applies the inverted-dropout scaling in the same expression.
        torch.bernoulli(keep * (x.data.new(x.size(0), x.size(2)).zero_() + 1)) / keep,
        requires_grad=False,
    )
    return dropout_mask.unsqueeze(1).expand_as(x) * x
def dropout(x, p=0, training=False):
    """Dropout dispatcher: sequence-level dropout for 3-D inputs when enabled.

    When the global ``do_seq_dropout`` flag is set and ``x`` is shaped
    (batch, len, input_size), ``seq_dropout`` is used; otherwise plain
    ``F.dropout`` is applied.

    :param x: (batch * len * input_size) or (any other shape)
    """
    is_sequence_shaped = len(x.size()) == 3
    if do_seq_dropout and is_sequence_shaped:
        return seq_dropout(x, p=p, training=training)
    return F.dropout(x, p=p, training=training)
6481549 | import numpy as np
import random
from nltk.corpus import wordnet as wn
import tensorflow as tf
# TensorFlow command-line flag registry for this module.
flags = tf.app.flags
FLAGS = flags.FLAGS
# --action disables one substitution direction in get_word()
# ('no_up', 'no_down', 'no_same', 'no_updownsame'); empty = all enabled.
flags.DEFINE_string('action', '', '')
def get_word(vocab=None, action = None, this_word=None):
    """Return the vocab id of a WordNet-based substitute for ``this_word``.

    Depending on ``action``, a replacement is drawn from the word's
    hyponyms (1), hypernyms (2), other lemmas of the same synsets (3), or
    co-hyponyms / siblings (4). Candidates must be in the vocabulary and
    must not be a substring of (or contain) the original word. The
    original word's id is returned when no candidate exists, the action
    is 0 (or unknown), the word is shorter than 3 characters, WordNet does
    not know it, or the direction is disabled via ``FLAGS.action``.

    :param vocab: vocabulary exposing ``word2id``/``_word2id``
    :param action: int action index (0..4)
    :param this_word: str word to replace
    :return: int vocabulary id
    """
    # Per-direction kill switches driven by the --action flag.
    if FLAGS.action == 'no_up' and action == 2:
        return vocab.word2id(this_word)
    if FLAGS.action == 'no_down' and action == 1:
        return vocab.word2id(this_word)
    if FLAGS.action == 'no_same' and action == 3:
        return vocab.word2id(this_word)
    if FLAGS.action == 'no_updownsame' and action == 4:
        return vocab.word2id(this_word)
    # Keep the word when WordNet has no entry, no action is requested, or
    # the word is too short to replace safely.
    if not wn.synsets(this_word) or action == 0 or len(this_word) < 3:
        return vocab.word2id(this_word)

    def usable(word):
        # Candidate filter: in-vocabulary and not a sub/super-string of
        # the original word.
        return (word in vocab._word2id
                and word not in this_word
                and this_word not in word)

    # Robustness fix: the list is initialised up front so an unexpected
    # ``action`` value falls through to "keep the word" instead of
    # raising NameError on the emptiness check below.
    word_candidate = []
    for synset in wn.synsets(this_word):
        if action == 1:
            related = [w for hyp in synset.hyponyms() for w in hyp.lemma_names()]
        elif action == 2:
            related = [w for hyp in synset.hypernyms() for w in hyp.lemma_names()]
        elif action == 3:
            related = synset.lemma_names()
        elif action == 4:
            # Siblings: hyponyms of the word's hypernyms.
            related = [w for hyp in synset.hypernyms()
                       for down_hyp in hyp.hyponyms()
                       for w in down_hyp.lemma_names()]
        else:
            related = []
        word_candidate.extend(w for w in related if usable(w))
    if not word_candidate:
        return vocab.word2id(this_word)
    # Deterministic choice: the lexicographically smallest candidate
    # (equivalent to sorted(word_candidate)[0]).
    return vocab.word2id(min(word_candidate))
def mrpc_gene_new_sentence(vocab,
                           action_logit,
                           sentence,
                           sentence_len,
                           mode=None):
    """Propagate word substitutions across the two sentences of MRPC pairs.

    Each row of ``sentence`` is a token-id array laid out as
    [CLS] sentence-1 [SEP] sentence-2 [SEP], where the separator has id
    102 (presumably BERT's [SEP] — TODO confirm against the tokenizer).
    The per-position action with the highest logit is applied via
    ``get_word``; when the substitute differs from the original token,
    every matching token in the OTHER sentence is replaced by it.
    NOTE(review): the scanned sentence itself is never modified — confirm
    this asymmetry is intended.

    :param vocab: vocabulary exposing id2word/word2id (used by get_word)
    :param action_logit: array (batch, seq_len, n_actions) of logits
    :param sentence: int array (batch, seq_len); mutated in place
    :param sentence_len: per-row token counts (specials included)
    :param mode: 1 = scan sentence 1, mirror into sentence 2;
        2 = scan sentence 2, mirror into sentence 1
    :return: the (mutated) ``sentence`` array
    """
    # Greedy: pick the highest-scoring action at each position.
    action_idx = np.argmax(action_logit,axis=2)
    for sent_idx in range(sentence.shape[0]):
        new_sent = sentence[sent_idx,:]
        sent_action = action_idx[sent_idx,:]
        # Skip [CLS] at position 0 and the trailing separator.
        first_sent_start_idx = 1
        second_sent_end_idx = sentence_len[sent_idx]-2
        # Locate the first id-102 separator splitting the two sentences.
        # NOTE(review): if no 102 token exists, first_sent_end_idx is
        # never bound and the slicing below raises — verify inputs.
        for idx,word_id in enumerate(new_sent):
            if word_id==102:
                first_sent_end_idx = idx-1
                second_sent_start_idx = idx+1
                break
        if mode==1:
            for idx,this_action in enumerate(sent_action[first_sent_start_idx:first_sent_end_idx+1],start=first_sent_start_idx):
                before_word = new_sent[idx]
                candidate_word = get_word(vocab,this_action,vocab.id2word(new_sent[idx]))
                if before_word != candidate_word:
                    # Mirror the substitution into matching tokens of sentence 2.
                    for second_sent_idx,second_sent_word in enumerate(new_sent[second_sent_start_idx:second_sent_end_idx+1],start=second_sent_start_idx):
                        if second_sent_word == before_word:
                            new_sent[second_sent_idx] = candidate_word
        elif mode==2:
            for idx,this_action in enumerate(sent_action[second_sent_start_idx:second_sent_end_idx+1],start=second_sent_start_idx):
                before_word = new_sent[idx]
                candidate_word = get_word(vocab,this_action,vocab.id2word(new_sent[idx]))
                if before_word != candidate_word:
                    # Mirror the substitution into matching tokens of sentence 1.
                    for first_sent_idx,first_sent_word in enumerate(new_sent[first_sent_start_idx:first_sent_end_idx+1],start=first_sent_start_idx):
                        if first_sent_word == before_word:
                            new_sent[first_sent_idx] = candidate_word
    return sentence
def single_sentence_generator(vocab=None,
                              action=None,
                              sentence=None,
                              sentence_len=None,
                              print_log=None):
    """Sample one action per token and rewrite each sentence in place.

    ``action`` holds, per sentence and position, a probability
    distribution over the available actions; one action index is sampled
    at every position and ``get_word`` applies the corresponding WordNet
    substitution to the token.

    :param vocab: vocabulary passed through to ``get_word``
    :param action: array-like (batch, seq_len, n_actions) of probabilities
    :param sentence: int array (batch, seq_len) of word ids, mutated in place
    :param sentence_len: per-sentence token counts
    :param print_log: unused; kept for interface compatibility
    :return: tuple (sentence, sampled action indices)
    """
    # Sample an action index at every position from its distribution.
    sampled = []
    for sent_probs in action:
        sampled.append([
            np.random.choice(list(range(len(pos_probs))), 1, replace=True, p=pos_probs)[0]
            for pos_probs in sent_probs
        ])
    action_idx = np.array(sampled)
    for row in range(sentence.shape[0]):
        current = sentence[row, :]
        # Only act on positions before the sentence's final token.
        for col, chosen in enumerate(action_idx[row, :sentence_len[row] - 1]):
            current[col] = get_word(vocab, chosen, vocab.id2word(current[col]))
    return sentence, action_idx
| StarcoderdataPython |
11253936 | # Lib imports
import os
from flask import Flask, render_template, jsonify, request
from flask_alembic import Alembic
from flask_caching import Cache
from flask_compress import Compress
from flask_cors import CORS
from flask_language import Language, current_language
from flask_mail import Mail
from flask_marshmallow import Marshmallow
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_sqlalchemy import SQLAlchemy
# from plaid import Client
# from elasticapm.contrib.flask import ElasticAPM
# from elasticsearch import Elasticsearch
from werkzeug.middleware.proxy_fix import ProxyFix
from config import config
# from keycloak import KeycloakOpenID, KeycloakAdmin
# from twilio.rest import Client as TwilioClient
# Library declarative.
# Shared Flask extension singletons; each one is bound to the application
# instance inside create_app().
db = SQLAlchemy()
ma = Marshmallow()
migrate = Migrate()
manager = Manager()
alembic = Alembic()
mail = Mail()
cache = Cache()
# celery = Celery()
# redis = Redis()
language = Language()
compress = Compress()
# apm = ElasticAPM()
# Configuration class selected via the APP_ENV environment variable
# (defaults to DEVELOPMENT).
Config = config[os.getenv('APP_ENV', "DEVELOPMENT")]
def create_app(environment):
    """Application factory: build and configure a Flask app.

    Binds every shared extension singleton (db, ma, cache, ...) to a new
    Flask instance for the given configuration name, registers the
    language endpoints and the blueprints, and installs the 404 handler.

    :param environment: key into ``config`` (e.g. "DEVELOPMENT")
    :return: the configured ``Flask`` application
    """
    # Init flask
    app = Flask(__name__, static_folder='./static',
                template_folder='./templates')
    # Init flask configurations.
    app.config.from_object(config[environment])
    app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
    # Enabling the cross origin using the cors.
    CORS(app)
    # Attach celery
    # celery.config_from_object(config[environment])
    # Init flask Alembic
    alembic.init_app(app)
    # Init mailRedis
    mail.init_app(app)
    # Init SQLAlchemy.
    with app.app_context():
        db.init_app(app)
    # Init Marshmallow.
    with app.app_context():
        ma.init_app(app)
    # Init flask compress
    with app.app_context():
        compress.init_app(app)
    # Init application migrations (Flask-Migrate and Flask-Script)
    with app.app_context():
        migrate.init_app(app, db)
    with app.app_context():
        manager.__init__(app)
        manager.add_command('database', MigrateCommand)
    # # Init Elasticsearch
    # # with app.app_context():
    # #     es.init_app(app)
    #
    # Init Flask Cache
    with app.app_context():
        cache.init_app(app)
    # Init Flask Redis
    with app.app_context():
        # redis.init_app(app)
        # NOTE(review): this calls cache.init_app a second time (see the
        # "Init Flask Cache" block above) instead of a redis client —
        # looks like a leftover of the disabled Redis integration.
        cache.init_app(app)
    # Init Flask apm for logging error on elasticsearch
    # try:
    #     with app.app_context():
    #         apm.init_app(app)
    # except Exception as e:
    #     print(str(e))
    # Init Flask-Language
    with app.app_context():
        language.init_app(app)
    @language.allowed_languages
    def get_allowed_languages():
        return ['en', 'fr']
    @language.default_language
    def get_default_language():
        return 'en'
    @app.route('/api/language')
    def get_language():
        # Report the language currently selected for the client.
        return jsonify({
            'language':str(current_language)
        })
    @app.route('/api/language', methods=['POST'])
    def set_language():
        # Switch the client language from the posted JSON body.
        req = request.get_json()
        lang = req.get('language', None)
        language.change_language(lang)
        return jsonify({
            'language': str(current_language)
        })
    # Importing and registering blueprints.
    from .v1 import (v1)
    app.register_blueprint(v1)
    # Sample HTTP error handling
    # Registering blueprints.
    from .public import public_blueprint
    app.register_blueprint(public_blueprint)
    # Init server name to blueprint
    # with app.app_context():
    #     assert url_for('api_v1.doc')
    # Sample HTTP error handling
    @app.errorhandler(404)
    def not_found(error):
        return render_template('v1/404.html'), 404
    return app
| StarcoderdataPython |
1943736 | <filename>test/e2e/main.py
import inspect
import os
import sys
import time
from difflib import Differ
from os.path import dirname
import argparse
import yaml
import requests
import time
from requests import Response
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
def validate(expected_file_name):
    """POST the expected data to the validator and print a diff on mismatch.

    Sends the expected document to the collector's ``/dataValidate``
    endpoint; when validation fails (non-200), fetches the actually
    received data from ``/receiveData`` and prints a line diff between
    actual and expected (both normalised through a YAML round-trip).

    :param expected_file_name: path of the file with the expected payload
    :return: the ``requests.Response`` of the validation call
    """
    # BUG FIX: the file used to be read as
    # ``os.linesep.join(f.readlines())`` — readlines() keeps each line's
    # '\n' and join() inserts another separator between lines, corrupting
    # the payload with blank lines. Read the file verbatim instead.
    with open(expected_file_name) as expected_data_file:
        expected_data = expected_data_file.read()
    response = requests.post(url='http://0.0.0.0:12800/dataValidate', data=expected_data)
    if response.status_code != 200:
        res = requests.get('http://0.0.0.0:12800/receiveData')
        actual_data = yaml.dump(yaml.load(res.content, Loader=Loader))
        differ = Differ()
        diff_list = list(differ.compare(
            actual_data.splitlines(keepends=True),
            yaml.dump(yaml.load(expected_data, Loader=Loader)).splitlines(keepends=True)
        ))
        print('diff list: ')
        sys.stdout.writelines(diff_list)
    return response
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--expected_file', help='File name which includes expected reported value')
    parser.add_argument('--max_retry_times', help='Max retry times', type=int)
    parser.add_argument('--target_path', help='Specify target path')
    args = parser.parse_args()
    retry_times = 0
    # Poll the service under test until it answers (or retries are
    # exhausted), then validate the data it reported, exactly once.
    while True:
        if retry_times > args.max_retry_times:
            raise RuntimeError("Max retry times exceeded")
        try:
            # Probe the target endpoint; any error (connection refused,
            # timeout, ...) counts as "not ready yet".
            requests.get('http://0.0.0.0:8081{0}'.format(args.target_path), timeout=5)
        except Exception as e:
            print(e)
            retry_times += 1
            time.sleep(2)
            continue
        # Service is up: run the validation and stop.
        res = validate(args.expected_file)
        assert res.status_code == 200
        break
| StarcoderdataPython |
1897899 | <filename>chb/cmdline/mips/chx_get_callgraph_paths.py
#!/usr/bin/env python3
# ------------------------------------------------------------------------------
# Access to the CodeHawk Binary Analyzer Analysis Results
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Script to extract paths from the application callgraph."""
import argparse
import chb.util.fileutil as UF
import chb.util.DotGraph as DG
import chb.util.dotutil as UD
import chb.app.Callgraph as CG
import chb.app.AppAccess as AP
def parse():
    """Build the command-line interface and parse ``sys.argv``.

    :return: parsed ``argparse.Namespace`` with the script's options
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument('filename',help='name of executable')
    argparser.add_argument('src',help='address of starting function')
    argparser.add_argument('--dst',help='name or address of destination function')
    argparser.add_argument('--countcfgpaths',help='count the numher of paths through cfgs',
                           action='store_true')
    argparser.add_argument('--graph',help='produce a graphical representation using dot',
                           action='store_true')
    argparser.add_argument('--reverse',help='reverse the call graph',action='store_true')
    return argparser.parse_args()
if __name__ == '__main__':
    args = parse()
    # Locate the CodeHawk analysis artifacts for the executable; abort
    # with the wrapped error message when they are missing.
    try:
        (path,filename) = UF.get_path_filename('mips-elf',args.filename)
        UF.check_analysis_results(path,filename)
    except UF.CHBError as e:
        print(str(e.wrap()))
        exit(1)
    app = AP.AppAccess(path,filename,mips=True)
    # A hex src must name an existing function in the analysis results.
    if args.src.startswith('0x'):
        if not app.has_function(args.src):
            print('*' * 80)
            print('No function found with address ' + args.src)
            print('*' * 80)
            exit(1)
    callgraph = CG.Callgraph(app)
    def getname(n):
        # Render "name (0xaddr)" when the address has a known symbol.
        # NOTE: redefined below (under --graph) with a label-only format.
        if n.startswith('0x') and app.has_function_name(n):
            return app.get_function_name(n) + ' (' + n + ')'
        else:
            return n
    if args.reverse:
        paths = callgraph.get_reverse_paths(args.src)
    else:
        paths = callgraph.get_paths(args.src,args.dst)
    for p in paths:
        print(', '.join(getname(n) for n in p))
    pathcounts = {} # (src,dst) -> number of paths through src cfg to reach dst
    callgraphpathlengths = {} # maximum length in basic blocks through all cfgs in path
    if args.countcfgpaths:
        for p in paths:
            pname = '_'.join([ str(n) for n in p ])
            callgraphpathlengths[pname] = 0
            for i in range(len(p) - 1):
                # Each caller/callee edge is counted only once.
                if (p[i],p[i+1]) in pathcounts: continue
                f = app.get_function(p[i])
                instrs = f.get_call_instructions_to_target(p[i+1])
                blocks = [ instr.mipsblock.baddr for instr in instrs ]
                for b in blocks:
                    # Count CFG paths reaching each call site block.
                    bcfgpaths = f.cfg.get_paths(b)
                    callgraphpathlengths[pname] += max( [ len(bp.path) for bp in bcfgpaths ])
                    pathcounts.setdefault((p[i],p[i+1]),0)
                    pathcounts[(p[i],p[i+1])] += len(bcfgpaths)
        for e in sorted(pathcounts):
            print(str(e) + ': ' + str(pathcounts[e]))
        for p in sorted(callgraphpathlengths):
            print(str(p) + ': ' + str(callgraphpathlengths[p]))
    if args.graph:
        def getname(n):
            # Label-only variant used for dot node labels.
            if n.startswith('0x') and app.has_function_name(n):
                return app.get_function_name(n)
            else:
                return n
        def getcolor(n):
            # Functions whose CFG contains loops are highlighted in red.
            if n.startswith('0x'):
                f = app.get_function(n)
                if f.cfg.has_loops():
                    return 'red'
                else:
                    return None
            return None
        dst = '_all_' if args.dst is None else args.dst
        graphname = 'callgraph_' + args.src + '_' + dst
        dotgraph = DG.DotGraph(graphname)
        dotgraph.set_left_to_right()
        for p in paths:
            for i in range(len(p) - 1):
                dotgraph.add_node(p[i],labeltxt=getname(p[i]),color=getcolor(p[i]))
                dotgraph.add_node(p[i+1],labeltxt=getname(p[i+1]),color=getcolor(p[i+1]))
                # Edge labels carry the CFG path counts when available.
                if (p[i],p[i+1]) in pathcounts:
                    labeltxt = str(pathcounts[(p[i],p[i+1])])
                else:
                    labeltxt = None
                dotgraph.add_edge(p[i],p[i+1],labeltxt=labeltxt)
        pdffilename = UD.print_dot(app.path,filename,dotgraph)
        print('~' * 80)
        print('Restricted call graph for ' + filename + ' has been saved in '
              + pdffilename)
        print('~' * 80)
| StarcoderdataPython |
8163800 | """initial revision
Revision ID: 352a78d78066
Revises:
Create Date: 2017-10-03 00:30:04.946456
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '352a78d78066'   # id of this migration
down_revision = None        # root of the migration history
branch_labels = None
depends_on = None
def upgrade():
    """Apply the initial schema: students, users and submissions tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('students',
    sa.Column('student_id', sa.Integer(), nullable=False),
    sa.Column('first_name', sa.String(length=64), nullable=False),
    sa.Column('last_name', sa.String(length=64), nullable=False),
    sa.Column('avg_grade', sa.Float(precision=32), nullable=True),
    sa.PrimaryKeyConstraint('student_id')
    )
    # users optionally link to a student record.
    op.create_table('users',
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('first_name', sa.String(length=64), nullable=True),
    sa.Column('last_name', sa.String(length=64), nullable=True),
    sa.Column('last_seen_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['student_id'], ['students.student_id'], ),
    sa.PrimaryKeyConstraint('user_id')
    )
    # submissions default their timestamp to NOW() on the database side.
    op.create_table('submissions',
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('student_id', sa.Integer(), nullable=True),
    sa.Column('hw_id', sa.Integer(), nullable=False),
    sa.Column('ordinal', sa.Integer(), nullable=False),
    sa.Column('submitted_at', sa.DateTime(), server_default=sa.text('NOW()'), nullable=False),
    sa.Column('grade', sa.Float(precision=32), nullable=True),
    sa.ForeignKeyConstraint(['student_id'], ['students.student_id'], ),
    sa.PrimaryKeyConstraint('submission_id'),
    sa.UniqueConstraint('hw_id', 'student_id', 'submission_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the initial schema; referencing tables are dropped before
    students to satisfy the foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('submissions')
    op.drop_table('users')
    op.drop_table('students')
    # ### end Alembic commands ###
| StarcoderdataPython |
6557100 | <filename>twitter-analytics/code/1-training_data_preparation/mturk/check_hit_status.py
import boto3
import os
import argparse
def get_args_from_command_line():
    """Parse the command line arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--HITId", type=str)
    return arg_parser.parse_args()
def main():
    """Fetch the HIT named on the command line and print all its attributes."""
    # Get args
    args = get_args_from_command_line()
    # Load mturk client
    # Credentials are read from two flat files under a shared keys path.
    keys_path = '/scratch/mt4493/twitter_labor/twitter-labor-data/data/keys/mturk'
    with open(os.path.join(keys_path, 'access_key_id.txt'), 'r') as f:
        access_key_id = f.readline().strip()
    with open(os.path.join(keys_path, 'secret_access_key.txt'), 'r') as f:
        secret_access_key = f.readline().strip()
    mturk = boto3.client('mturk',
                         aws_access_key_id=access_key_id,
                         aws_secret_access_key=secret_access_key,
                         region_name='us-east-1',
                         endpoint_url='https://mturk-requester.us-east-1.amazonaws.com'
                         )
    # Get hit status and print
    response = mturk.get_hit(HITId=args.HITId)
    for key, value in response['HIT'].items():
        print(key, ' : ', value)


if __name__ == '__main__':
    main()
5095241 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: generated by AutoRest (see file header); manual changes will be
# lost when the client is regenerated.
class AssociationRequestDto(Model):
    """AssociationRequestDto.

    :param associated_dataset_id:
    :type associated_dataset_id: str
    :param associated_dataset_type:
    :type associated_dataset_type: str
    :param relation:
    :type relation: str
    """
    # Wire-format mapping used by msrest (de)serialization: python
    # attribute -> JSON key and type.
    _attribute_map = {
        'associated_dataset_id': {'key': 'associatedDatasetId', 'type': 'str'},
        'associated_dataset_type': {'key': 'associatedDatasetType', 'type': 'str'},
        'relation': {'key': 'relation', 'type': 'str'},
    }
    def __init__(self, associated_dataset_id=None, associated_dataset_type=None, relation=None):
        super(AssociationRequestDto, self).__init__()
        self.associated_dataset_id = associated_dataset_id
        self.associated_dataset_type = associated_dataset_type
        self.relation = relation
3322943 | #!/usr/bin/env python
from cement.core import foundation, controller
from lib import get_backend, set_backend
from monitor import *
from pprint import PrettyPrinter
# Module-wide pretty printer and the storage backend configured via lib.
pprint = PrettyPrinter(indent=2).pprint
backend = get_backend()
def my_split(p):
    """Split each ``key=value`` token of *p* into a dict entry.

    :param p: iterable of ``key=value`` strings, or None.
    :return: dict mapping keys to values; empty dict when *p* is None.
    :raises Exception: when a token contains no '=' at all.
    """
    if p is None:
        return {}
    delim = '='
    def mini_split(t):
        # Split on the FIRST '=' only, so values may themselves contain
        # '=' (e.g. "query=a=b").  The previous unbounded split produced
        # 3+ items for such tokens and made dict() fail.
        splitted = t.split(delim, 1)
        if len(splitted) < 2:
            raise Exception("could not split '{0}' based on '{1}'".format(t, delim))
        return splitted
    return dict(map(mini_split, p))
# define an application base controller
class MyAppBaseController(controller.CementBaseController):
    """Base cement controller exposing the reporter CLI commands.

    NOTE(review): this module uses Python 2 ``print`` statements, so it
    only runs under Python 2.
    """

    class Meta:
        label = 'base'
        description = "My Application does amazing things!"
        # the arguments recieved from command line
        arguments = [
            (['-r', '--retrieve-monitoring'], dict(action='store_true', help='retrieve the monitroing metrics from their temp file')),
            (['-m', '--metrics'], dict(action='store', help='the metrics to report', nargs='*')),
            (['-b', '--backend'], dict(action='store', help='the backend configuration parameters', nargs='*')),
            (['-e', '--experiment-name'], dict(action='store', help='the name of the reported experiment')),
            (['-q', '--query'], dict(action='store', help='the query to execute in the backend storage system')),
            (['-pp', '--plot-params'], dict(action='store', help='parameters of the plot', nargs='*')),
            (['-dict',], dict(action='store_true', help='get the query result in a dict')),
            (['-cm', '--collect-metrics'], dict(action='store_true', help='collect the metrics of an active monitoring process')),
            (['-cs', '--collect-streaming-metrics'], dict(action='store_true', help='collect the metrics of an finished streaming experiment'))
        ]

    @controller.expose(hide=True, aliases=['run'])
    def default(self):
        # Fallback when no sub-command is given: point the user at --help.
        self.app.log.error('You need to choose one of the options, or -h for help.')

    @controller.expose(help='show examples of execution')
    def show_examples(self):
        # Prints a cheat-sheet of typical invocations.
        print \
            """
            # set a sqlite reporting backend with a specific sqlite file
            ./reporter_cli.py set-backend -b backend=sqlitebackend file=my_database.db
            # Report, for experiment 'my_experiment', some metrics and their values
            ./reporter_cli.py report -e my_experiment -m metric1=test metric2=2
            # plot a timeline of metric 'my_metric'
            ./reporter_cli.py plot-query -q "select cast(strftime('%s',date) as long) , my_metric from my_table;" -pp xlabel=bull title='my title'
            """

    @controller.expose(aliases=['set-backend'])
    def set_reporting_backend(self):
        # Persist the backend configuration passed as key=value pairs.
        self.app.log.info("Setting reporting back-end")
        if self.app.pargs.backend:
            conf = my_split(self.app.pargs.backend)
            set_backend(conf)
        else:
            self.app.log.error('No backend conf specified')

    @controller.expose()
    def show_backend(self):
        # Dump the currently configured backend object.
        self.app.log.info("Showing reporting back-end")
        print backend

    @controller.expose(help="store the required params", aliases=['r'])
    def report(self):
        # Gather metrics from all configured sources and push them to the backend.
        experiment = self.app.pargs.experiment_name
        if not experiment:
            self.app.log.error("No experiment name provided. Please use the -e/--experiment-name parameter ")
            exit()
        metrics ={}
        cli_metrics = my_split(self.app.pargs.metrics) # metrics from cmd args
        # metrics stored into a file in the past
        file_metrics = collect_future_metrics()
        streaming_metrics = ganglia_metrics = {}
        if self.app.pargs.collect_streaming_metrics:
            # wait for and collect the streaming metrics if required
            streaming_metrics = collect_streaming_metrics()
        if self.app.pargs.collect_metrics:
            # collect ganglia monitoring metrics if required
            ganglia_metrics = collect_ganglia_metrics()
        # update the metrics variable so that common common entries (if any) follow the priority
        # 1)cli 2)future file 3)streaming 4)ganglia
        metrics.update(ganglia_metrics)
        metrics.update(streaming_metrics)
        metrics.update(file_metrics)
        metrics.update(cli_metrics)
        # report the metrics to the backend
        backend.report_dict(experiment, metrics)

    @controller.expose(help="execute a query to the backend and prints the results")
    def query(self):
        # Run a raw query; -dict switches to a dict-shaped result set.
        if self.app.pargs.dict:
            res = backend.dict_query(self.app.pargs.experiment_name, self.app.pargs.query)
            pprint(res)
        else:
            res = backend.query(self.app.pargs.experiment_name, self.app.pargs.query)
            for r in res:
                print r

    @controller.expose(help="store some metrics in a local file so that they can be reported later")
    def future_report(self):
        # Stash key=value metrics locally; picked up later by ``report``.
        metrics = my_split(self.app.pargs.metrics)
        store_future_metrics(metrics)

    @controller.expose(help="execute a query to the backend and plot the results")
    def plot_query(self):
        # Optional plot parameters arrive as key=value pairs.
        pparams = self.app.pargs.plot_params
        if pparams is not None: pparams = my_split(self.app.pargs.plot_params)
        else: pparams = {}
        backend.plot_query(self.app.pargs.experiment_name, self.app.pargs.query, **pparams)
class MyApp(foundation.CementApp):
    """Cement application shell; all commands live on the base controller."""
    class Meta:
        label = 'reporter'
        base_controller = MyAppBaseController
# Entry point: build the app, parse argv and dispatch to the controller.
with MyApp() as app:
    app.run()
9765719 | <gh_stars>10-100
import re
def get_content(soup):
    """Retrieves contents of the article.

    Tries a series of site-specific container heuristics in priority
    order and returns the collected paragraph text; when none match,
    falls back to harvesting every plausible <p> tag on the page.

    :param soup: a parsed document exposing ``find_all``.
    :return: the article text as a single string.
    """
    # heuristics
    div_tags = soup.find_all('div', id='articleContentBody')
    div_tags_2 = soup.find_all('div', class_='ArticleText')
    div_tags_3 = soup.find_all('div', id='ArticleText')
    div3 = soup.find_all('div', id='article_content')
    div4 = soup.find_all('div', class_='articleBodyText')
    div5 = soup.find_all('div', class_='story-container')
    div_tags_l = soup.find_all('div', id=re.compile('article'))
    div6 = soup.find_all('div', class_='kizi-honbun')
    div7 = soup.find_all('div', class_='main-text')
    rest = soup.find_all(id='articleText')
    if div_tags:
        return collect_content(div_tags)
    elif div_tags_2:
        return collect_content(div_tags_2)
    elif div_tags_3:
        # BUG FIX: this branch previously passed ``div_tags`` (known to be
        # empty when this branch is reached) instead of ``div_tags_3``, so
        # the 'ArticleText' id heuristic always produced an empty result.
        return collect_content(div_tags_3)
    elif div3:
        return collect_content(div3)
    elif div4:
        return collect_content(div4)
    elif div5:
        return collect_content(div5)
    elif div_tags_l and len(collect_content(div_tags_l)) > 0:
        return collect_content(div_tags_l)
    elif div6:
        return collect_content(div6)
    elif div7:
        return collect_content(div7)
    elif rest:
        return collect_content(rest)
    else:
        # contingency: collect every non-empty <p>, drop markup/script noise
        c_list = [v.text for v in soup.find_all('p') if len(v.text) > 0]
        words_to_bans = ['<', 'javascript']
        for word_to_ban in words_to_bans:
            c_list = list(filter(lambda x: word_to_ban not in x.lower(), c_list))
        # Keep only texts whose share of ASCII letters is low -- presumably
        # targeting mostly non-Latin article text (TODO confirm intent).
        clean_html_ratio_letters_length = 0.33
        c_list = [t for t in c_list if
                  len(re.findall(r'[a-z]', t.lower())) / (
                      len(t) + 1) < clean_html_ratio_letters_length]
        content = ' '.join(c_list)
        content = content.replace('\n', ' ')
        content = re.sub(r'\s\s+', ' ', content)  # remove multiple spaces.
        return content
def collect_content(parent_tag):
    """Concatenate the text of every <p> child of the given tags.

    Each paragraph's text is followed by a newline; the result is the
    concatenation over all containers, or '' when nothing is found.
    """
    pieces = []
    for container in parent_tag:
        for paragraph in container.find_all('p'):
            pieces.append(paragraph.text + '\n')
    return ''.join(pieces)
def get_title(soup):
    """Retrieves Title of Article. Use Google truncated title trick instead.

    Tries several markup heuristics in priority order.  NOTE(review): when
    the ``div_tags`` branch is taken but no <h1> inside has text, the
    function falls through every other heuristic and implicitly returns
    None -- confirm callers handle that.
    """
    # Heuristics
    div_tags = soup.find_all('div', class_='Title')
    article_headline_tags = soup.find_all('h1', class_='article-headline')
    headline_tags = soup.find_all('h2', id='main_title')
    hl = soup.find_all(class_='Title')
    all_h1_tags = soup.find_all('h1')
    title_match = soup.find_all(class_=re.compile('title'))
    Title_match = soup.find_all(class_=re.compile('Title'))
    headline_match = soup.find_all(class_=re.compile('headline'))
    item_prop_hl = soup.find_all(itemprop='headline')
    # itemprop="headline" is the most reliable signal, checked first.
    if item_prop_hl:
        return item_prop_hl[0].text
    if div_tags:
        for tag in div_tags:
            h1Tag = tag.find_all('h1')
            # NOTE: the inner loop reuses the name ``tag``, shadowing the
            # outer loop variable.
            for tag in h1Tag:
                if tag.text:
                    return tag.text
    elif article_headline_tags:
        # Returns the first headline's text on the first iteration.
        for tag in article_headline_tags:
            return tag.text
    elif headline_tags:
        for tag in headline_tags:
            return tag.text
    elif headline_match:
        return headline_match[0].text
    elif all_h1_tags:
        return all_h1_tags[0].text
    elif hl:
        return hl[0].text
    else:
        if title_match:
            return title_match[0].text
        elif Title_match:
            return Title_match[0].text
        else:
            return ""
| StarcoderdataPython |
6635616 | """
Defines the Location widget which allows changing the href of the window.
"""
import json
import urllib.parse as urlparse
import param
from ..models.location import Location as _BkLocation
from ..reactive import Syncable
from ..util import parse_query
from .state import state
class Location(Syncable):
    """
    The Location component can be made available in a server context
    to provide read and write access to the URL components in the
    browser.
    """

    href = param.String(readonly=True, doc="""
        The full url, e.g. 'https://localhost:80?color=blue#interact'""")

    hostname = param.String(readonly=True, doc="""
        hostname in window.location e.g. 'panel.holoviz.org'""")

    pathname = param.String(regex=r"^$|[\/].*$", doc="""
        pathname in window.location e.g. '/user_guide/Interact.html'""")

    protocol = param.String(readonly=True, doc="""
        protocol in window.location e.g. 'http:' or 'https:'""")

    port = param.String(readonly=True, doc="""
        port in window.location e.g. '80'""")

    search = param.String(regex=r"^$|\?", doc="""
        search in window.location e.g. '?color=blue'""")

    hash = param.String(regex=r"^$|#", doc="""
        hash in window.location e.g. '#interact'""")

    reload = param.Boolean(default=False, doc="""
        Reload the page when the location is updated. For multipage
        apps this should be set to True, For single page apps this
        should be set to False""")

    # Mapping from parameter name to bokeh model property name
    _rename = {"name": None}

    def __init__(self, **params):
        super().__init__(**params)
        # List of (parameterized, parameter-mapping, watcher, on_error)
        # tuples registered through sync().
        self._synced = []
        # Re-entrancy guard so URL updates and parameter updates do not
        # trigger each other in a loop.
        self._syncing = False
        self.param.watch(self._update_synced, ['search'])

    def _get_model(self, doc, root=None, parent=None, comm=None):
        # Build the bokeh Location model and link its properties back to
        # this object's parameters.
        model = _BkLocation(**self._process_param_change(self._init_params()))
        root = root or model
        values = dict(self.param.get_param_values())
        properties = list(self._process_param_change(values))
        self._models[root.ref['id']] = (model, parent)
        self._link_props(model, properties, doc, root, comm)
        return model

    def _get_root(self, doc=None, comm=None):
        # Register the model as a root view so it is cleaned up with the doc.
        root = self._get_model(doc, comm=comm)
        ref = root.ref['id']
        state._views[ref] = (self, root, doc, comm)
        self._documents[doc] = root
        return root

    def _cleanup(self, root):
        # Drop all bookkeeping for a discarded root model.
        if root.document in self._documents:
            del self._documents[root.document]
        ref = root.ref['id']
        super()._cleanup(root)
        if ref in state._views:
            del state._views[ref]

    def _update_synced(self, event=None):
        # Push current URL query values onto every synced Parameterized,
        # deserializing where possible and only applying changed values.
        if self._syncing:
            return
        query_params = self.query_params
        for p, parameters, _, on_error in self._synced:
            # Invert the parameter->query-name mapping for lookup by query key.
            mapping = {v: k for k, v in parameters.items()}
            mapped = {}
            for k, v in query_params.items():
                if k not in mapping:
                    continue
                pname = mapping[k]
                try:
                    v = p.param[pname].deserialize(v)
                except Exception:
                    pass
                try:
                    equal = v == getattr(p, pname)
                except Exception:
                    equal = False
                if not equal:
                    mapped[pname] = v
            try:
                p.param.set_param(**mapped)
            except Exception:
                if on_error:
                    on_error(mapped)

    def _update_query(self, *events, query=None):
        # Serialize changed parameter values into the URL query string.
        if self._syncing:
            return
        serialized = query or {}
        for e in events:
            matches = [ps for o, ps, _, _ in self._synced if o in (e.cls, e.obj)]
            if not matches:
                continue
            owner = e.cls if e.obj is None else e.obj
            try:
                val = owner.param[e.name].serialize(e.new)
            except Exception:
                val = e.new
            if not isinstance(val, str):
                val = json.dumps(val)
            serialized[matches[0][e.name]] = val
        # Guard against _update_synced re-triggering while we write back.
        self._syncing = True
        try:
            self.update_query(**{k: v for k, v in serialized.items() if v is not None})
        finally:
            self._syncing = False

    @property
    def query_params(self):
        # The current query string parsed into a dict.
        return parse_query(self.search)

    def update_query(self, **kwargs):
        # Merge *kwargs* into the existing query string.
        query = self.query_params
        query.update(kwargs)
        self.search = '?' + urlparse.urlencode(query)

    def sync(self, parameterized, parameters=None, on_error=None):
        """
        Syncs the parameters of a Parameterized object with the query
        parameters in the URL. If no parameters are supplied all
        parameters except the name are synced.

        Arguments
        ---------
        parameterized (param.Parameterized):
          The Parameterized object to sync query parameters with
        parameters (list or dict):
          A list or dictionary specifying parameters to sync.
          If a dictionary is supplied it should define a mapping from
          the Parameterized's parameteres to the names of the query
          parameters.
        on_error: (callable):
          Callback when syncing Parameterized with URL parameters
          fails. The callback is passed a dictionary of parameter
          values, which could not be applied.
        """
        parameters = parameters or [p for p in parameterized.param if p != 'name']
        if not isinstance(parameters, dict):
            parameters = dict(zip(parameters, parameters))
        watcher = parameterized.param.watch(self._update_query, list(parameters))
        self._synced.append((parameterized, parameters, watcher, on_error))
        self._update_synced()
        query = {}
        for p, name in parameters.items():
            v = getattr(parameterized, p)
            if v is None:
                continue
            try:
                # NOTE(review): the serialize() result is discarded here,
                # so the raw value is placed in the query -- looks like it
                # should be assigned back to ``v``; confirm intent.
                parameterized.param[p].serialize(v)
            except Exception:
                pass
            if not isinstance(v, str):
                v = json.dumps(v)
            query[name] = v
        self._update_query(query=query)

    def unsync(self, parameterized, parameters=None):
        """
        Unsyncs the parameters of the Parameterized with the query
        params in the URL. If no parameters are supplied all
        parameters except the name are unsynced.

        Arguments
        ---------
        parameterized (param.Parameterized):
          The Parameterized object to unsync query parameters with
        parameters (list or dict):
          A list of parameters to unsync.
        """
        matches = [s for s in self._synced if s[0] is parameterized]
        if not matches:
            ptype = type(parameterized)
            raise ValueError(f"Cannot unsync {ptype} object since it "
                             "was never synced in the first place.")
        synced = []
        for p, params, watcher, on_error in self._synced:
            if parameterized is p:
                # Always drop the old watcher; re-watch only the remaining
                # parameters when a partial unsync was requested.
                parameterized.param.unwatch(watcher)
                if parameters is not None:
                    new_params = {p: q for p, q in params.items()
                                  if p not in parameters}
                    new_watcher = parameterized.param.watch(watcher.fn, list(new_params))
                    synced.append((p, new_params, new_watcher, on_error))
            else:
                synced.append((p, params, watcher, on_error))
        self._synced = synced
| StarcoderdataPython |
9794914 | <gh_stars>100-1000
from typing import Text, Any, Dict, Optional, Union, List
import aiohttp
# Optional JSON-style mapping used for query params and form data.
ANY_DATA = Optional[Dict[Any, Any]]
async def get(url: Text, params: ANY_DATA = None) -> Union[List[Dict], Dict[Any, Any]]:
    """GET *url* (with optional query *params*) and return the JSON body."""
    async with aiohttp.ClientSession() as http_session:
        async with http_session.get(url, params=params) as response:
            payload = await response.json()
    return payload
async def post(url: Text, data: ANY_DATA = None, params: ANY_DATA = None) \
        -> Union[List[Dict], Dict[Any, Any]]:
    """POST *data* to *url* (with optional query *params*) and return the JSON body."""
    async with aiohttp.ClientSession() as http_session:
        async with http_session.post(url, params=params, data=data) as response:
            payload = await response.json()
    return payload
| StarcoderdataPython |
3240592 | # -*- coding: utf-8 -*-
"""
sphinx.directives.patches
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
class Figure(images.Figure):
    """A ``figure`` directive that applies the ``:name:`` option to the
    outer figure node instead of the inner image node."""

    def run(self):
        figure_name = self.options.pop('name', None)

        result = images.Figure.run(self)
        # Two nodes (or a leading system_message) means the base class
        # reported a problem; pass its output through untouched.
        if len(result) == 2 or isinstance(result[0], nodes.system_message):
            return result

        (figure_node,) = result
        if figure_name:
            # Re-insert the option so add_name() registers it on the
            # figure node itself.
            self.options['name'] = figure_name
            self.add_name(figure_node)

        # fill lineno using the contained image node
        if figure_node.line is None and len(figure_node) == 2:
            figure_node.line = figure_node[1].line

        return [figure_node]
# Replace docutils' built-in "figure" directive with the patched variant.
directives.register_directive('figure', Figure)
| StarcoderdataPython |
3259237 | from django.urls import path, include
from . import views
urlpatterns = [
    # Home Page URLs
    path('', views.home, name="home"),
    # BUG FIX: ``path()`` matches its pattern literally, so the old
    # regex-style string r'^logout/$' could never match any request;
    # plain ``path`` routes take simple path fragments (use ``re_path``
    # when a regex is actually needed).
    path('logout/', views.logoutUser, name="logout"),
    path('about/', views.about, name="about"),
    # Registrations
    path('customer-registration/', views.cusRegister,
         name="customer-registration"),
    path('restaurant-registration/', views.resRegister,
         name="restaurant-registration"),
    # login Pages
    path('res-login/', views.reslogin, name="reslogin"),
    path('cus-login/', views.cuslogin, name="cuslogin"),
    path('restaurant/', include('restaurants.urls')),
    path('customer/', include('customers.urls')),
]
| StarcoderdataPython |
3329486 | <reponame>RBGKew/PAFTOL_Validation_Pipeline
#!/usr/bin/env python
# coding: utf-8
# # GetOrganelles Cleaning
##################################
# Author: <NAME>
# Copyright © 2020 The Board of Trustees of the Royal Botanic Gardens, Kew
##################################
# In[1]:
import os
import shutil
import sys
import argparse
from Bio import SeqIO
# In[ ]:
# NOTE(review): flat script; it reads path.split('/')[-2], which assumes
# --path ends with a trailing slash (".../<Sample>_pt/") -- confirm callers.
parser = argparse.ArgumentParser(
    description='Process GetOrganelles results and clean tmp folders')
parser.add_argument("--path", type=str, help="path of folder to process")
args = parser.parse_args()
path = args.path
print('path to folder:',path)
# Sample name is the folder name with the organelle suffix stripped.
Sample = path.split('/')[-2].replace('_pt','').replace('_nr','')
print('Sample:',Sample)
# In[32]:
# Organelle tag is the folder-name suffix; only 'pt' and 'nr' are accepted.
org = path.split('/')[-2].split('_')[-1]
if org in ['pt','nr']:
    print('organelle:',org)
else:
    print('wrong parsing of organelle:',org)
    sys.exit()
# ## Copy best organelle fasta
# In[14]:
fasta_files = [ifile for ifile in os.listdir(path) if ifile.endswith('.fasta')]
if len(fasta_files)==1:
    print('1 fasta file:',fasta_files[0])
    shutil.copyfile(path + fasta_files[0], 'fasta_' + org + '/' + Sample + '_' + org + '.fasta')
elif len(fasta_files)>1:
    print('found',len(fasta_files),'fasta files')
    best_fasta=''
    best_len=0
    # Pick the assembly with the greatest total sequence length.
    for ifasta in fasta_files:
        sum_len=0
        for record in SeqIO.parse(path + ifasta, "fasta"):
            sum_len += len(record.seq)
        if sum_len>best_len:
            best_len=sum_len
            best_fasta=ifasta
    shutil.copyfile(path + best_fasta, 'fasta_' + org + '/' + Sample + '_' + org + '.fasta')
else:
    print('either no fasta or error, exiting.')
    sys.exit()
# ## Remove temp files
# In[7]:
rm_dirs=['filtered_spades','seed']
rm_files=['filtered_1_paired.fq.tar.gz','filtered_2_paired.fq.tar.gz',
          'filtered_1_unpaired.fq.tar.gz','filtered_2_unpaired.fq.tar.gz']
for idir in rm_dirs:
    rm_dir = path + idir
    print(rm_dir)
    if os.path.isdir(rm_dir):
        shutil.rmtree(rm_dir)
for ifile in rm_files:
    rm_file = path + ifile
    print(rm_file)
    if os.path.isfile(rm_file):
        os.remove(rm_file)
# ## Compress and remove folder
# In[ ]:
zip_path='Archives/' + Sample + '_' + org + '.gz'
# NOTE(review): the shell command is built by string concatenation; paths
# containing spaces or shell metacharacters would break (or be unsafe).
os.system('tar -zcvf ' + zip_path + ' ' + path)
if os.path.isfile(zip_path):
    print('compressed succesfully to',zip_path,', removing folder',path)
    shutil.rmtree(path)
8181564 | <reponame>juangea/B28_boneMaster
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
def rna_idprop_ui_get(item, create=True):
    """Return the '_RNA_UI' ID-property group of *item*.

    :param item: an ID-property holder (supports dict-style subscripting).
    :param create: when True, create an empty group if none exists yet.
    :return: the group, or None when absent and *create* is False.
    """
    try:
        return item['_RNA_UI']
    except KeyError:
        # Narrowed from a bare ``except``: only a missing key means
        # "no UI data yet"; anything else should propagate.
        if create:
            item['_RNA_UI'] = {}
            return item['_RNA_UI']
        return None
def rna_idprop_ui_del(item):
    """Remove *item*'s '_RNA_UI' group; a missing group is not an error."""
    if '_RNA_UI' in item.keys():
        del item['_RNA_UI']
def rna_idprop_quote_path(prop):
    """Return *prop* quoted as an RNA data-path subscript, e.g. ``["name"]``."""
    escaped = prop.replace("\"", "\\\"")
    return '["' + escaped + '"]'
def rna_idprop_ui_prop_update(item, prop):
    """Trigger an RNA property update for the custom property *prop* on *item*."""
    resolved = item.path_resolve(rna_idprop_quote_path(prop), False)
    if isinstance(resolved, bpy.types.bpy_prop):
        resolved.update()
def rna_idprop_ui_prop_get(item, prop, create=True):
    """Return the UI-settings dict for custom property *prop* on *item*.

    :param create: when True, create both the '_RNA_UI' group and the
        per-property entry if missing.
    :return: the settings dict, or None when absent and *create* is False.
    """
    rna_ui = rna_idprop_ui_get(item, create)
    if rna_ui is None:
        return None
    try:
        return rna_ui[prop]
    except KeyError:
        # Narrowed from a bare ``except``: only a missing entry should
        # result in a fresh settings dict being created.
        rna_ui[prop] = {}
        return rna_ui[prop]
def rna_idprop_ui_prop_clear(item, prop, remove=True):
    """Drop the UI settings of *prop*; optionally delete an emptied '_RNA_UI'."""
    rna_ui = rna_idprop_ui_get(item, False)
    if rna_ui is not None:
        try:
            del rna_ui[prop]
        except KeyError:
            pass
        # When only '_RNA_UI' itself is left, remove the whole group.
        if remove and len(item.keys()) == 1:
            rna_idprop_ui_del(item)
def rna_idprop_context_value(context, context_member, property_type):
    """Resolve the data-block whose custom properties are being displayed.

    Honors a pinned ID in a Properties editor; otherwise evaluates
    *context_member* as an attribute path on *context*.

    :return: (rna_item, context_member) -- the member may be rewritten to
        "space_data.pin_id" when a pinned ID of the right type is active.
    """
    space = context.space_data

    # BUG FIX: the previous condition ``space is None or isinstance(...)``
    # dereferenced ``space.pin_id`` even when ``space`` was None, raising
    # AttributeError; with no space there is simply no pinned ID.
    if space is not None and isinstance(space, bpy.types.SpaceProperties):
        pin_id = space.pin_id
    else:
        pin_id = None

    if pin_id and isinstance(pin_id, property_type):
        rna_item = pin_id
        context_member = "space_data.pin_id"
    else:
        # NOTE: context_member is an attribute path from trusted UI code,
        # never user input, so eval() is confined to that use.
        rna_item = eval("context." + context_member)

    return rna_item, context_member
def rna_idprop_has_properties(rna_item):
    """Truthy when *rna_item* carries at least one real custom property
    (anything besides the internal '_RNA_UI' group)."""
    all_keys = rna_item.keys()
    key_count = len(all_keys)
    if key_count > 1:
        return True
    # Exactly one key counts only if it is not the internal group;
    # zero keys falls through as falsy.
    return key_count and '_RNA_UI' not in all_keys
def rna_idprop_ui_prop_default_set(item, prop, value):
    """Set (or clear) the stored UI default for custom property *prop*.

    A default is only stored when ``item[prop]`` currently holds an int or
    float; otherwise any existing default entry is removed.
    """
    defvalue = None
    try:
        prop_type = type(item[prop])

        if prop_type in {int, float}:
            # Coerce the new default to the property's current type.
            defvalue = prop_type(value)
    except KeyError:
        pass

    # NOTE(review): truthiness test, so a computed default of 0 / 0.0
    # falls into the removal branch -- confirm that is intended.
    if defvalue:
        rna_ui = rna_idprop_ui_prop_get(item, prop, True)
        rna_ui["default"] = defvalue
    else:
        rna_ui = rna_idprop_ui_prop_get(item, prop)
        if rna_ui and "default" in rna_ui:
            del rna_ui["default"]
def rna_idprop_ui_create(
        item, prop, *, default,
        min=0.0, max=1.0,
        soft_min=None, soft_max=None,
        description=None,
        overridable=False,
):
    """Create and initialize a custom property with limits, defaults and other settings.

    :param item: ID-property holder to create the property on.
    :param prop: name of the custom property.
    :param default: initial value; its type drives the stored limit types.
    :param min, max: hard limits (forced to False/True for bool defaults).
    :param soft_min, soft_max: soft limits; fall back to min/max when None.
    :param description: optional tooltip text.
    :param overridable: whether the property may be library-overridden.
    :return: the freshly created UI-settings dict.
    """
    proptype = type(default)

    # Sanitize limits
    if proptype is bool:
        min = soft_min = False
        max = soft_max = True

    if soft_min is None:
        soft_min = min
    if soft_max is None:
        soft_max = max

    # Assign the value
    item[prop] = default

    rna_idprop_ui_prop_update(item, prop)

    # Clear the UI settings
    rna_ui_group = rna_idprop_ui_get(item, True)
    rna_ui_group[prop] = {}
    rna_ui = rna_ui_group[prop]

    # Assign limits and default
    if proptype in {int, float, bool}:
        # The type must be exactly the same
        rna_ui["min"] = proptype(min)
        rna_ui["soft_min"] = proptype(soft_min)
        rna_ui["max"] = proptype(max)
        rna_ui["soft_max"] = proptype(soft_max)

        # NOTE(review): truthiness check -- a default of 0/0.0/False is
        # not stored; confirm that is intended.
        if default:
            rna_ui["default"] = default

    # Assign other settings
    if description is not None:
        rna_ui["description"] = description

    prop_path = rna_idprop_quote_path(prop)

    item.property_overridable_static_set(prop_path, overridable)

    return rna_ui
def draw(layout, context, context_member, property_type, use_edit=True):
    """Draw the custom-property list UI for the data-block at *context_member*."""
    def assign_props(prop, val, key):
        # Fill the common operator properties for the edit/remove buttons.
        prop.data_path = context_member
        prop.property = key

        try:
            prop.value = str(val)
        except:
            pass

    rna_item, context_member = rna_idprop_context_value(context, context_member, property_type)

    # poll should really get this...
    if not rna_item:
        return

    from bpy.utils import escape_identifier

    # Library-linked data cannot be edited in place.
    if rna_item.id_data.library is not None:
        use_edit = False

    assert(isinstance(rna_item, property_type))

    items = rna_item.items()
    items.sort()

    if use_edit:
        row = layout.row()
        props = row.operator("wm.properties_add", text="Add")
        props.data_path = context_member
        del row

    show_developer_ui = context.preferences.view.show_developer_ui
    rna_properties = {prop.identifier for prop in rna_item.bl_rna.properties if prop.is_runtime} if items else None

    layout.use_property_split = True
    layout.use_property_decorate = False  # No animation.

    flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
    for key, val in items:

        # Skip the internal UI-settings group itself.
        if key == '_RNA_UI':
            continue

        is_rna = (key in rna_properties)

        # only show API defined for developers
        if is_rna and not show_developer_ui:
            continue

        to_dict = getattr(val, "to_dict", None)
        to_list = getattr(val, "to_list", None)

        # val_orig = val # UNUSED
        if to_dict:
            val = to_dict()
            val_draw = str(val)
        elif to_list:
            val = to_list()
            val_draw = str(val)
        else:
            val_draw = val

        row = flow.row(align=True)
        box = row.box()

        if use_edit:
            split = box.split(factor=0.75)
            row = split.row(align=True)
        else:
            row = box.row(align=True)

        row.alignment = 'RIGHT'

        row.label(text=key, translate=False)

        # explicit exception for arrays.
        if to_dict or to_list:
            row.label(text=val_draw, translate=False)
        else:
            if is_rna:
                row.prop(rna_item, key, text="")
            else:
                row.prop(rna_item, '["%s"]' % escape_identifier(key), text="")

        if use_edit:
            row = split.row(align=True)
            if not is_rna:
                props = row.operator("wm.properties_edit", text="Edit")
                assign_props(props, val_draw, key)
                props = row.operator("wm.properties_remove", text="", icon='REMOVE')
                assign_props(props, val_draw, key)
            else:
                row.label(text="API Defined")

    del flow
class PropertyPanel:
    """
    Mix-in that adds a "Custom Properties" panel.

    The subclass should have its own poll function
    and the variable '_context_path' MUST be set.
    """
    bl_label = "Custom Properties"
    bl_options = {'DEFAULT_CLOSED'}
    bl_order = 1000  # Order panel after all others

    @classmethod
    def poll(cls, context):
        rna_item, _context_member = rna_idprop_context_value(
            context, cls._context_path, cls._property_type)
        return bool(rna_item)

    # Disabled draw_header implementation kept from the original:
    # def draw_header(self, context):
    #     rna_item, context_member = rna_idprop_context_value(context, self._context_path, self._property_type)
    #     tot = len(rna_item.keys())
    #     if tot:
    #         self.layout().label(text="%d:" % tot)

    def draw(self, context):
        draw(self.layout, context, self._context_path, self._property_type)
| StarcoderdataPython |
393619 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage YVertical.
# <NAME>, <EMAIL>
#
#
from ppmessage.core.imageconverter import ImageConverter
from ppmessage.core.audioconverter import AudioConverter
from ppmessage.core.constant import REDIS_DISPATCHER_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_ACK_NOTIFICATION_KEY
from ppmessage.core.constant import MESSAGE_MAX_TEXT_LEN
from ppmessage.core.constant import MESSAGE_SUBTYPE
from ppmessage.core.constant import MESSAGE_TYPE
from ppmessage.core.constant import THUMBNAIL_HEIGHT
from ppmessage.core.constant import THUMBNAIL_WIDTH
from ppmessage.core.constant import CONVERSATION_TYPE
from ppmessage.core.constant import CONVERSATION_STATUS
from ppmessage.core.constant import PCSOCKET_SRV
from ppmessage.core.constant import TASK_STATUS
from ppmessage.core.constant import DIS_WHAT
from ppmessage.core.constant import YVOBJECT
from ppmessage.db.models import MessagePushTask
from ppmessage.db.models import VideoMaterialInfo
from ppmessage.db.models import ConversationInfo
from ppmessage.db.models import ConversationUserData
from ppmessage.db.models import FileInfo
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import DeviceUser
from ppmessage.core.utils.filemanager import create_file_with_data
from ppmessage.core.utils.filemanager import read_file
from ppmessage.core.redis import redis_hash_to_dict
import json
import uuid
import time
import logging
import datetime
from PIL import Image
class Proc():
    def __init__(self, _app):
        # Keep a handle on the application's redis connection.
        self._redis = _app.redis
        # self._file_refs = _app.file_refs
        return
def check(self, _body):
self._body = _body
if not isinstance(_body, dict):
self._body = json.loads(_body)
self._app_uuid = self._body.get("app_uuid")
self._uuid = self._body.get("uuid")
self._to_type = self._body.get("to_type")
self._to_uuid = self._body.get("to_uuid")
self._from_type = self._body.get("from_type")
self._from_uuid = self._body.get("from_uuid")
self._conversation_uuid = self._body.get("conversation_uuid")
self._conversation_type = self._body.get("conversation_type")
self._message_body = self._body.get("message_body")
self._from_device_uuid = self._body.get("device_uuid")
self._message_type = self._body.get("message_type")
self._message_subtype = self._body.get("message_subtype")
self._pcsocket = self._body.get("pcsocket")
if self._uuid == None or \
self._to_type == None or \
self._to_uuid == None or \
self._from_type == None or \
self._from_uuid == None or \
self._conversation_uuid == None or \
self._message_type == None or \
self._message_subtype == None or \
self._message_body == None:
logging.error("send message failed for input.")
return False
return True
    def parse(self):
        """Normalize ``self._message_body`` according to the message subtype.

        Oversized text is spilled to a file; media bodies are converted
        and rewritten as JSON descriptors.  Returns True on success and
        False for malformed or unsupported messages.

        NOTE(review): uses the Python 2 ``unicode`` builtin, so this file
        only runs under Python 2.
        """
        self._message_type = self._message_type.upper()
        self._message_subtype = self._message_subtype.upper()

        if isinstance(self._message_body, unicode):
            self._message_body = self._message_body.encode("utf-8")

        if self._message_subtype == MESSAGE_SUBTYPE.TEXT:
            # Oversized plain text is stored as a file and referenced by id.
            if len(self._message_body) > MESSAGE_MAX_TEXT_LEN:
                _fid = create_file_with_data(self._redis, self._message_body, "text/plain", self._from_uuid)
                self._message_body = json.dumps({"fid": _fid})
            return True
        elif self._message_subtype == MESSAGE_SUBTYPE.TXT:
            _fid = self._parseTxt(self._message_body)
            if _fid == None:
                return False
            self._message_body = json.dumps({"fid": _fid})
            return True
        elif self._message_subtype == MESSAGE_SUBTYPE.AUDIO:
            # _parseAudio already returns a JSON string, no dumps needed.
            _audio = self._parseAudio(self._message_body)
            if _audio == None:
                return False
            self._message_body = _audio
            return True
        elif self._message_subtype == MESSAGE_SUBTYPE.IMAGE:
            _image = self._parseImage(self._message_body)
            if _image == None:
                return False
            self._message_body = json.dumps(_image)
            return True
        elif self._message_subtype == MESSAGE_SUBTYPE.VIDEO:
            _video = self._parseVideo(self._message_body)
            if _video == None:
                return False
            self._message_body = json.dumps(_video)
            return True
        elif self._message_subtype == MESSAGE_SUBTYPE.DOCUMENT:
            _document = self._parseDocument(self._message_body)
            if _document == None:
                return False
            self._message_body = json.dumps(_document)
            return True
        elif self._message_subtype == MESSAGE_SUBTYPE.FILE:
            _generic = self._parseFile(self._message_body)
            if _generic == None:
                return False
            self._message_body = json.dumps(_generic)
            return True
        else:
            logging.error("unsupport message: %s" % self._body)
            return False
        # Unreachable: every branch above returns.
        return True
    def save(self):
        """Persist the message as a push task and mark the conversation open."""
        _task = {
            "uuid": self._uuid,
            "app_uuid": self._app_uuid,
            "conversation_uuid": self._conversation_uuid,
            "conversation_type": self._conversation_type,
            "message_type": self._message_type,
            "message_subtype": self._message_subtype,
            "from_uuid": self._from_uuid,
            "from_type": self._from_type,
            "from_device_uuid": self._from_device_uuid,
            "to_uuid": self._to_uuid,
            "to_type": self._to_type,
            "body": self._message_body,
            "task_status": TASK_STATUS.PENDING,
        }
        _row = MessagePushTask(**_task)
        _row.async_add()
        _row.create_redis_keys(self._redis)

        # Re-open the conversation and record this task as its latest.
        _row = ConversationInfo(uuid=self._conversation_uuid, status=CONVERSATION_STATUS.OPEN, latest_task=self._uuid)
        _row.async_update()
        _row.update_redis_keys(self._redis)

        # Wake the dispatcher for this task.
        _m = {"task_uuid": self._uuid}
        self._redis.rpush(REDIS_DISPATCHER_NOTIFICATION_KEY, json.dumps(_m))

        # Re-open the conversation for every participant's user-data row.
        _key = ConversationUserData.__tablename__ + ".conversation_uuid." + self._conversation_uuid + ".datas"
        _datas = self._redis.smembers(_key)
        for _data_uuid in _datas:
            _row = ConversationUserData(uuid=_data_uuid, conversation_status=CONVERSATION_STATUS.OPEN)
            _row.async_update()
            _row.update_redis_keys(self._redis)

        # for message routing algorithm
        self._user_latest_send_message_time()
        return
def _user_latest_send_message_time(self):
_now = datetime.datetime.now()
_row = DeviceUser(uuid=self._from_uuid, latest_send_message_time=_now)
_row.async_update()
return
def _parseTxt(self, _body):
_txt = json.loads(_body)
return _txt.get("fid")
    def _parseImage(self, _body):
        """Validate an image message body and build its thumbnail descriptor.

        Returns a dict with original/thumbnail file ids and dimensions,
        or None when the body, mime type or stored file is invalid.
        """
        _image = json.loads(_body)
        _fid = _image.get("fid")
        _mime = _image.get("mime")
        if _fid == None or _mime == None:
            logging.error("Error for message body of image message")
            return None

        _mime = _mime.lower()
        if _mime not in ["image/jpg", "image/jpeg", "image/png", "image/gif"]:
            logging.error("Error for not supported mime=%s." % (_mime))
            return None

        _file = redis_hash_to_dict(self._redis, FileInfo, _fid)
        if _file == None:
            logging.error("Error for no file in redis: %s" % _fid)
            return None

        _image = None
        try:
            # raise IOError when file not image
            _image = Image.open(_file["file_path"])
        except:
            pass
        finally:
            if _image == None:
                logging.error("PIL can not identify the file_id=%s, not image." % (_fid))
                return None

        _image_width, _image_height = _image.size
        _thum_width = _image_width
        _thum_height = _image_height

        # GIFs are not thumbnailed (thumbnailing would drop animation);
        # the original is reused as its own thumbnail.
        if _image.format == "GIF":
            return {"thum":_fid, "orig":_fid, "mime":"image/gif", "orig_width": _image_width, "orig_height": _image_height, "thum_width": _thum_width, "thum_height": _thum_height}

        _thum_format = "JPEG"
        if _image.format == "PNG":
            _thum_format = "PNG"
        _thum_image_info = ImageConverter.thumbnailByKeepImage(_image, _thum_format)
        _thum_data = _thum_image_info["data"]
        _thum_image = _thum_image_info["image"]
        if _thum_data == None:
            logging.error("Error for thumbnail image")
            return None

        _thum_id = create_file_with_data(self._redis, _thum_data, _mime, self._from_uuid)
        _thum_width, _thum_height = _thum_image.size

        # where assume the _thum must be jpeg
        return {"thum":_thum_id, "orig":_fid, "mime":_mime, "orig_width": _image_width, "orig_height": _image_height, "thum_width": _thum_width, "thum_height": _thum_height}
def _parseAudio(self, _body):
    """Parse an audio message body and transcode it to m4a/amr/mp3.

    m4a is from/for iOS, amr is from/for android, mp3 is for PC.
    Each transcoded blob is stored as a new redis file; for amr input the
    original fid is reused for the amr entry.

    Returns a JSON string {"m4a": {...}, "amr": {...}, "mp3": {...}} or
    None on any failure (errors are logged).
    """
    _redis = self._redis
    _audio = json.loads(_body)
    _duration = _audio.get("dura")
    _mime = _audio.get("mime")
    _fid = _audio.get("fid")
    if _duration is None or _fid is None or _mime is None:
        logging.error("Error parse audio message body failed.")
        return None
    _data = read_file(_redis, _fid)
    if _data is None:
        logging.error("Error no audio data %s." % (_fid))
        return None
    # FIX: restructured the two independent `if` blocks into if/elif/else.
    # The original left _fid_m4a/_fid_amr/_fid_mp3 undefined for unsupported
    # mimes and relied on a `_mp3 is None` sentinel to bail out first.
    if _mime == "audio/m4a":
        _m4a = AudioConverter.m4a2m4a(_data)
        _amr = AudioConverter.m4a2amr(_data)
        _mp3 = AudioConverter.m4a2mp3(_data)
        _fid_m4a = create_file_with_data(_redis, _m4a, "audio/m4a", self._from_uuid)
        _fid_amr = create_file_with_data(_redis, _amr, "audio/amr", self._from_uuid)
        _fid_mp3 = create_file_with_data(_redis, _mp3, "audio/mp3", self._from_uuid)
    elif _mime == "audio/amr":
        _m4a = AudioConverter.amr2m4a(_data)
        _amr = _data
        _mp3 = AudioConverter.amr2mp3(_data)
        _fid_m4a = create_file_with_data(_redis, _m4a, "audio/m4a", self._from_uuid)
        _fid_amr = _fid
        _fid_mp3 = create_file_with_data(_redis, _mp3, "audio/mp3", self._from_uuid)
    else:
        logging.error("Error no audio converter for mime=%s." % (_mime))
        return None
    if _mp3 is None:
        # Conversion itself failed even though the mime was recognised.
        logging.error("Error no audio converter for mime=%s." % (_mime))
        return None
    if _fid_m4a is None:
        logging.error("Error to create m4a file with data, len=%d." % len(_m4a))
        return None
    if _fid_amr is None:
        logging.error("Error to create amr file with data, len=%d." % len(_amr))
        return None
    if _fid_mp3 is None:
        logging.error("Error to create mp3 file with data, len=%d." % len(_mp3))
        return None
    return json.dumps({
        "m4a": {"dura": _duration, "fid": _fid_m4a},
        "amr": {"dura": _duration, "fid": _fid_amr},
        "mp3": {"dura": _duration, "fid": _fid_mp3}
    })
def _parseVideo(self, _body):
    """Parse a video message body ("mid" = material uuid) into metadata.

    Resolves the VideoMaterialInfo record, then the underlying FileInfo,
    and returns a dict with keys thum/orig/dura/mime/size/name, or None
    on any failure (errors are logged).
    """
    _video = json.loads(_body)
    _mid = _video.get("mid")  # mid: material uuid
    if _mid is None:
        logging.error("Error for message body of video message")
        return None
    _info = redis_hash_to_dict(self._redis, VideoMaterialInfo, _mid)
    if _info is None:
        logging.error("Error for no video materal info, uuid=%s." % (_mid))
        return None
    _tid = _info["cover_thumbnail_file_uuid"]
    _fid = _info["video_file_uuid"]
    _dura = _info["duration"]
    _info = redis_hash_to_dict(self._redis, FileInfo, _fid)
    if _info is None:
        logging.error("Error for no video file info, uuid=%s." % (_fid))
        return None
    _mime = _info["file_mime"]
    _size = _info["file_size"]
    _name = _info["file_name"]
    # BUG FIX: the original returned undefined names `_thum` and
    # `_video_file` here, raising NameError at runtime. The intended
    # values are the thumbnail uuid (_tid) and the video file uuid (_fid).
    _r = {
        "thum": _tid,
        "orig": _fid,
        "dura": _dura,
        "mime": _mime,
        "size": _size,
        "name": _name,
    }
    return _r
def _parseDocument(self, _body):
    """Resolve a document message body into file metadata.

    Looks up the FileInfo hash for the payload's "fid"; returns a dict
    with fid/mime/size/name or None on failure (errors are logged).
    """
    _fid = json.loads(_body).get("fid")
    if _fid is None:
        logging.error("Error for message body of document message")
        return None
    _file_info = redis_hash_to_dict(self._redis, FileInfo, _fid)
    if _file_info is None:
        logging.error("Error for no document file info, uuid=%s." % (_fid))
        return None
    return {
        "fid": _fid,
        "mime": _file_info["file_mime"],
        "size": _file_info["file_size"],
        "name": _file_info["file_name"],
    }
def _parseFile(self, _body):
    """Resolve a generic file message body into file metadata.

    Looks up the FileInfo hash for the payload's "fid"; the display name
    from the payload wins over the stored file name. Returns a dict with
    fid/mime/size/name or None on failure (errors are logged).
    """
    _payload = json.loads(_body)
    _fid = _payload.get("fid")
    if _fid is None:
        logging.error("Error for message body of generic file message")
        return None
    _file_info = redis_hash_to_dict(self._redis, FileInfo, _fid)
    if _file_info is None:
        logging.error("Error for no file info, uuid=%s." % (_fid))
        return None
    return {
        "fid": _fid,
        "mime": _file_info.get("file_mime"),
        "size": _file_info.get("file_size"),
        "name": _payload.get("name") or _file_info.get("file_name"),
    }
def ack(self, _code):
    """Push a SEND acknowledgement for this message onto the redis
    notification queue addressed to the originating pcsocket.

    Silently does nothing when no pcsocket (or an incomplete one) is
    attached to this message.
    """
    if self._pcsocket == None:
        return
    _host = self._pcsocket.get("host")
    _port = self._pcsocket.get("port")
    _device_uuid = self._pcsocket.get("device_uuid")
    if _host == None or _port == None or _device_uuid == None:
        return
    _body = {
        "device_uuid": _device_uuid,
        "what": DIS_WHAT.SEND,
        "code": _code,
        "extra": {"uuid": self._uuid, "conversation_uuid": self._conversation_uuid},
    }
    # The queue key encodes the pcsocket endpoint so the worker serving
    # that host/port pops the acknowledgement.
    _key = REDIS_ACK_NOTIFICATION_KEY + ".host." + _host + ".port." + _port
    self._redis.rpush(_key, json.dumps(_body))
    return
| StarcoderdataPython |
11282355 | import sys
from basket import Basket
from product_store import ProductStore
from offer_store import OfferStore
class CheckoutSystem(object):
    """Interactive console checkout for the Farmers Market.

    Wires a product catalogue and offer list (both loaded from config
    files) to a Basket and drives a simple text menu loop.
    """

    def __init__(self):
        self._product_store = ProductStore.load_from_config_file()
        self._offer_store = OfferStore.load_from_config_file()
        # The basket is created lazily on the first "add item" action and
        # reset to None on checkout.
        self._basket = None

    def _handle_add_item_to_basket(self):
        """ adds an item in the products to basket"""
        if not self._basket:
            self._basket = Basket(self._product_store, self._offer_store)
        item = input("""
Enter Product Id:{}""".format(
            self._product_store.product_ids()))
        if not self._basket.add(item):
            print("Error: Invalid Id")

    def _handle_view_basket(self):
        """ Displays current bill, if items exists """
        self._print_receipt()

    def _handle_checkout_basket(self):
        # Print the final receipt, then start afresh with an empty basket.
        self._print_receipt()
        self._basket = None

    def _print_receipt(self):
        """Pretty-print basket contents with per-item offer discounts and a
        grand total; prints an info message when the basket is empty."""
        if self._basket:
            response = self._basket.status()
            header = """
\n```
Item\t\tPrice
----\t\t-----
"""
            footer = "```"
            total = 0
            print(header)
            for item in response:
                # price_after_discount already has the offer applied.
                total = total + item['price_after_discount']
                code = item['code']
                offer_name = item['offer_name']
                quantity = item['quantity']
                discount = item['discount']
                price = item['price']
                items_discounted = item['items_discounted']
                # Discounted units: item line plus the offer/discount line.
                for i in range(items_discounted):
                    print("{}\t\t {}".format(code, price))
                    print("\t{}\t-{}".format(offer_name, discount))
                # Remaining units at full price.
                for i in range(quantity-items_discounted):
                    print("{}\t\t {}".format(code, price))
            print("--------------------------------")
            print("\t\t {}".format(round(total,2)))
            print(footer)
        else:
            print("Info: Nothing in Basket")

    def _menu(self):
        """ Main Menu For Farmers Market """
        print()
        choice = input("""
Farmers Market Checkout System)
A: Add item
V: View Basket
C: Checkout
Q: Quit/Log Out
Please enter your choice: """)
        # Menu dispatch is case-insensitive; unknown choices are ignored.
        if choice == 'A' or choice == 'a':
            self._handle_add_item_to_basket()
        elif choice == 'V' or choice == 'v':
            self._handle_view_basket()
        elif choice == 'C' or choice == 'c':
            self._handle_checkout_basket()
        elif choice == 'Q' or choice == 'q':
            sys.exit(0)

    def start(self):
        # Loop forever; the only exit path is sys.exit(0) from _menu ('Q').
        while True:
            self._menu()
if __name__ == '__main__':
    # Script entry point: build the checkout system and run its menu loop.
    system = CheckoutSystem()
    system.start()
| StarcoderdataPython |
3415959 | <filename>passivehunter.py
'''
Author : <NAME>
Note : I initially made this script for my personal use; now I am making it public.
I did not care much about best coding practices, as it was just a personal script.
'''
from status_check import make_requests
from error_handling import error_handler
from collections import Counter
import os
import requests
import time
import re
import sys
import pathlib
import asyncio
def main(url , domain):
    """Fetch dns.bufferover.run results for *domain*, print the unique
    (sub)domains found and write candidate URL lists to text files.

    Side effects: creates/truncates <domain>.txt (one http+https URL per
    line), <domain>-200.txt and <domain>-other.txt (emptied here, filled
    later by the status checker). Exits the process when the HTTP request
    fails or when no subdomains are found.
    """
    # a small try and except block for error handling
    try:
        r = requests.get(url)
        r.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(err)
        exit(0)
    # FIX: re.escape the domain — the original interpolated it raw, so the
    # dots matched *any* character and could produce false positives.
    pattern = r',.*.' + re.escape(domain) + r'",'
    raw_domains = re.findall(pattern, r.text)
    # Strip the leading comma and the trailing '",' from each raw match.
    temp_domains = [match[1:-2] for match in raw_domains]
    # Counter removes duplicate entries of domains, if any.
    cnt = Counter(temp_domains)
    print("\u001b[32m[!] Total No of unique domains/subdomains found : " + str(len(cnt)) + "\n\u001b[0m")
    urls = []
    # Collect both https and http variants of every discovered name.
    print("\u001b[34;1m")
    for name in cnt:
        print(name)
        urls.append("https://" + name)
        urls.append("http://" + name)
    print("\n\n")
    print("\u001b[0m")
    # FIX: bail out *before* touching the filesystem — the original created
    # three files and immediately deleted them when nothing was found.
    if not cnt:
        sys.exit()
    # Truncate leftovers from a previous run against the same domain.
    for suffix in ('-200.txt', '-other.txt'):
        open(domain + suffix, 'w').close()
    with open(domain + '.txt', 'w') as f:
        f.writelines(u + "\n" for u in urls)
if __name__ == "__main__":
    if os.name =='nt':
        os.system("cls") #clear screen
    # NOTE(review): num_lines is assigned but never used below.
    num_lines = 0
    # banner below
    banner = """
\u001b[35;1m
_ _ _
___ ___ ___ _ __|_|_ _ ___| |_ _ _ ___| |_ ___ ___
| . | .'|_ -|_ -| | | | -_| | | | | _| -_| _|
| _|__,|___|___|_|\_/|___|_|_|___|_|_|_| |___|_|
|_|\u001b[0m
\u001b[42;1m-coded with <3 by <NAME>\u001b[0m
"""
    print(banner)
    # checks if the supplied command argument is valid
    if len(sys.argv)!=2:
        print("\u001b[31;1m[!] Usage : python3 passivehunter.py domainname.tld\u001b[0m")
        sys.exit(1)
    domain = sys.argv[1]
    # NOTE: assert is stripped when Python runs with -O; a plain version
    # check with sys.exit would be sturdier.
    assert sys.version_info >= (3, 7), "Script requires Python 3.7+."
    url = "https://dns.bufferover.run/dns?q=." + domain
    # a request is made to the host , that will check for errors (error handling)
    error_handler(url) #function imported from error_handling.py
    main(url , domain)
| StarcoderdataPython |
46694 | <reponame>kaniblu/enwiki-mysql
# encoding: utf-8
from __future__ import unicode_literals
import re
import string
import six
import gensim
class WikiBodyFilter(object):
    """Generic wikipedia article filter.

    Strips off illegal characters and wiki markup. Borrows some basic
    logic from gensim utils. Callable: filter(text) -> cleaned unicode.
    """

    def __init__(self, remove_html=True, valid_unicodes=(), invalid_unicodes=()):
        self.valid_unicodes = valid_unicodes
        self.invalid_unicodes = invalid_unicodes
        self.remove_html = remove_html
        patterns = []
        if valid_unicodes:
            # Anything *outside* the allowed ranges is stripped.
            ranges = "".join(
                "{}-{}".format(six.unichr(lo), six.unichr(hi))
                for lo, hi in valid_unicodes)
            patterns.append(re.compile(r"[^{}]".format(ranges), re.UNICODE))
        if invalid_unicodes:
            # Anything *inside* the forbidden ranges is stripped.
            ranges = "".join(
                "{}-{}".format(six.unichr(lo), six.unichr(hi))
                for lo, hi in invalid_unicodes)
            patterns.append(re.compile(r"[{}]".format(ranges), re.UNICODE))
        self.uni_patterns = patterns
        # Collapses any run of whitespace down to its first character.
        self.dbws_pattern = re.compile(r"(\s)\s*")

    def __call__(self, text):
        text = gensim.utils.to_unicode(text, "utf8", errors="ignore")
        if self.remove_html:
            text = gensim.utils.decode_htmlentities(text)
        text = gensim.corpora.wikicorpus.remove_markup(text)
        for pattern in self.uni_patterns:
            text = pattern.sub("", text)
        return self.dbws_pattern.sub(r"\g<1>", text)
5027140 | """add url column to movies table
Revision ID: 51eac6822fe3
Revises: <PASSWORD>
Create Date: 2017-12-15 10:50:31.866000
"""
# revision identifiers, used by Alembic.
revision = '51eac6822fe3'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable 'url' column to the movies table.

    VARCHAR(2083) — a common maximum practical URL length.
    """
    op.add_column('movies', sa.Column('url', sa.String(2083), nullable=True))
def downgrade():
    """Reverse of upgrade(): drop the 'url' column from movies."""
    op.drop_column('movies', 'url')
| StarcoderdataPython |
3545218 | <gh_stars>1-10
from simulatedisease import *
import numpy.random
import matplotlib.pylab
from optimise_stops import *
if __name__=='__main__':
    # Experiment driver: builds a synthetic disease incidence field, then
    # compares an unweighted survey-stop optimisation against an
    # incidence-weighted one, plotting the chosen stops over the field.
    save_figures = True
    numpy.random.seed(2)
    field_size = [100, 100]
    num_groups = 2
    num_stops = 20
    origin = np.array([50,50])
    opts = {}
    # The triple-quoted block below is dead code kept by the author
    # (an earlier "regular survey" experiment) — left untouched.
    '''
    opts['longitude_limit'] = (0.,99.)
    opts['latitude_limit'] = (0.,99.)
    opts['mapwidth'] = 100
    opts['mapheight'] = 100
    opts['nsamples_per_stop'] = 10
    opts['num_groups'] = 3
    opts['total_survey_vertices'] = 30
    opts['initial_samples'] = 5
    opts['nsets_of_points'] = 1
    opts['stops_per_group'] = 15
    opts['num_disease_categories'] = 2
    # simulate a new survey
    Preal,tmp = simulate_disease([opts['mapheight'],opts['mapwidth']])
    routes = simulate_routes([opts['mapheight'],opts['mapwidth']], opts['num_groups'], opts['total_survey_vertices'])
    # sample data at the origin
    D = take_real_samples(Preal,origin,opts['initial_samples'])
    # if any categories are missing from the sample, add them here
    missing_categories = np.array([np.setdiff1d(np.arange(1,opts['num_disease_categories']+1),D[0,:])])
    D = np.hstack((D,missing_categories))
    X = np.tile(origin,(opts['initial_samples']+len(missing_categories[0]),1))
    # initial GP estimate given measurements at origin
    P, M, S = gpor_2D_grid(X, D, opts['longitude_limit'], opts['latitude_limit'], opts['mapwidth'], opts['mapheight'])
    # do the different types of surveys
    Preg,Mreg,Sreg,Xreg,Dreg,survey_locations_by_group_reg = do_regular_survey(copy.deepcopy(routes),Preal,P,M,S,X,D,opts)
    # plot the sampled mean disease density (true incidence)
    pylab.matshow(Preal[:,:,1])
    pylab.hot()
    ax = pylab.gca()
    ax.set_xticks([])
    ax.set_yticks([])
    pylab.title(r'$I(x)$')
    if save_figures:
        f2 = pylab.gcf()
        f2.set_figwidth(3.5)
        f2.set_figheight(3.5)
        pylab.savefig('sampled_mean_density.pdf', bbox_inches='tight')
    # plot the sampled trajectories, and inference
    f1 = pylab.figure()
    pylab.matshow(Preg[:,:,1],fignum=0)
    path_colours = ['b','b','b']
    path_linestyles = ['-','-','-']
    for g in range(opts['num_groups']):
        pylab.plot(routes[g][:,1],routes[g][:,0],c=path_colours[g],ls=path_linestyles[g],linewidth=2)
        for k in range(opts['stops_per_group']):
            pylab.plot(survey_locations_by_group_reg[g][k][1],survey_locations_by_group_reg[g][k][0],
                ls='None',marker='o',color=path_colours[g],markersize=10,markeredgewidth=2)
    pylab.hot()
    ax = pylab.gca()
    ax.set_xlim(0,100)
    ax.set_ylim(100,0)
    ax.set_xticks([])
    ax.set_yticks([])
    pylab.title(r'$P(y=d_2|x,\mathcal{D})$' )
    if save_figures:
        f1.set_figwidth(3.5)
        f1.set_figheight(3.5)
        pylab.savefig('sampled_trajectories.pdf', bbox_inches='tight')
    '''
    numpy.random.seed(0)
    # do the different types of surveys
    opts['longitude_limit'] = (0.,99.)
    opts['latitude_limit'] = (0.,99.)
    opts['mapwidth'] = 100
    opts['mapheight'] = 100
    opts['nsamples_per_stop'] = 5
    opts['num_groups'] = 1
    opts['total_survey_vertices'] = 12
    opts['initial_samples'] = 5
    opts['nsets_of_points'] = 50
    opts['stops_per_group'] = 15
    opts['num_disease_categories'] = 2
    opts['stops_on_first_tour'] = 5
    # simulate a new survey - high density at one end, low at the other
    Preal,tmp = simulate_disease([opts['mapheight'],opts['mapwidth']])
    # Overwrite the simulated field with a radial gradient centred at
    # (83, 83); the inner triple-quoted strings are abandoned alternatives.
    for x in range(Preal.shape[1]):
        for y in range(Preal.shape[0]):
            '''
            if y>50:
                Preal[y,x,1] = 1
            else:
                Preal[y,x,1] = 0
            Preal[y,x,0] = 1-Preal[y,x,1]
            '''
            '''
            Preal[y,x,1] = y
            Preal[y,x,0] = opts['mapheight']-y
            '''
            Preal[y,x,0] = numpy.sqrt((x-83)**2 + (y-83)**2)
    # Normalise the distance field to [0,1]; category 1 is its complement,
    # so incidence peaks at (83, 83).
    Preal[:,:,0] = Preal[:,:,0] / Preal[:,:,0].max()
    Preal[:,:,1] = numpy.ones((opts['mapheight'],opts['mapwidth'])) - Preal[:,:,0]
    routes = simulate_routes([opts['mapheight'],opts['mapwidth']], opts['num_groups'], opts['total_survey_vertices'])
    #routes = {}
    #routes[0] = numpy.array([[20,80][20,20][80,20][80,80][20,80]])
    # sample data at the origin
    D = take_real_samples(Preal,origin,opts['initial_samples'])
    # if any categories are missing from the sample, add them here
    missing_categories = np.array([np.setdiff1d(np.arange(1,opts['num_disease_categories']+1),D[0,:])])
    D = np.hstack((D,missing_categories))
    X = np.tile(origin,(opts['initial_samples']+len(missing_categories[0]),1))
    # initial GP estimate given measurements at origin
    P, M, S = gpor_2D_grid(X, D, opts['longitude_limit'], opts['latitude_limit'], opts['mapwidth'], opts['mapheight'])
    # Run both variants: unweighted (reg) and incidence-weighted (opt).
    Preg,Mreg,Sreg,Xreg,Dreg,survey_locations_by_group_reg = do_optimised_survey(copy.deepcopy(routes),Preal,P,M,S,X,D,opts,weighted=False)
    Popt,Mopt,Sopt,Xopt,Dopt,survey_locations_by_group_opt = do_optimised_survey(copy.deepcopy(routes),Preal,P,M,S,X,D,opts,weighted=True)
    #do_regular_survey(copy.deepcopy(routes),Preal,P,M,S,X,D,opts)
    # plot the results - regular survey
    path_colours = ['b','g','b']
    path_linestyles = ['-','--','-.']
    pylab.figure()
    pylab.matshow(Preal[:,:,1],fignum=0)
    for g in range(opts['num_groups']):
        pylab.plot(routes[g][:,1],routes[g][:,0],c=path_colours[g],ls=path_linestyles[g],linewidth=2)
        for k in range(opts['stops_per_group']):
            pylab.plot(survey_locations_by_group_reg[g][k][1],survey_locations_by_group_reg[g][k][0],
                ls='None',marker='o',color=path_colours[g],markersize=10,markeredgewidth=2)
    pylab.title(r'$w(I(x))=1$')
    pylab.hot()
    ax = pylab.gca()
    ax.set_xlim(0,100)
    ax.set_ylim(100,0)
    ax.set_xticks([])
    ax.set_yticks([])
    if save_figures:
        pylab.gca().set_xticks([])
        pylab.gca().set_yticks([])
        pylab.gcf().set_figwidth(3.5)
        pylab.gcf().set_figheight(3.5)
        pylab.savefig('locations-regular.pdf', bbox_inches='tight')
    # plot the results - optimised survey
    pylab.figure()
    pylab.matshow(Preal[:,:,1],fignum=0)
    for g in range(opts['num_groups']):
        pylab.plot(routes[g][:,1],routes[g][:,0],c=path_colours[g],ls=path_linestyles[g],linewidth=2)
        for k in range(opts['stops_per_group']):
            pylab.plot(survey_locations_by_group_opt[g][k][1],survey_locations_by_group_opt[g][k][0],
                ls='None',marker='o',color=path_colours[g],markersize=10,markeredgewidth=2)
    pylab.title(r'$w(I(x))=I(x)+c$')
    pylab.hot()
    ax = pylab.gca()
    ax.set_xlim(0,100)
    ax.set_ylim(100,0)
    ax.set_xticks([])
    ax.set_yticks([])
    if save_figures:
        pylab.gca().set_xticks([])
        pylab.gca().set_yticks([])
        pylab.gcf().set_figwidth(3.5)
        pylab.gcf().set_figheight(3.5)
        pylab.savefig('locations-optimised.pdf', bbox_inches='tight')
    pylab.ion()
    pylab.show()
4947611 | <reponame>CodeChain-io/codechain-sdk-python
from .transaction import Transaction
class Remove(Transaction):
    """Marker subclass of Transaction; adds no behaviour of its own.

    Presumably represents a 'remove' transaction type in the SDK —
    confirm against the Transaction base class.
    """
    pass
| StarcoderdataPython |
215337 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding:utf-8 -*-
from typing import List
import os
import numpy as np
import onnx
from onnx import ModelProto
import onnxruntime as onrt
from onnxruntime import InferenceSession
from .interface import BaseRunner
from .onnx_graph import OXGraph
class OXRunner(BaseRunner):
    """Run inference over an OXGraph with onnxruntime, on CPU or CUDA."""

    def __init__(self, device_id, b_gpu=False):
        # Device id is only meaningful on GPU; -1 marks CPU execution.
        self.did = device_id if b_gpu else -1
        self.b_gpu = b_gpu

    def infer(self, graph: OXGraph, out_names: List[str],
              dtypes: List[str]=None, feed_dict=None) -> List[np.ndarray]:
        """Run *graph* and return output tensors grouped per requested node.

        The graph is temporarily rewritten so every tensor produced by the
        nodes in *out_names* becomes a graph output, saved to a temp .onnx
        file, executed, and the caller's model is restored.

        Raises TypeError on bad arguments; dtypes defaults to float32 per
        output tensor.
        """
        tmp_name = "_gc_tmp_run.onnx"
        ori_mod = ModelProto()
        ori_mod.CopyFrom(graph.mod)
        if not isinstance(graph, OXGraph):
            raise TypeError("input graph not OXGraph")
        if feed_dict is not None and not isinstance(feed_dict, dict):
            raise TypeError("invalid feed_dict. {}".format(type(feed_dict)))
        feed = self._get_feed_dict(graph) if feed_dict is None else feed_dict
        out_names, out_nums, dtypes = self._gen_out_info(graph,
                                                         out_names, dtypes)
        graph.clear_output()
        for name, dtype in zip(out_names, dtypes):
            graph.add_output_node(name, dtype)
        onnx.save(graph.mod, tmp_name)
        # Restore the caller's model before running; the rewritten copy
        # lives only in the temp file.
        graph.mod.CopyFrom(ori_mod)
        try:
            sess = onrt.InferenceSession(tmp_name)
            self._set_exec_device(sess)
            out_tensors = sess.run(out_names, feed)
        finally:
            # FIX: the original leaked the temp file when session creation
            # or inference raised.
            if os.path.exists(tmp_name):
                os.remove(tmp_name)
        # Regroup the flat tensor list back into one chunk per node.
        res = []
        b_idx = 0
        for num in out_nums:
            res.append(out_tensors[b_idx: (b_idx + num)])
            b_idx += num
        return res

    def _set_exec_device(self, sess: InferenceSession):
        """Pin the session to CUDA when requested, otherwise CPU."""
        providers = sess.get_providers()
        if "CUDAExecutionProvider" in providers and self.b_gpu:
            sess.set_providers(["CUDAExecutionProvider"])
        elif "CPUExecutionProvider" in providers:
            sess.set_providers(["CPUExecutionProvider"])
        else:
            raise RuntimeError("unknown providers:{}".format(providers))

    @staticmethod
    def _get_feed_dict(graph: OXGraph):
        """Build a random tensor for every network input node."""
        feed_dict = {}
        for node in graph.get_net_input_nodes():
            feed_dict[node.name] = node.get_rand_tensor(2.0)
        return feed_dict

    @staticmethod
    def _gen_out_info(graph, out_names, dtypes):
        """Expand node names into output-tensor names and validate dtypes.

        Returns (tensor_names, per-node tensor counts, dtypes); dtypes
        defaults to "float32" for every tensor when not supplied.
        """
        out_tensor_names = []
        out_nums = []
        for name in out_names:
            cur_out = graph.get_node(name).out_name
            out_tensor_names.extend(cur_out)
            out_nums.append(len(cur_out))
        if dtypes is None:
            dtypes = ["float32" for _ in out_tensor_names]
        elif len(dtypes) != len(out_tensor_names):
            raise RuntimeError("length not match:{}, {}".format(
                len(dtypes), len(out_tensor_names)))
        return out_tensor_names, out_nums, dtypes
| StarcoderdataPython |
1868208 | from __future__ import absolute_import
from __future__ import unicode_literals
import mock
from django.core.exceptions import ValidationError
from django.test import TestCase
from simplemathcaptcha.widgets import MathCaptchaWidget
from simplemathcaptcha.fields import MathCaptchaField
class FieldTests(TestCase):
    """Unit tests for MathCaptchaField construction and answer validation."""

    def test_instantiation(self):
        # Default field is required and wraps two sub-fields
        # (the visible answer plus the hidden hashed answer).
        f = MathCaptchaField()
        self.assertTrue(f.required)
        self.assertTrue(f.widget.is_required)
        self.assertEqual(len(f.fields), 2)

    @mock.patch('simplemathcaptcha.fields.MathCaptchaWidget')
    def test_instantiation_with_values(self, mocked):
        # start_int/end_int kwargs are forwarded to the widget constructor.
        MathCaptchaField(start_int=5, end_int=10)
        mocked.assert_called_once_with(start_int=5, end_int=10)

    @mock.patch('simplemathcaptcha.fields.MathCaptchaWidget')
    def test_instantiation_with_widget(self, mocked):
        # Supplying an explicit widget suppresses implicit widget creation.
        MathCaptchaField(widget=MathCaptchaWidget())
        self.assertEqual(mocked.call_count, 0)

    def test_instantiation_with_widget_and_values_is_error(self):
        # Mixing an explicit widget with widget kwargs must raise TypeError.
        with self.assertRaises(TypeError):
            MathCaptchaField(start_int=5, end_int=10,
                             widget=MathCaptchaWidget())

    def test_compress(self):
        f = MathCaptchaField()
        with mock.patch('simplemathcaptcha.fields.hash_answer') as mocked:
            # Hash of the given answer matches the submitted hash:
            # validation passes and compress returns None.
            mocked.return_value = 'hashed_answer'
            result = f.compress(['abc', 'hashed_answer'])
            self.assertIsNone(result)

    def test_compress_with_wrong_answer(self):
        f = MathCaptchaField()
        with mock.patch('simplemathcaptcha.fields.hash_answer') as mocked:
            # Hash mismatch means a wrong answer: ValidationError expected.
            mocked.return_value = 'bad_hashed_answer'
            with self.assertRaises(ValidationError):
                f.compress(['abc', 'hashed_answer'])

    def test_compress_with_nothing(self):
        # An empty data list compresses to None without raising.
        f = MathCaptchaField()
        result = f.compress([])
        self.assertIsNone(result)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.