import os
import time
import subprocess
import pyblish.api
class MyAction(pyblish.api.Action):
label = "My Action"
on = "processed"
def process(self, context, plugin):
self.log.info("Running!")
class MyOtherAction(pyblish.api.Action):
label = "My Other Action"
def process(self, context, plugin):
self.log.info("Running!")
class CollectComment(pyblish.api.ContextPlugin):
"""This collector has a very long comment.
The idea is that this comment should either be elided, or word-
wrapped in the corresponding view.
"""
order = pyblish.api.CollectorOrder
def process(self, context):
context.data["comment"] = ""
class MyCollector(pyblish.api.ContextPlugin):
label = "My Collector"
order = pyblish.api.CollectorOrder
def process(self, context):
context.create_instance("MyInstance 1", families=["myFamily"])
context.create_instance("MyInstance 2", families=["myFamily 2"])
context.create_instance(
"MyInstance 3",
families=["myFamily 2"],
publish=False
)
class MyValidator(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
active = False
label = "My Validator"
actions = [MyAction,
MyOtherAction]
def process(self, instance):
self.log.info("Validating: %s" % instance)
class MyExtractor(pyblish.api.InstancePlugin):
order = pyblish.api.ExtractorOrder
families = ["myFamily"]
label = "My Extractor"
def process(self, instance):
self.log.info("Extracting: %s" % instance)
class CollectRenamed(pyblish.api.Collector):
def process(self, context):
i = context.create_instance("MyInstanceXYZ", family="MyFamily")
i.set_data("name", "My instance")
class CollectNegatron(pyblish.api.Collector):
"""Negative collector adds Negatron"""
order = pyblish.api.Collector.order - 0.49
def process_context(self, context):
self.log.info("Collecting Negatron")
context.create_instance("Negatron", family="MyFamily")
class CollectPositron(pyblish.api.Collector):
"""Positive collector adds Positron"""
order = pyblish.api.Collector.order + 0.49
def process_context(self, context):
self.log.info("Collecting Positron")
context.create_instance("Positron", family="MyFamily")
class SelectInstances(pyblish.api.Selector):
"""Select debugging instances
These instances are part of the evil plan to destroy the world.
Be weary, be vigilant, be sexy.
"""
def process_context(self, context):
self.log.info("Selecting instances..")
for instance in instances[:-1]:
name, data = instance["name"], instance["data"]
self.log.info("Selecting: %s" % name)
instance = context.create_instance(name)
for key, value in data.items():
instance.set_data(key, value)
class SelectDiInstances(pyblish.api.Selector):
"""Select DI instances"""
name = "Select Dependency Instances"
def process(self, context):
name, data = instances[-1]["name"], instances[-1]["data"]
self.log.info("Selecting: %s" % name)
instance = context.create_instance(name)
for key, value in data.items():
instance.set_data(key, value)
class SelectInstancesFailure(pyblish.api.Selector):
"""Select some instances, but fail before adding anything to the context.
That's right. I'm programmed to fail. Try me.
"""
__fail__ = True
def process_context(self, context):
self.log.warning("I'm about to fail")
raise AssertionError("I was programmed to fail")
class SelectInstances2(pyblish.api.Selector):
def process(self, context):
self.log.warning("I'm good")
class ValidateNamespace(pyblish.api.Validator):
"""Namespaces must be orange
In case a namespace is not orange, report immediately to
your officer in charge, ask for a refund, do a backflip.
This has been an example of:
- A long doc-string
- With a list
- And plenty of newlines and tabs.
"""
families = ["B"]
def process(self, instance):
self.log.info("Validating the namespace of %s" % instance.data("name"))
self.log.info("""And here's another message, quite long, in fact it's
too long to be displayed in a single row of text.
But that's how we roll down here. It's got \nnew lines\nas well.
- And lists
- And more lists
""")
class ValidateContext(pyblish.api.Validator):
families = ["A", "B"]
def process_context(self, context):
self.log.info("Processing context..")
class ValidateContextFailure(pyblish.api.Validator):
optional = True
families = ["C"]
__fail__ = True
def process_context(self, context):
self.log.info("About to fail..")
raise AssertionError("""I was programmed to fail
The reason I failed was because the sun was not aligned with the tides,
and the moon is gray; not yellow. Try again when the moon is yellow.""")
class Validator1(pyblish.api.Validator):
"""Test of the order attribute"""
order = pyblish.api.Validator.order + 0.1
families = ["A"]
def process_instance(self, instance):
pass
class Validator2(pyblish.api.Validator):
order = pyblish.api.Validator.order + 0.2
families = ["B"]
def process_instance(self, instance):
pass
class Validator3(pyblish.api.Validator):
order = pyblish.api.Validator.order + 0.3
families = ["B"]
def process_instance(self, instance):
pass
class ValidateFailureMock(pyblish.api.Validator):
"""Plug-in that always fails"""
optional = True
order = pyblish.api.Validator.order + 0.1
families = ["C"]
__fail__ = True
def process_instance(self, instance):
self.log.debug("e = mc^2")
self.log.info("About to fail..")
self.log.warning("Failing.. soooon..")
self.log.critical("Ok, you're done.")
raise AssertionError("""ValidateFailureMock was destined to fail..
Here's some extended information about what went wrong.
It has quite the long string associated with it, including
a few newlines and a list.
- Item 1
- Item 2
""")
class ValidateIsIncompatible(pyblish.api.Validator):
"""This plug-in should never appear.."""
requires = False # This is invalid
class ValidateWithRepair(pyblish.api.Validator):
"""A validator with repair functionality"""
optional = True
families = ["C"]
__fail__ = True
def process_instance(self, instance):
raise AssertionError(
"%s is invalid, try repairing it!" % instance.name
)
def repair_instance(self, instance):
self.log.info("Attempting to repair..")
self.log.info("Success!")
class ValidateWithRepairFailure(pyblish.api.Validator):
"""A validator with repair functionality that fails"""
optional = True
families = ["C"]
__fail__ = True
def process_instance(self, instance):
raise AssertionError(
"%s is invalid, try repairing it!" % instance.name
)
def repair_instance(self, instance):
self.log.info("Attempting to repair..")
raise AssertionError("Could not repair due to X")
class ValidateWithVeryVeryVeryLongLongNaaaaame(pyblish.api.Validator):
"""A validator with repair functionality that fails"""
families = ["A"]
class ValidateWithRepairContext(pyblish.api.Validator):
"""A validator with repair functionality that fails"""
optional = True
families = ["C"]
__fail__ = True
def process_context(self, context):
raise AssertionError("Could not validate context, try repairing it")
def repair_context(self, context):
self.log.info("Attempting to repair..")
raise AssertionError("Could not repair")
class ExtractAsMa(pyblish.api.Extractor):
"""Extract contents of each instance into .ma
Serialise scene using Maya's own facilities and then put
it on the hard-disk. Once complete, this plug-in relies
    on a Conformer to put it in its final location, as this
extractor merely positions it in the users local temp-
directory.
"""
optional = True
__expected__ = {
"logCount": ">=4"
}
def process_instance(self, instance):
self.log.info("About to extract scene to .ma..")
self.log.info("Extraction went well, now verifying the data..")
if instance.name == "Richard05":
self.log.warning("You're almost running out of disk space!")
self.log.info("About to finish up")
self.log.info("Finished successfully")
class ConformAsset(pyblish.api.Conformer):
"""Conform the world
Step 1: Conform all humans and Step 2: Conform all non-humans.
Once conforming has completed, rinse and repeat.
"""
optional = True
def process_instance(self, instance):
self.log.info("About to conform all humans..")
if instance.name == "Richard05":
self.log.warning("Richard05 is a conformist!")
self.log.info("About to conform all non-humans..")
self.log.info("Conformed Successfully")
class ValidateInstancesDI(pyblish.api.Validator):
"""Validate using the DI interface"""
families = ["diFamily"]
def process(self, instance):
self.log.info("Validating %s.." % instance.data("name"))
class ValidateDIWithRepair(pyblish.api.Validator):
families = ["diFamily"]
optional = True
__fail__ = True
def process(self, instance):
raise AssertionError("I was programmed to fail, for repair")
def repair(self, instance):
self.log.info("Repairing %s" % instance.data("name"))
class ExtractInstancesDI(pyblish.api.Extractor):
"""Extract using the DI interface"""
families = ["diFamily"]
def process(self, instance):
self.log.info("Extracting %s.." % instance.data("name"))
class ValidateWithLabel(pyblish.api.Validator):
"""Validate using the DI interface"""
label = "Validate with Label"
class ValidateWithLongLabel(pyblish.api.Validator):
"""Validate using the DI interface"""
label = "Validate with Loooooooooooooooooooooong Label"
class SimplePlugin1(pyblish.api.Plugin):
"""Validate using the simple-plugin interface"""
def process(self):
self.log.info("I'm a simple plug-in, only processed once")
class SimplePlugin2(pyblish.api.Plugin):
"""Validate using the simple-plugin interface
It doesn't have an order, and will likely end up *before* all
other plug-ins. (due to how sorted([1, 2, 3, None]) works)
"""
def process(self, context):
self.log.info("Processing the context, simply: %s" % context)
class SimplePlugin3(pyblish.api.Plugin):
"""Simply process every instance"""
def process(self, instance):
self.log.info("Processing the instance, simply: %s" % instance)
class ContextAction(pyblish.api.Action):
label = "Context action"
def process(self, context):
self.log.info("I have access to the context")
self.log.info("Context.instances: %s" % str(list(context)))
class FailingAction(pyblish.api.Action):
label = "Failing action"
def process(self):
self.log.info("About to fail..")
raise Exception("I failed")
class LongRunningAction(pyblish.api.Action):
label = "Long-running action"
def process(self):
self.log.info("Sleeping for 2 seconds..")
time.sleep(2)
self.log.info("Ah, that's better")
class IconAction(pyblish.api.Action):
label = "Icon action"
icon = "crop"
def process(self):
self.log.info("I have an icon")
class PluginAction(pyblish.api.Action):
label = "Plugin action"
def process(self, plugin):
self.log.info("I have access to my parent plug-in")
self.log.info("Which is %s" % plugin.id)
class LaunchExplorerAction(pyblish.api.Action):
label = "Open in Explorer"
icon = "folder-open"
def process(self, context):
cwd = os.getcwd()
self.log.info("Opening %s in Explorer" % cwd)
result = subprocess.call("start .", cwd=cwd, shell=True)
self.log.debug(result)
class ProcessedAction(pyblish.api.Action):
label = "Success action"
icon = "check"
on = "processed"
def process(self):
self.log.info("I am only available on a successful plug-in")
class FailedAction(pyblish.api.Action):
label = "Failure action"
icon = "close"
on = "failed"
class SucceededAction(pyblish.api.Action):
label = "Success action"
icon = "check"
on = "succeeded"
def process(self):
self.log.info("I am only available on a successful plug-in")
class LongLabelAction(pyblish.api.Action):
label = "An incredibly, incredicly looooon label. Very long."
icon = "close"
class BadEventAction(pyblish.api.Action):
label = "Bad event action"
on = "not exist"
class InactiveAction(pyblish.api.Action):
active = False
class PluginWithActions(pyblish.api.Validator):
optional = True
actions = [
pyblish.api.Category("General"),
ContextAction,
FailingAction,
LongRunningAction,
IconAction,
PluginAction,
pyblish.api.Category("Empty"),
pyblish.api.Category("OS"),
LaunchExplorerAction,
pyblish.api.Separator,
FailedAction,
SucceededAction,
pyblish.api.Category("Debug"),
BadEventAction,
InactiveAction,
LongLabelAction,
pyblish.api.Category("Empty"),
]
def process(self):
self.log.info("Ran PluginWithActions")
class FailingPluginWithActions(pyblish.api.Validator):
optional = True
actions = [
FailedAction,
SucceededAction,
]
def process(self):
raise Exception("I was programmed to fail")
class ValidateDefaultOff(pyblish.api.Validator):
families = ["A", "B"]
active = False
optional = True
def process(self, instance):
self.log.info("Processing instance..")
class ValidateWithHyperlinks(pyblish.api.Validator):
"""To learn about Pyblish
<a href="http://pyblish.com">click here</a> (http://pyblish.com)
"""
families = ["A", "B"]
def process(self, instance):
self.log.info("Processing instance..")
msg = "To learn about Pyblish, <a href='http://pyblish.com'>"
msg += "click here</a> (http://pyblish.com)"
self.log.info(msg)
class LongRunningCollector(pyblish.api.Collector):
"""I will take at least 2 seconds..."""
def process(self, context):
self.log.info("Sleeping for 2 seconds..")
time.sleep(2)
self.log.info("Good morning")
class LongRunningValidator(pyblish.api.Validator):
"""I will take at least 2 seconds..."""
def process(self, context):
self.log.info("Sleeping for 2 seconds..")
time.sleep(2)
self.log.info("Good morning")
class RearrangingPlugin(pyblish.api.ContextPlugin):
"""Sort plug-ins by family, and then reverse it"""
order = pyblish.api.CollectorOrder + 0.2
def process(self, context):
self.log.info("Reversing instances in the context..")
context[:] = sorted(
context,
key=lambda i: i.data["family"],
reverse=True
)
self.log.info("Reversed!")
class InactiveInstanceCollectorPlugin(pyblish.api.InstancePlugin):
"""Special case of an InstancePlugin running as a Collector"""
order = pyblish.api.CollectorOrder + 0.1
active = False
def process(self, instance):
raise TypeError("I shouldn't have run in the first place")
class CollectWithIcon(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
def process(self, context):
instance = context.create_instance("With Icon")
instance.data["icon"] = "play"
instances = [
{
"name": "Peter01",
"data": {
"family": "A",
"publish": False
}
},
{
"name": "Richard05",
"data": {
"family": "A",
}
},
{
"name": "Steven11",
"data": {
"family": "B",
}
},
{
"name": "Piraya12",
"data": {
"family": "B",
}
},
{
"name": "Marcus",
"data": {
"family": "C",
}
},
{
"name": "Extra1",
"data": {
"family": "C",
}
},
{
"name": "DependencyInstance",
"data": {
"family": "diFamily"
}
},
{
"name": "NoFamily",
"data": {}
},
{
"name": "Failure 1",
"data": {
"family": "failure",
"fail": False
}
},
{
"name": "Failure 2",
"data": {
"family": "failure",
"fail": True
}
}
]
plugins = [
MyCollector,
MyValidator,
MyExtractor,
CollectRenamed,
CollectNegatron,
CollectPositron,
SelectInstances,
SelectInstances2,
SelectDiInstances,
SelectInstancesFailure,
ValidateFailureMock,
ValidateNamespace,
# ValidateIsIncompatible,
ValidateWithVeryVeryVeryLongLongNaaaaame,
ValidateContext,
ValidateContextFailure,
Validator1,
Validator2,
Validator3,
ValidateWithRepair,
ValidateWithRepairFailure,
ValidateWithRepairContext,
ValidateWithLabel,
ValidateWithLongLabel,
ValidateDefaultOff,
ValidateWithHyperlinks,
ExtractAsMa,
ConformAsset,
SimplePlugin1,
SimplePlugin2,
SimplePlugin3,
ValidateInstancesDI,
ExtractInstancesDI,
ValidateDIWithRepair,
PluginWithActions,
FailingPluginWithActions,
# LongRunningCollector,
# LongRunningValidator,
RearrangingPlugin,
InactiveInstanceCollectorPlugin,
CollectComment,
CollectWithIcon,
]
pyblish.api.sort_plugins(plugins)
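# A minimal sketch of how these mock plug-ins and instances could be exercised.
# It assumes pyblish.util is available next to pyblish.api; the guard keeps the
# demo from running on a plain import of this module.
if __name__ == "__main__":
    import pyblish.util
    for plugin in plugins:
        pyblish.api.register_plugin(plugin)
    context = pyblish.util.publish()
    print("Published %d instances" % len(context))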
| [
"subprocess.call",
"time.sleep",
"os.getcwd"
] | [((11574, 11587), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (11584, 11587), False, 'import time\n'), ((12143, 12154), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12152, 12154), False, 'import os\n'), ((12226, 12273), 'subprocess.call', 'subprocess.call', (['"""start ."""'], {'cwd': 'cwd', 'shell': '(True)'}), "('start .', cwd=cwd, shell=True)\n", (12241, 12273), False, 'import subprocess\n'), ((14799, 14812), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14809, 14812), False, 'import time\n'), ((15038, 15051), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15048, 15051), False, 'import time\n')] |
import cv2
fs = cv2.FileStorage("back.yaml", cv2.FILE_STORAGE_READ)
camera_matrix = fs.getNode("camera_matrix").mat()
dist_coeffs = fs.getNode("dist_coeffs").mat()
resolution = fs.getNode("resolution").mat().flatten()
#load_camera_params()
print(camera_matrix)
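# For reference, a hedged sketch of how a calibration file in this format could be
# written back out with the same API (the values here are simply the ones loaded above):
#   fs_out = cv2.FileStorage("back.yaml", cv2.FILE_STORAGE_WRITE)
#   fs_out.write("camera_matrix", camera_matrix)
#   fs_out.write("dist_coeffs", dist_coeffs)
#   fs_out.write("resolution", resolution)
#   fs_out.release()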
| [
"cv2.FileStorage"
] | [((18, 69), 'cv2.FileStorage', 'cv2.FileStorage', (['"""back.yaml"""', 'cv2.FILE_STORAGE_READ'], {}), "('back.yaml', cv2.FILE_STORAGE_READ)\n", (33, 69), False, 'import cv2\n')] |
import requests
from bs4 import BeautifulSoup
from csv import writer
html_doc = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<title>My Webpage</title>
</head>
<body>
<div id="section-1">
<h3 data-hello="hi">Hello</h3>
<img src="https://source.unsplash.com/200x200/?nature,water" />
<p>
Lorem ipsum dolor sit amet consectetur adipisicing elit. Iusto
culpa cumque velit aperiam officia molestias maiores qui
officiis incidunt. Omnis vitae eveniet reprehenderit excepturi
officiis quod, eum natus voluptatem nihil fugit necessitatibus
dolorum quae accusamus aliquid enim fuga dicta beatae!
</p>
</div>
<div id="section-2">
<ul class="items">
<li class="item"><a href="#">Item 1</a></li>
<li class="item"><a href="#">Item 2</a></li>
<li class="item"><a href="#">Item 3</a></li>
<li class="item"><a href="#">Item 4</a></li>
<li class="item"><a href="#">Item 5</a></li>
</ul>
</div>
</body>
</html>"""
response = requests.get('https://webscraper.io/blog/')
soup = BeautifulSoup(response.text, 'html.parser')
posts = soup.find_all(class_='blogno')
with open('posts.csv', 'w') as csv_file:
csv_writer = writer(csv_file)
headers = ['Title', 'Link', 'Date']
csv_writer.writerow(headers)
for post in posts:
title = post.find(class_='titleblog').get_text().replace('\n', '')
link = post.find('a')['href']
date = post.select('.date')[0].get_text()
csv_writer.writerow([title, link, date])
| [
"bs4.BeautifulSoup",
"csv.writer",
"requests.get"
] | [((1419, 1462), 'requests.get', 'requests.get', (['"""https://webscraper.io/blog/"""'], {}), "('https://webscraper.io/blog/')\n", (1431, 1462), False, 'import requests\n'), ((1473, 1516), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (1486, 1516), False, 'from bs4 import BeautifulSoup\n'), ((1621, 1637), 'csv.writer', 'writer', (['csv_file'], {}), '(csv_file)\n', (1627, 1637), False, 'from csv import writer\n')] |
# -*- coding: utf-8 -*-
"""
Obtains an account's access token and access token secret via xAuth.
"""
import base64
import os
import oauth2
import urllib.parse
from TwitterAPI import TwitterAPI
class TwitterXAuth:
def __init__(self):
        # Hack for Windows environments
        # Reference: https://stackoverflow.com/questions/31469707/changing-the-locale-preferred-encoding-in-python-3-in-windows
if os.name == 'nt':
import _locale
_locale._getdefaultlocale_backup = _locale._getdefaultlocale
_locale._getdefaultlocale = (lambda *args: (_locale._getdefaultlocale_backup()[0], 'UTF-8'))
        # Retrieve the Twitter API keys
encoded = [
b'M25WdVNvQlpueDZVNHZ6VXhmNXc=',
b'QmNzNTlFRmJic2RGNlNsOU5nNzFzbWdTdFdFR3dYWEtTall2UFZ0N3F5cw==']
self.consumer_key = base64.standard_b64decode(encoded[0]).decode()
self.consumer_secret = base64.standard_b64decode(encoded[1]).decode()
        # Raise an error if any of the keys could not be obtained
if self.consumer_key is None or self.consumer_secret is None:
raise Exception('The Twitter API consumer key or access token has not been set.')
def xauth(self, screen_name, password, endpoint='https://api.twitter.com/oauth/access_token'):
"""
        Performs xAuth with a screen name and password.
        Reference: https://github.com/yuitest/twitterxauth/blob/master/twitterxauth/__init__.py
        @param screen_name screen name
        @param password <PASSWORD>
        @return a tuple of the access token and access token secret
"""
        # Set up the request headers
headers = TwitterAPI.generate_header(self.consumer_key)
print(headers)
        # OAuth2 authentication
consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
client = oauth2.Client(consumer)
client.add_credentials(screen_name, password)
        client.set_signature_method(oauth2.SignatureMethod_HMAC_SHA1())
response, token = client.request(
endpoint, method='POST', headers=headers, body=urllib.parse.urlencode({
'x_auth_mode': 'client_auth',
'x_auth_username': screen_name,
'x_auth_password': password,
}))
        # Parse the returned message
        parsed_token = dict(urllib.parse.parse_qsl(token.decode('UTF-8')))
        if parsed_token == {}:  # parsed_token is empty -> authentication failed, so raise an exception
raise Exception(token.decode('UTF-8'))
return parsed_token['oauth_token'], parsed_token['oauth_token_secret']
if __name__ == '__main__':
    # Initialize
instance = TwitterXAuth()
    # Get the screen name
screen_name = input('Screen name: ')
    # Get the password
password = input('Password: ')
    # Run xAuth
access_token, access_token_secret = instance.xauth(screen_name, password)
    # Display the access token and access token secret
print('Access token : ' + access_token)
print('Access token secret: ' + access_token_secret)
| [
"oauth2.Consumer",
"oauth2.SignatureMethod_HMAC_SHA1",
"TwitterAPI.TwitterAPI.generate_header",
"oauth2.Client",
"base64.standard_b64decode",
"_locale._getdefaultlocale_backup"
] | [((1528, 1573), 'TwitterAPI.TwitterAPI.generate_header', 'TwitterAPI.generate_header', (['self.consumer_key'], {}), '(self.consumer_key)\n', (1554, 1573), False, 'from TwitterAPI import TwitterAPI\n'), ((1637, 1693), 'oauth2.Consumer', 'oauth2.Consumer', (['self.consumer_key', 'self.consumer_secret'], {}), '(self.consumer_key, self.consumer_secret)\n', (1652, 1693), False, 'import oauth2\n'), ((1711, 1734), 'oauth2.Client', 'oauth2.Client', (['consumer'], {}), '(consumer)\n', (1724, 1734), False, 'import oauth2\n'), ((1827, 1861), 'oauth2.SignatureMethod_HMAC_SHA1', 'oauth2.SignatureMethod_HMAC_SHA1', ([], {}), '()\n', (1859, 1861), False, 'import oauth2\n'), ((806, 843), 'base64.standard_b64decode', 'base64.standard_b64decode', (['encoded[0]'], {}), '(encoded[0])\n', (831, 843), False, 'import base64\n'), ((884, 921), 'base64.standard_b64decode', 'base64.standard_b64decode', (['encoded[1]'], {}), '(encoded[1])\n', (909, 921), False, 'import base64\n'), ((555, 589), '_locale._getdefaultlocale_backup', '_locale._getdefaultlocale_backup', ([], {}), '()\n', (587, 589), False, 'import _locale\n')] |
import keras.backend as K
from params import Params
from keras.losses import binary_crossentropy
def negative_avg_log_error(y_true, y_pred):
def sum_of_log_probabilities(true_and_pred):
y_true, y_pred = true_and_pred
losses = []
def get_loss_per_passage(true_and_pred):
y_true, y_pred = true_and_pred
start_probability = y_pred[0, K.cast(y_true[0], dtype="int32")]
end_probability = y_pred[1, K.cast(y_true[1], dtype="int32")]
return K.log(start_probability) + K.log(end_probability)
passage_loss_sum = K.map_fn(get_loss_per_passage, (y_true, y_pred), dtype="float32")
return K.mean(passage_loss_sum, axis=0)
y_true = K.squeeze(y_true, axis=-1)
batch_probability_sum = K.map_fn(sum_of_log_probabilities, (y_true, y_pred), dtype='float32')
return -K.mean(batch_probability_sum, axis=0)
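# Worked example for the loss above (the indices and shapes are illustrative assumptions):
# per passage, y_true holds [start_idx, end_idx] and y_pred is a (2, passage_len)
# matrix of probabilities, so the passage contributes
# log(y_pred[0, start_idx]) + log(y_pred[1, end_idx]); those sums are averaged over
# passages, then over the batch, and the result is negated to form the loss.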
"""
y_truth: (None, Params.max_passage_count, Params.max_passage_len) 1 or 0
y_pred : ... probability of word in answer
"""
def content_loss_function(y_truth, y_pred):
def cross_entropy(truth_and_pred):
y_truth, y_pred = truth_and_pred
def cross_entropy_per_passage(truth_and_pred):
y_truth, y_pred = truth_and_pred
return -K.mean(K.binary_crossentropy(y_truth, y_pred), axis=-1)
passage_loss = K.map_fn(cross_entropy_per_passage, (y_truth, y_pred), dtype="float32")
return K.mean(passage_loss)
batch_content_loss = K.map_fn(cross_entropy, (y_truth, y_pred), dtype="float32")
return -K.mean(batch_content_loss)
"""
y_truth: (None, Params.max_passage_count) 1 or 0 represents if document contains answer
y_pred : ...
"""
def verify_loss_function(y_truth, y_pred):
    return -K.mean(K.log(K.map_fn(lambda tp: tp[0] * tp[1], (y_truth, y_pred), dtype="float32"))) | [
"keras.backend.cast",
"keras.backend.mean",
"keras.backend.map_fn",
"keras.backend.squeeze",
"keras.backend.log",
"keras.backend.binary_crossentropy"
] | [((720, 746), 'keras.backend.squeeze', 'K.squeeze', (['y_true'], {'axis': '(-1)'}), '(y_true, axis=-1)\n', (729, 746), True, 'import keras.backend as K\n'), ((776, 845), 'keras.backend.map_fn', 'K.map_fn', (['sum_of_log_probabilities', '(y_true, y_pred)'], {'dtype': '"""float32"""'}), "(sum_of_log_probabilities, (y_true, y_pred), dtype='float32')\n", (784, 845), True, 'import keras.backend as K\n'), ((1493, 1552), 'keras.backend.map_fn', 'K.map_fn', (['cross_entropy', '(y_truth, y_pred)'], {'dtype': '"""float32"""'}), "(cross_entropy, (y_truth, y_pred), dtype='float32')\n", (1501, 1552), True, 'import keras.backend as K\n'), ((592, 657), 'keras.backend.map_fn', 'K.map_fn', (['get_loss_per_passage', '(y_true, y_pred)'], {'dtype': '"""float32"""'}), "(get_loss_per_passage, (y_true, y_pred), dtype='float32')\n", (600, 657), True, 'import keras.backend as K\n'), ((673, 705), 'keras.backend.mean', 'K.mean', (['passage_loss_sum'], {'axis': '(0)'}), '(passage_loss_sum, axis=0)\n', (679, 705), True, 'import keras.backend as K\n'), ((858, 895), 'keras.backend.mean', 'K.mean', (['batch_probability_sum'], {'axis': '(0)'}), '(batch_probability_sum, axis=0)\n', (864, 895), True, 'import keras.backend as K\n'), ((1359, 1430), 'keras.backend.map_fn', 'K.map_fn', (['cross_entropy_per_passage', '(y_truth, y_pred)'], {'dtype': '"""float32"""'}), "(cross_entropy_per_passage, (y_truth, y_pred), dtype='float32')\n", (1367, 1430), True, 'import keras.backend as K\n'), ((1446, 1466), 'keras.backend.mean', 'K.mean', (['passage_loss'], {}), '(passage_loss)\n', (1452, 1466), True, 'import keras.backend as K\n'), ((1565, 1591), 'keras.backend.mean', 'K.mean', (['batch_content_loss'], {}), '(batch_content_loss)\n', (1571, 1591), True, 'import keras.backend as K\n'), ((514, 538), 'keras.backend.log', 'K.log', (['start_probability'], {}), '(start_probability)\n', (519, 538), True, 'import keras.backend as K\n'), ((541, 563), 'keras.backend.log', 'K.log', (['end_probability'], {}), '(end_probability)\n', (546, 563), True, 'import keras.backend as K\n'), ((1781, 1842), 'keras.backend.map_fn', 'K.map_fn', (['(lambda truth, pred: truth * pred)', '(y_truth, y_pred)'], {}), '(lambda truth, pred: truth * pred, (y_truth, y_pred))\n', (1789, 1842), True, 'import keras.backend as K\n'), ((387, 419), 'keras.backend.cast', 'K.cast', (['y_true[0]'], {'dtype': '"""int32"""'}), "(y_true[0], dtype='int32')\n", (393, 419), True, 'import keras.backend as K\n'), ((461, 493), 'keras.backend.cast', 'K.cast', (['y_true[1]'], {'dtype': '"""int32"""'}), "(y_true[1], dtype='int32')\n", (467, 493), True, 'import keras.backend as K\n'), ((1286, 1324), 'keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y_truth', 'y_pred'], {}), '(y_truth, y_pred)\n', (1307, 1324), True, 'import keras.backend as K\n')] |
import setuptools
import os
import configparser
from pathlib import Path
PROJECT_ROOT_DIR = Path(__file__).parent
if __name__ == "__main__":
setup_cfg_path = os.path.join(os.path.dirname(__file__), "setup.cfg")
config = configparser.ConfigParser()
config.read(setup_cfg_path)
version = config["metadata"]["version"]
with open(PROJECT_ROOT_DIR / "eo_forge/VERSION", "w") as version_file:
version_file.write(version)
setuptools.setup()
| [
"os.path.dirname",
"setuptools.setup",
"configparser.ConfigParser",
"pathlib.Path"
] | [((94, 108), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (98, 108), False, 'from pathlib import Path\n'), ((232, 259), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (257, 259), False, 'import configparser\n'), ((453, 471), 'setuptools.setup', 'setuptools.setup', ([], {}), '()\n', (469, 471), False, 'import setuptools\n'), ((178, 203), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (193, 203), False, 'import os\n')] |
# Generated by Django 2.2.5 on 2019-10-28 18:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classified', '0004_auto_20191027_1129'),
]
operations = [
migrations.AlterField(
model_name='classifiedad',
name='body',
field=models.TextField(max_length=1000, verbose_name='body'),
),
]
| [
"django.db.models.TextField"
] | [((343, 397), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)', 'verbose_name': '"""body"""'}), "(max_length=1000, verbose_name='body')\n", (359, 397), False, 'from django.db import migrations, models\n')] |
from flask import Flask, Markup, render_template
import pandas as pd
import datetime as dt
import re
source = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
app = Flask(__name__)
class GraphCorona:
def __init__(self, timeframe, country, dates=[]):
self.timeframe = timeframe
self.country = country
self.dates = dates
self.df = pd.DataFrame(columns=['Country', 'Confirmed'])
def get_dates(self):
dates = []
for i in range(1, self.timeframe):
date = dt.datetime.today() - dt.timedelta(days=i)
self.dates.append(date.strftime("%m-%d-%Y"))
return self.dates
def get_paths(self):
sources = []
for date in self.dates:
path = source + date + '.csv'
sources.append(path)
return sources
def read_csv(self, path):
try:
df = pd.read_csv(path, usecols=['Country_Region', 'Confirmed'], sep=',')
df = df.rename(columns={"Country_Region": "Country"})
except ValueError:
try:
df = pd.read_csv(path, usecols=['Country/Region', 'Confirmed'], sep=',')
df = df.rename(columns={"Country/Region": "Country"})
            except ValueError as error:
                print(error)
df = df[df['Country'].str.contains(self.country, flags = re.IGNORECASE)]
return df
def get_data(self):
df_list = []
sources = self.get_paths()
for source in sources:
df_list.append(self.read_csv(source))
self.df = pd.concat(df_list, ignore_index=True)
return self.df
@app.route('/')
def bar_chart():
maximum = 0
bar_dates = dates
bar_df = data['Confirmed'].tolist()
maximum += max(bar_df) + max(bar_df) / 10
return render_template('bar_chart.html', title="Confirmed cases in Romania", max=maximum, labels=bar_dates, values=bar_df)
interval = 10 #days
country = 'Romania'
corona = GraphCorona(interval, country)
dates = corona.get_dates()
data = corona.get_data()
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080)
| [
"flask.render_template",
"pandas.read_csv",
"flask.Flask",
"datetime.datetime.today",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.concat"
] | [((233, 248), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (238, 248), False, 'from flask import Flask, Markup, render_template\n'), ((1864, 1984), 'flask.render_template', 'render_template', (['"""bar_chart.html"""'], {'title': '"""Confirmed cases in Romania"""', 'max': 'maximum', 'labels': 'bar_dates', 'values': 'bar_df'}), "('bar_chart.html', title='Confirmed cases in Romania', max=\n maximum, labels=bar_dates, values=bar_df)\n", (1879, 1984), False, 'from flask import Flask, Markup, render_template\n'), ((435, 481), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Country', 'Confirmed']"}), "(columns=['Country', 'Confirmed'])\n", (447, 481), True, 'import pandas as pd\n'), ((1634, 1671), 'pandas.concat', 'pd.concat', (['df_list'], {'ignore_index': '(True)'}), '(df_list, ignore_index=True)\n', (1643, 1671), True, 'import pandas as pd\n'), ((953, 1020), 'pandas.read_csv', 'pd.read_csv', (['path'], {'usecols': "['Country_Region', 'Confirmed']", 'sep': '""","""'}), "(path, usecols=['Country_Region', 'Confirmed'], sep=',')\n", (964, 1020), True, 'import pandas as pd\n'), ((589, 608), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (606, 608), True, 'import datetime as dt\n'), ((611, 631), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'i'}), '(days=i)\n', (623, 631), True, 'import datetime as dt\n'), ((1152, 1219), 'pandas.read_csv', 'pd.read_csv', (['path'], {'usecols': "['Country/Region', 'Confirmed']", 'sep': '""","""'}), "(path, usecols=['Country/Region', 'Confirmed'], sep=',')\n", (1163, 1219), True, 'import pandas as pd\n')] |
# AUTOGENERATED FILE! PLEASE DON'T EDIT
"""
This is for all short utilities that has the boilerplate feeling. Conversion clis
might feel they have different styles, as :class:`toFloat` converts object iterator to
float iterator, while :class:`toPIL` converts single image url to single PIL image,
whereas :class:`toSum` converts float iterator into a single float value.
The general convention is, if the intended operation sounds simple (convert to floats,
strings, types, ...), then most likely it will convert iterator to iterator, as you
can always use the function directly if you only want to apply it on 1 object.
If it sounds complicated (convert to PIL image, tensor, ...) then most likely it will
convert object to object. Lastly, there are some that just feel right to input
an iterator and output a single object (like getting max, min, std, mean values)."""
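# A quick illustration of the convention described above, using tools defined in
# this module (assuming they are imported the usual way):
#   [2, "a"] | toStr()       # "simple" cli: iterator in, iterator of strings out
#   "image.png" | toPIL()    # "complicated" cli: single path in, single PIL image out
#   range(10) | toSum()      # reducer: iterator in, single number out (45)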
from k1lib.cli.init import patchDefaultDelim, BaseCli, Table, T
import k1lib.cli as cli, numbers, torch, numpy as np
from typing import overload, Iterator, Any, List, Set, Union
import k1lib
__all__ = ["size", "shape", "item", "identity", "iden",
"toStr", "join", "toNumpy", "toTensor",
"toList", "wrapList", "toSet", "toIter", "toRange", "toType",
"equals", "reverse", "ignore",
"toSum", "toAvg", "toMean", "toMax", "toMin", "toPIL",
"toBin", "toIdx",
"lengths", "headerIdx", "deref", "bindec"]
settings = k1lib.settings.cli
def exploreSize(it):
"""Returns first element and length of array"""
if isinstance(it, str): raise TypeError("Just here to terminate shape()")
sentinel = object(); it = iter(it)
o = next(it, sentinel); count = 1
if o is sentinel: return None, 0
try:
while True: next(it); count += 1
except StopIteration: pass
return o, count
class size(BaseCli):
def __init__(self, idx=None):
"""Returns number of rows and columns in the input.
Example::
# returns (3, 2)
[[2, 3], [4, 5, 6], [3]] | size()
# returns 3
[[2, 3], [4, 5, 6], [3]] | size(0)
# returns 2
[[2, 3], [4, 5, 6], [3]] | size(1)
# returns (2, 0)
[[], [2, 3]] | size()
# returns (3,)
[2, 3, 5] | size()
# returns 3
[2, 3, 5] | size(0)
# returns (3, 2, 2)
[[[2, 1], [0, 6, 7]], 3, 5] | size()
# returns (1,) and not (1, 3)
["abc"] | size()
# returns (1, 2, 3)
[torch.randn(2, 3)] | size()
# returns (2, 3, 5)
size()(np.random.randn(2, 3, 5))
There's also :class:`lengths`, which is sort of a simplified/faster version of
this, but only use it if you are sure that ``len(it)`` can be called.
If encounter PyTorch tensors or Numpy arrays, then this will just get the shape
instead of actually looping over them.
:param idx: if idx is None return (rows, columns). If 0 or 1, then rows or
columns"""
super().__init__(); self.idx = idx
def __ror__(self, it:Iterator[str]):
if self.idx is None:
answer = []
try:
while True:
if isinstance(it, (torch.Tensor, np.ndarray)):
return tuple(answer + list(it.shape))
it, s = exploreSize(it); answer.append(s)
except TypeError: pass
return tuple(answer)
else:
return exploreSize(it | cli.item(self.idx))[1]
shape = size
noFill = object()
class item(BaseCli):
def __init__(self, amt:int=1, fill=noFill):
"""Returns the first row.
Example::
# returns 0
iter(range(5)) | item()
# returns torch.Size([5])
torch.randn(3,4,5) | item(2) | shape()
# returns 3
[] | item(fill=3)
:param amt: how many times do you want to call item() back to back?
:param fill: if iterator length is 0, return this"""
self.amt = amt; self.fill = fill
self.fillP = [fill] if fill != noFill else [] # preprocessed, to be faster
def __ror__(self, it:Iterator[str]):
if self.amt != 1:
return it | cli.serial(*(item(fill=self.fill) for _ in range(self.amt)))
return next(iter(it), *self.fillP)
class identity(BaseCli):
"""Yields whatever the input is. Useful for multiple streams.
Example::
# returns range(5)
range(5) | identity()"""
def __ror__(self, it:Iterator[Any]): return it
iden = identity
class toStr(BaseCli):
def __init__(self, column:int=None):
"""Converts every line to a string.
Example::
# returns ['2', 'a']
[2, "a"] | toStr() | deref()
# returns [[2, 'a'], [3, '5']]
assert [[2, "a"], [3, 5]] | toStr(1) | deref()"""
super().__init__(); self.column = column
def __ror__(self, it:Iterator[str]):
c = self.column
if c is None:
for line in it: yield str(line)
else:
for row in it:
yield [e if i != c else str(e) for i, e in enumerate(row)]
class join(BaseCli):
def __init__(self, delim:str=None):
r"""Merges all strings into 1, with `delim` in the middle. Basically
:meth:`str.join`. Example::
# returns '2\na'
[2, "a"] | join("\n")"""
super().__init__(); self.delim = patchDefaultDelim(delim)
def __ror__(self, it:Iterator[str]):
return self.delim.join(it | toStr())
class toNumpy(BaseCli):
"""Converts generator to numpy array. Essentially ``np.array(list(it))``"""
def __ror__(self, it:Iterator[float]) -> np.array:
return np.array(list(it))
class toTensor(BaseCli):
def __init__(self, dtype=torch.float32):
"""Converts generator to :class:`torch.Tensor`. Essentially
``torch.tensor(list(it))``.
Also checks if input is a PIL Image. If yes, turn it into a :class:`torch.Tensor`
and return."""
self.dtype = dtype
def __ror__(self, it:Iterator[float]) -> torch.Tensor:
try:
import PIL; pic=it
if isinstance(pic, PIL.Image.Image): # stolen from torchvision ToTensor transform
mode_to_nptype = {'I': np.int32, 'I;16': np.int16, 'F': np.float32}
img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))
if pic.mode == '1': img = 255 * img
img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
return img.permute((2, 0, 1)).contiguous().to(self.dtype) # put it from HWC to CHW format
except: pass
return torch.tensor(list(it)).to(self.dtype)
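# Usage sketch for toTensor (the image path is a hypothetical example):
#   [1, 2, 3] | toTensor()                  # tensor([1., 2., 3.])
#   PIL.Image.open("img.png") | toTensor()  # CHW float32 tensor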
class toList(BaseCli):
"""Converts generator to list. :class:`list` would do the
same, but this is just to maintain the style"""
def __ror__(self, it:Iterator[Any]) -> List[Any]:
return list(it)
class wrapList(BaseCli):
"""Wraps inputs inside a list. There's a more advanced cli tool
built from this, which is :meth:`~k1lib.cli.structural.unsqueeze`."""
def __ror__(self, it:T) -> List[T]:
return [it]
class toSet(BaseCli):
"""Converts generator to set. :class:`set` would do the
same, but this is just to maintain the style"""
def __ror__(self, it:Iterator[T]) -> Set[T]:
return set(it)
class toIter(BaseCli):
"""Converts object to iterator. `iter()` would do the
same, but this is just to maintain the style"""
def __ror__(self, it:List[T]) -> Iterator[T]:
return iter(it)
class toRange(BaseCli):
"""Returns iter(range(len(it))), effectively"""
def __ror__(self, it:Iterator[Any]) -> Iterator[int]:
for i, _ in enumerate(it): yield i
class toType(BaseCli):
"""Converts object to its type.
Example::
# returns [int, float, str, torch.Tensor]
[2, 3.5, "ah", torch.randn(2, 3)] | toType() | deref()"""
def __ror__(self, it:Iterator[T]) -> Iterator[type]:
for e in it: yield type(e)
class _EarlyExp(Exception): pass
class equals:
"""Checks if all incoming columns/streams are identical"""
def __ror__(self, streams:Iterator[Iterator[str]]):
streams = list(streams)
for row in zip(*streams):
sampleElem = row[0]
try:
for elem in row:
if sampleElem != elem: yield False; raise _EarlyExp()
yield True
except _EarlyExp: pass
class reverse(BaseCli):
"""Reverses incoming list.
Example::
# returns [3, 5, 2]
[2, 5, 3] | reverse() | deref()"""
def __ror__(self, it:Iterator[str]) -> List[str]:
return reversed(list(it))
class ignore(BaseCli):
r"""Just loops through everything, ignoring the output.
Example::
# will just return an iterator, and not print anything
[2, 3] | apply(lambda x: print(x))
# will prints "2\n3"
[2, 3] | apply(lambda x: print(x)) | ignore()"""
def __ror__(self, it:Iterator[Any]):
for _ in it: pass
class toSum(BaseCli):
"""Calculates the sum of list of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 45
range(10) | toSum()"""
def __ror__(self, it:Iterator[float]):
if isinstance(it, torch.Tensor): return it.sum()
return sum(it)
class toAvg(BaseCli):
"""Calculates average of list of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 4.5
range(10) | toAvg()
# returns nan
[] | toAvg()"""
def __ror__(self, it:Iterator[float]):
if isinstance(it, torch.Tensor): return it.mean()
s = 0; i = -1
for i, v in enumerate(it): s += v
i += 1
if not k1lib.settings.cli.strict and i == 0: return float("nan")
return s / i
toMean = toAvg
class toMax(BaseCli):
"""Calculates the max of a bunch of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 6
[2, 5, 6, 1, 2] | toMax()"""
def __ror__(self, it:Iterator[float]) -> float:
if isinstance(it, torch.Tensor): return it.max()
return max(it)
class toMin(BaseCli):
"""Calculates the min of a bunch of numbers. Can pipe in :class:`torch.Tensor`.
Example::
# returns 1
[2, 5, 6, 1, 2] | toMin()"""
def __ror__(self, it:Iterator[float]) -> float:
if isinstance(it, torch.Tensor): return it.min()
return min(it)
class toPIL(BaseCli):
def __init__(self):
"""Converts a path to a PIL image.
Example::
ls(".") | toPIL().all() | item() # get first image"""
import PIL; self.PIL = PIL
def __ror__(self, path) -> "PIL.Image.Image":
return self.PIL.Image.open(path)
class toBin(BaseCli):
"""Converts integer to binary string.
Example::
# returns "101"
5 | toBin()"""
def __ror__(self, it):
return bin(int(it))[2:]
class toIdx(BaseCli):
def __init__(self, chars:str):
"""Get index of characters according to a reference.
Example::
# returns [1, 4, 4, 8]
"#&&*" | toIdx("!#$%&'()*+") | deref()"""
self.chars = {v:k for k, v in enumerate(chars)}
def __ror__(self, it):
chars = self.chars
for e in it: yield chars[e]
class lengths(BaseCli):
"""Returns the lengths of each element.
Example::
[range(5), range(10)] | lengths() == [5, 10]
This is a simpler (and faster!) version of :class:`shape`. It assumes each element
can be called with ``len(x)``, while :class:`shape` iterates through every elements
to get the length, and thus is much slower."""
def __ror__(self, it:Iterator[List[Any]]) -> Iterator[int]:
for e in it: yield len(e)
def headerIdx():
"""Cuts out first line, put an index column next to it, and prints it
out. Useful when you want to know what your column's index is to cut it
out. Also sets the context variable "header", in case you need it later.
Example::
# returns [[0, 'a'], [1, 'b'], [2, 'c']]
["abc"] | headerIdx() | deref()"""
return item() | cli.wrapList() | cli.transpose() | cli.insertIdColumn(True)
settings.atomic.add("deref", (numbers.Number, np.number, str, dict, bool, bytes, torch.nn.Module), "used by deref")
Tensor = torch.Tensor; atomic = settings.atomic
class inv_dereference(BaseCli):
def __init__(self, ignoreTensors=False):
"""Kinda the inverse to :class:`dereference`"""
super().__init__(); self.ignoreTensors = ignoreTensors
def __ror__(self, it:Iterator[Any]) -> List[Any]:
for e in it:
if e is None or isinstance(e, atomic.deref): yield e
elif isinstance(e, Tensor):
if not self.ignoreTensors and len(e.shape) == 0: yield e.item()
else: yield e
else:
try: yield e | self
except: yield e
class deref(BaseCli):
def __init__(self, maxDepth=float("inf"), ignoreTensors=True):
"""Recursively converts any iterator into a list. Only :class:`str`,
:class:`numbers.Number` and :class:`~torch.nn.Module` are not converted. Example::
# returns something like "<range_iterator at 0x7fa8c52ca870>"
iter(range(5))
# returns [0, 1, 2, 3, 4]
iter(range(5)) | deref()
# returns [2, 3], yieldSentinel stops things early
[2, 3, yieldSentinel, 6] | deref()
You can also specify a ``maxDepth``::
# returns something like "<list_iterator at 0x7f810cf0fdc0>"
iter([range(3)]) | deref(0)
# returns [range(3)]
iter([range(3)]) | deref(1)
# returns [[0, 1, 2]]
iter([range(3)]) | deref(2)
There are a few classes/types that are considered atomic, and :class:`deref`
will never try to iterate over it. If you wish to change it, do something like::
settings.cli.atomic.deref = (int, float, ...)
.. warning::
Can work well with PyTorch Tensors, but not Numpy arrays as they screw things up
with the __ror__ operator, so do torch.from_numpy(...) first. Don't worry about
unnecessary copying, as numpy and torch both utilizes the buffer protocol.
:param maxDepth: maximum depth to dereference. Starts at 0 for not doing anything
at all
:param ignoreTensors: if True, then don't loop over :class:`torch.Tensor`
internals"""
super().__init__(); self.ignoreTensors = ignoreTensors
self.maxDepth = maxDepth; self.depth = 0
def __ror__(self, it:Iterator[T]) -> List[T]:
ignoreTensors = self.ignoreTensors
if self.depth >= self.maxDepth: return it
elif isinstance(it, atomic.deref): return it
elif isinstance(it, Tensor):
if ignoreTensors: return it
if len(it.shape) == 0: return it.item()
try: iter(it)
except: return it
self.depth += 1; answer = []
for e in it:
if e is cli.yieldSentinel: return answer
answer.append(self.__ror__(e))
self.depth -= 1; return answer
def __invert__(self) -> BaseCli:
"""Returns a :class:`~k1lib.cli.init.BaseCli` that makes
everything an iterator. Not entirely sure when this comes in handy, but it's
there."""
return inv_dereference(self.ignoreTensors)
class bindec(BaseCli):
def __init__(self, cats:List[Any], f=None):
"""Binary decodes the input.
Example::
# returns ['a', 'c']
5 | bindec("abcdef")
# returns 'a,c'
5 | bindec("abcdef", join(","))
:param cats: categories
:param f: transformation function of the selected elements. Defaulted to :class:`toList`, but others like :class:`join` is useful too"""
self.cats = cats; self.f = f or toList()
def __ror__(self, it):
it = bin(int(it))[2:][::-1]
return (e for i, e in zip(it, self.cats) if i == '1') | self.f | [
"k1lib.cli.insertIdColumn",
"k1lib.cli.init.patchDefaultDelim",
"k1lib.cli.transpose",
"k1lib.cli.item",
"k1lib.cli.wrapList"
] | [((5150, 5174), 'k1lib.cli.init.patchDefaultDelim', 'patchDefaultDelim', (['delim'], {}), '(delim)\n', (5167, 5174), False, 'from k1lib.cli.init import patchDefaultDelim, BaseCli, Table, T\n'), ((11723, 11747), 'k1lib.cli.insertIdColumn', 'cli.insertIdColumn', (['(True)'], {}), '(True)\n', (11741, 11747), True, 'import k1lib.cli as cli, numbers, torch, numpy as np\n'), ((11705, 11720), 'k1lib.cli.transpose', 'cli.transpose', ([], {}), '()\n', (11718, 11720), True, 'import k1lib.cli as cli, numbers, torch, numpy as np\n'), ((11688, 11702), 'k1lib.cli.wrapList', 'cli.wrapList', ([], {}), '()\n', (11700, 11702), True, 'import k1lib.cli as cli, numbers, torch, numpy as np\n'), ((3346, 3364), 'k1lib.cli.item', 'cli.item', (['self.idx'], {}), '(self.idx)\n', (3354, 3364), True, 'import k1lib.cli as cli, numbers, torch, numpy as np\n')] |
import pytest
from faker import Faker
from django.db.utils import IntegrityError
from django.urls import reverse
from .factory import AccountSubTypeFactory
from ..models import AccountSubType, Account
from ..choices import AccountType
fake = Faker()
class TestAccountSubType:
def test_name_field(self, db):
sub_type = AccountSubType()
field = sub_type._meta.get_field('name')
assert field.verbose_name == 'name'
assert field.max_length == 64
assert field.editable
assert not field.blank
assert not field.null
assert not field.has_default()
assert not field.hidden
assert not field.unique
def test_type_field(self, db):
sub_type = AccountSubType()
field = sub_type._meta.get_field('type')
assert field.verbose_name == 'type'
assert field.choices == AccountType.choices
assert field.default == AccountType.Asset
assert field.has_default()
assert field.editable
assert not field.blank
assert not field.null
assert not field.hidden
assert not field.unique
def test_order_field(self, db):
sub_type = AccountSubType()
field = sub_type._meta.get_field('order')
assert field.verbose_name == 'order'
assert field.default == 0
assert field.has_default()
assert field.editable
assert field.blank
assert field.null
assert not field.hidden
assert not field.unique
def test_name_cannot_be_null(self, user):
with pytest.raises(IntegrityError) as error:
AccountSubType.objects.create(created_by=user, name=None, type=AccountType.Asset, order=0)
def test_type_cannot_be_null(self, user):
with pytest.raises(IntegrityError) as error:
AccountSubType.objects.create(created_by=user, name=fake.name(), type=None, order=0)
def test_order_can_be_null(self, user):
sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=None)
sub_type.save()
query = AccountSubType.objects.all()
saved_obj = query.first()
assert query.count() == 1
assert saved_obj.name == sub_type.name
assert saved_obj.type == sub_type.type
assert not saved_obj.order
def test_create(self, user):
sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
sub_type.save()
query = AccountSubType.objects.all()
saved_obj = query.first()
assert query.count() == 1
assert saved_obj.name == sub_type.name
assert saved_obj.type == sub_type.type
assert saved_obj.order == sub_type.order
def test_count(self, user):
AccountSubTypeFactory(created_by=user)
AccountSubTypeFactory(created_by=user)
query = AccountSubType.objects.all()
assert query.count() == 2
def test_edit(self, user):
sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
sub_type.save()
new_name = 'new name'
sub_type.name = new_name
sub_type.order = 1
sub_type.save()
saved_obj = AccountSubType.objects.first()
assert saved_obj.name == new_name
assert saved_obj.order == 1
def test_str(self, user):
sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
assert str(sub_type) == f'{sub_type.name}#{sub_type.get_type_display()}#{sub_type.order}'
def test_type_text(self, user):
sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
assert sub_type.type_text == sub_type.get_type_display()
def test_get_absolute_url(self, user):
sub_type = AccountSubType.objects.create(created_by=user, name=fake.name(), type=AccountType.Asset, order=0)
assert sub_type.get_absolute_url() == reverse('account:subtype:detail-update', args=[sub_type.pk])
class TestAccount:
def test_name_field(self, db):
account = Account()
field = account._meta.get_field('name')
assert field.__class__.__name__ == 'CharField'
assert field.verbose_name == 'name'
assert field.max_length == 64
assert field.editable
assert not field.blank
assert not field.null
assert not field.has_default()
assert not field.hidden
assert not field.unique
def test_code_field(self, db):
account = Account()
field = account._meta.get_field('code')
assert field.__class__.__name__ == 'CharField'
assert field.verbose_name == 'code'
assert field.max_length == 64
assert field.editable
assert not field.blank
assert not field.null
assert not field.has_default()
assert not field.hidden
assert not field.unique
def test_type_field(self, db):
account = Account()
field = account._meta.get_field('type')
assert field.__class__.__name__ == 'IntegerField'
assert field.verbose_name == 'type'
assert field.editable
assert field.blank
assert field.has_default()
assert field.choices == AccountType.choices
assert field.default == AccountType.Asset
assert not field.null
assert not field.hidden
assert not field.unique
def test_sub_type_field(self, db):
account = Account()
field = account._meta.get_field('sub_type')
assert field.__class__.__name__ == 'ForeignKey'
assert field.verbose_name == 'sub type'
assert field.editable
assert not field.blank
assert not field.has_default()
assert field.default.__name__ == 'NOT_PROVIDED'
assert not field.null
assert not field.hidden
assert not field.unique
def test_depth_field(self, db):
account = Account()
field = account._meta.get_field('depth')
assert field.__class__.__name__ == 'IntegerField'
assert field.verbose_name == 'depth'
assert field.default == 0
assert field.has_default()
assert field.editable
assert field.blank
assert field.null
assert not field.hidden
assert not field.unique
def test_entry_date_field(self):
account = Account()
field = account._meta.get_field('entry_date')
assert field.__class__.__name__ == 'DateField'
assert field.name == 'entry_date'
assert field.verbose_name == 'entry date'
assert field.blank
assert field.null
assert field.default
assert field.has_default()
assert not field.unique
def test_description_field(self, db):
account = Account()
field = account._meta.get_field('description')
assert field.__class__.__name__ == 'TextField'
assert field.verbose_name == 'description'
assert not field.max_length
assert field.editable
assert field.blank
assert field.null
assert not field.default
assert field.has_default()
assert not field.hidden
assert not field.unique
def test_name_cannot_be_null(self, user, sub_type):
with pytest.raises(IntegrityError) as error:
Account.objects.create(name=None, code=fake.random_int(0, 100), type=AccountType.Asset, sub_type=sub_type, created_by=user)
def test_code_cannot_be_null(self, user, sub_type):
with pytest.raises(IntegrityError) as error:
Account.objects.create(name=fake.name(), code=None, type=AccountType.Asset, sub_type=sub_type, created_by=user)
def test_type_cannot_be_null(self, user, sub_type):
with pytest.raises(IntegrityError) as error:
Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), type=None, sub_type=sub_type, created_by=user)
def test_sub_type_cannot_be_null(self, user):
with pytest.raises(IntegrityError) as error:
Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), type=AccountType.Liability, sub_type=None, created_by=user)
def test_type_default_asset(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
assert account.type == AccountType.Asset
def test_depth_default_0(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
assert account.depth == 0
def test_str(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
assert str(account) == f'{account.name}:{account.code}'
def test_str_with_parent(self, user, sub_type):
parent = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
account = Account.objects.create(parent=parent, name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
assert str(account) == f'{account.name}:{account.code}#{parent.name}:{parent.code}'
def test_type_text(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
assert account.type_text == account.get_type_display()
def test_sub_type_text(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
assert account.sub_type_text == account.sub_type.name
def test_create(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
instance = Account.objects.get(pk=account.pk)
assert account.pk == instance.pk
def test_count(self, user, sub_type):
Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
count = Account.objects.all()
assert count.count() == 3
def test_edit(self, user, sub_type):
account = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type, created_by=user)
account.code = str(1234)
account.save()
instance = Account.objects.get(pk=account.pk)
assert instance.code == str(1234)
def test_set_depth(self, user, sub_type):
parent = Account.objects.create(name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
account = Account.objects.create(parent=parent, name=fake.name(), code=fake.random_int(1, 100), sub_type=sub_type,created_by=user)
assert parent.depth == 0
assert account.depth == 1
| [
"faker.Faker",
"pytest.raises",
"django.urls.reverse"
] | [((244, 251), 'faker.Faker', 'Faker', ([], {}), '()\n', (249, 251), False, 'from faker import Faker\n'), ((1578, 1607), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (1591, 1607), False, 'import pytest\n'), ((1781, 1810), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (1794, 1810), False, 'import pytest\n'), ((4082, 4142), 'django.urls.reverse', 'reverse', (['"""account:subtype:detail-update"""'], {'args': '[sub_type.pk]'}), "('account:subtype:detail-update', args=[sub_type.pk])\n", (4089, 4142), False, 'from django.urls import reverse\n'), ((7433, 7462), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (7446, 7462), False, 'import pytest\n'), ((7679, 7708), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (7692, 7708), False, 'import pytest\n'), ((7913, 7942), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (7926, 7942), False, 'import pytest\n'), ((8147, 8176), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (8160, 8176), False, 'import pytest\n')] |
import pandas as pd
from polo2 import PoloDb
class PoloRetro:
def __init__(self, config):
self.config = config
self.corpus = None
self.model = None
self.retro = None
# todo: Rewrite as PoloCombiner or something and make this the init
def retro_combine(self, corpus_dbfile, model_dbfile, retro_dbfile=None):
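        # Open the corpus and model databases, default the retro DB name from the config slug, and build its schema.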
self.corpus = PoloDb(corpus_dbfile)
self.model = PoloDb(model_dbfile)
        if retro_dbfile is None:
retro_dbfile = '{}-retro-combo.db'.format(self.config.ini['DEFAULT']['slug'])
self.retro = PoloDb(retro_dbfile)
self.create_retro_db()
def create_all_tables(self):
self.create_config_table()
self.create_src_doc_meta_table()
self.create_src_doc_table()
self.create_word_table()
self.create_doc_table()
self.create_docword_table()
self.create_topic_table()
self.create_doctopic_table()
self.create_doctopic_long_table()
self.create_topicword_table()
self.create_topicword_long_table()
self.create_topicphrase_table()
self.create_topicpair_table()
self.create_topicpair_by_deps_table()
#self.create_doctopic_sig_table()
def create_doc_table(self):
doc = self.model.get_table('doc')
src_doc = self.corpus.get_table('doc')
new_doc = pd.DataFrame(columns=['doc_id', 'doc_label', 'doc_str'])
new_doc['doc_id'] = doc['doc_id']
doc.set_index('doc_id', inplace=True)
src_doc.set_index('doc_id', inplace=True)
new_doc.set_index('doc_id', inplace=True)
new_doc['doc_label'] = doc.doc_label
new_doc['doc_str'] = src_doc.doc_content
self.retro.put_table(new_doc, 'doc', if_exists='replace', index=True)
def create_src_doc_table(self):
src_doc = self.corpus.get_table('doc')
src_doc.set_index('doc_id', inplace=True)
new_src_doc = pd.DataFrame(columns='src_meta_id doc_id doc_title doc_uri doc_label doc_ord doc_content doc_original doc_year doc_date doc_citation'.split())
new_src_doc['doc_id'] = src_doc.index
new_src_doc.set_index('doc_id', inplace=True)
new_src_doc['doc_title'] = src_doc.doc_title
new_src_doc['doc_uri'] = src_doc.doc_key
new_src_doc['doc_uri'] = new_src_doc['doc_uri'].apply(lambda x: self.config.ini['DEFAULT']['src_base_url'] + str(x))
new_src_doc['doc_label'] = src_doc.doc_label
new_src_doc['doc_ord'] = None
new_src_doc['doc_content'] = src_doc.doc_content
new_src_doc['doc_original'] = src_doc.doc_original
if 'doc_year' in src_doc.columns:
new_src_doc['doc_year'] = src_doc.doc_year
if 'doc_date' in src_doc.columns:
new_src_doc['doc_date'] = src_doc.doc_date
new_src_doc['doc_citation'] = None
self.retro.put_table(new_src_doc, 'src_doc', if_exists='replace', index=True)
def create_src_doc_meta_table(self):
src_doc_meta = pd.DataFrame({'src_meta_id': self.config.ini['DEFAULT']['slug'],
'src_meta_desc': self.config.ini['DEFAULT']['title'],
'src_meta_base_url': self.config.ini['DEFAULT']['src_base_url'],
'src_meta_ord_type': None}, index=['src_meta_id']) # fixme: Need to add ord type to config and pass it
self.retro.put_table(src_doc_meta, 'src_doc_meta', if_exists='replace')
def create_word_table(self):
word = self.corpus.get_table('token')
new_word = pd.DataFrame(columns='word_id word_str word_freq word_stem'.split())
new_word['word_id'] = word.index
new_word.set_index('word_id', inplace=True)
new_word['word_str'] = word.token_str
new_word['word_freq'] = word.token_count
new_word['word_stem'] = None
self.retro.put_table(new_word, 'word', if_exists='replace', index=True)
def create_docword_table(self):
sql = "SELECT dt.doc_id, t.ROWID as 'word_id', t.token_str as 'word_str', t.token_count as 'word_count', NULL as 'tfidf_weight' " \
"FROM doctoken dt JOIN token t USING(token_str)"
new_docword = pd.read_sql_query(sql, self.corpus.conn)
self.retro.put_table(new_docword, 'docword', if_exists='replace')
def create_config_table(self):
config = self.model.get_table('config')
self.retro.put_table(config, 'config', if_exists='replace')
def create_doctopic_table(self):
doctopic = self.model.get_table('doctopic')
doctopic['topic_label'] = doctopic['topic_id'].apply(lambda x: 't{}'.format(x))
doctopic = doctopic[['doc_id', 'topic_label', 'topic_weight']]
doctopic.set_index(['doc_id', 'topic_label'], inplace=True)
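        # Pivot the long (doc_id, topic_label, topic_weight) table into wide format with one column per topic.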
doctopic_wide = doctopic.unstack().reset_index()
doctopic_wide.columns = doctopic_wide.columns.droplevel(0)
doctopic_wide.rename(columns={'': 'doc_id'}, inplace=True)
doc = self.model.get_table('doc')
doc.set_index('doc_id', inplace=True)
doctopic_wide = doctopic_wide.join(doc[['topic_entropy', 'doc_label']], how='left')
self.retro.put_table(doctopic_wide, 'doctopic', if_exists='replace')
def create_topic_table(self):
topic = self.model.get_table('topic')
new_topic = pd.DataFrame(columns='topic_id topic_alpha total_tokens topic_words'.split())
new_topic['topic_id'] = topic.topic_id
new_topic['topic_alpha'] = topic.topic_alpha
new_topic['topic_words'] = topic.topic_words
new_topic['total_tokens'] = topic.topic_tokens
self.retro.put_table(new_topic, 'topic', if_exists='replace')
def create_topicphrase_table(self):
topicphrase = self.model.get_table('topicphrase')
self.retro.put_table(topicphrase, 'topicphrase', if_exists='replace')
def create_topicword_table(self):
topicword = self.model.get_table('topicword')
word = self.model.get_table('word')
topicword['word_count'] = topicword['word_count'].astype(int)
topicword['topic_label'] = topicword['topic_id'].apply(lambda x: 't{}'.format(x))
topicword = topicword[['word_id', 'topic_label', 'word_count']]
topicword.set_index(['word_id', 'topic_label'], inplace=True)
topicword_wide = topicword.unstack().reset_index()
topicword_wide.columns = topicword_wide.columns.droplevel(0)
topicword_wide.rename(columns={'': 'word_id'}, inplace=True)
topicword_wide.fillna(0, inplace=True)
topicword_wide.set_index('word_id', inplace=True)
word.set_index('word_id', inplace=True)
topicword_wide['word_str'] = word.word_str
self.retro.put_table(topicword_wide, 'topicword', if_exists='replace', index=True)
def create_doctopic_long_table(self):
doctopic = self.model.get_table('doctopic')
self.retro.put_table(doctopic, 'doctopic_long', if_exists='replace')
def create_topicword_long_table(self):
topicword = self.model.get_table('topicword')
word = self.model.get_table('word')
topicword['word_count'] = topicword['word_count'].astype(int)
word.set_index('word_id', inplace=True)
topicword.set_index(['word_id','topic_id'], inplace=True)
topicword = topicword.join(word, how='left')
self.retro.put_table(topicword, 'topicword_long', if_exists='replace', index=True)
def create_topicpair_table(self):
topicpair = self.model.get_table('topicpair')
new_tp = pd.DataFrame(columns='topic_id1 topic_id2 cosine_sim js_div'.split())
new_tp['topic_id1'] = topicpair.topic_a_id
new_tp['topic_id2'] = topicpair.topic_b_id
new_tp['cosine_sim'] = topicpair.cosim
new_tp['js_div'] = topicpair.jsd
self.retro.put_table(new_tp, 'topicpair', if_exists='replace')
def create_topicpair_by_deps_table(self):
topicpair = self.model.get_table('topicpair')
topic = self.model.get_table('topic')
topicpair = topicpair.merge(topic[['topic_id', 'topic_rel_freq']], left_on='topic_a_id', right_on='topic_id', how='inner')
topicpair = topicpair.merge(topic[['topic_id', 'topic_rel_freq']], left_on='topic_b_id', right_on='topic_id', how='inner')
new_tp = pd.DataFrame(columns='topic_a topic_b p_a p_b p_ab p_aGb p_bGa i_ab'.split())
new_tp['topic_a'] = topicpair.topic_a_id
new_tp['topic_b'] = topicpair.topic_b_id
new_tp['p_a'] = topicpair.topic_rel_freq_x
new_tp['p_b'] = topicpair.topic_rel_freq_y
new_tp['p_ab'] = topicpair.p_ab
new_tp['p_aGb'] = topicpair.p_aGb
new_tp['p_bGa'] = topicpair.p_bGa
new_tp['i_ab'] = topicpair.i_ab
self.retro.put_table(new_tp, 'topicpair_by_deps')
def create_doctopic_sig_table(self):
pass
    # fixme: The SQL for tables that use topics as columns needs to be generated!
def create_retro_db(self):
sql_creators = """
CREATE TABLE IF NOT EXISTS src_doc_meta (src_meta_id TEXT,src_meta_desc TEXT,src_meta_base_url TEXT,src_meta_ord_type TEXT);
CREATE TABLE IF NOT EXISTS src_doc (src_meta_id TEXT,doc_id INTEGER PRIMARY KEY,doc_title TEXT,doc_uri TEXT UNIQUE,doc_label TEXT,doc_ord INTEGER,doc_content TEXT,doc_original TEXT,doc_year INTEGER,doc_date TEXT,doc_citation TEXT);
CREATE TABLE IF NOT EXISTS word (word_id INTEGER PRIMARY KEY,word_str TEXT,word_freq INTEGER,word_stem TEXT);
CREATE TABLE IF NOT EXISTS doc (doc_id INTEGER PRIMARY KEY,doc_label TEXT,doc_str TEXT);
CREATE TABLE IF NOT EXISTS docword (doc_id INTEGER,word_id INTEGER,word_str TEXT,word_count INTEGER,tfidf_weight REAL);
CREATE TABLE IF NOT EXISTS config (key TEXT, value TEXT);
CREATE TABLE IF NOT EXISTS topic (topic_id INTEGER PRIMARY KEY, topic_alpha REAL, total_tokens INTEGER, topic_words TEXT);
CREATE TABLE IF NOT EXISTS topicphrase (topic_id INTEGER, topic_phrase TEXT, phrase_count INTEGER, phrase_weight REAL);
CREATE TABLE IF NOT EXISTS doctopic_long (doc_id INTEGER NOT NULL, topic_id INTEGER NOT NULL, topic_weight REAL NOT NULL, UNIQUE (doc_id, topic_id));
CREATE TABLE IF NOT EXISTS topicword_long (word_id INTEGER NOT NULL, word_str TEXT NOT NULL, topic_id INTEGER NOT NULL, word_count INTEGER NOT NULL, UNIQUE (word_id, topic_id));
CREATE TABLE IF NOT EXISTS topicpair (topic_id1 INTEGER, topic_id2 INTEGER, cosine_sim REAL, js_div REAL);
CREATE TABLE IF NOT EXISTS topicpair_by_deps (topic_a INTEGER, topic_b INTEGER, p_a REAL, p_b REAL, p_ab REAL, p_aGb REAL, p_bGa REAL, i_ab REAL);
CREATE TABLE IF NOT EXISTS doctopic_sig (doc_id INTEGER PRIMARY KEY, topic_sig TEXT, topic_sig_sorted TEXT, topic_n INTEGER);
""".split(';')
# Handle wide tables
topic = self.model.get_table('topic')
n_topics = len(topic.topic_id.tolist())
topic_fields_real = ','.join(['t{} REAL'.format(tn) for tn in range(n_topics)])
topic_fields_int = ','.join(['t{} INTEGER'.format(tn) for tn in range(n_topics)])
sql_creators.append("CREATE TABLE IF NOT EXISTS doctopic (doc_id INTEGER PRIMARY KEY, doc_label TEXT, topic_entropy REAL, {})".format(topic_fields_real))
sql_creators.append("CREATE TABLE IF NOT EXISTS topicword (word_id INTEGER, word_str TEXT, {})".format(topic_fields_int))
for sql_create in sql_creators:
self.retro.conn.execute(sql_create)
if __name__ == '__main__':
pass
| [
"pandas.DataFrame",
"polo2.PoloDb",
"pandas.read_sql_query"
] | [((376, 397), 'polo2.PoloDb', 'PoloDb', (['corpus_dbfile'], {}), '(corpus_dbfile)\n', (382, 397), False, 'from polo2 import PoloDb\n'), ((419, 439), 'polo2.PoloDb', 'PoloDb', (['model_dbfile'], {}), '(model_dbfile)\n', (425, 439), False, 'from polo2 import PoloDb\n'), ((584, 604), 'polo2.PoloDb', 'PoloDb', (['retro_dbfile'], {}), '(retro_dbfile)\n', (590, 604), False, 'from polo2 import PoloDb\n'), ((1383, 1439), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['doc_id', 'doc_label', 'doc_str']"}), "(columns=['doc_id', 'doc_label', 'doc_str'])\n", (1395, 1439), True, 'import pandas as pd\n'), ((3021, 3267), 'pandas.DataFrame', 'pd.DataFrame', (["{'src_meta_id': self.config.ini['DEFAULT']['slug'], 'src_meta_desc': self.\n config.ini['DEFAULT']['title'], 'src_meta_base_url': self.config.ini[\n 'DEFAULT']['src_base_url'], 'src_meta_ord_type': None}"], {'index': "['src_meta_id']"}), "({'src_meta_id': self.config.ini['DEFAULT']['slug'],\n 'src_meta_desc': self.config.ini['DEFAULT']['title'],\n 'src_meta_base_url': self.config.ini['DEFAULT']['src_base_url'],\n 'src_meta_ord_type': None}, index=['src_meta_id'])\n", (3033, 3267), True, 'import pandas as pd\n'), ((4162, 4202), 'pandas.read_sql_query', 'pd.read_sql_query', (['sql', 'self.corpus.conn'], {}), '(sql, self.corpus.conn)\n', (4179, 4202), True, 'import pandas as pd\n')] |
import os
import torch
import argparse
import datetime
import numpy as np
import torch.optim as optim
from models.resnet import resnet50, Model, resnet50_1d
from losses.loss import ContrastiveLoss_
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from dataset.industry_dataset_identify import IndustryDatasetIdentifyDiff, IndustryDatasetIdentify
from sklearn.model_selection import train_test_split
from torch.cuda.amp.grad_scaler import GradScaler
from torch.cuda.amp import autocast
nowTime = datetime.datetime.now().strftime('%Y-%m-%d-%H')
result_dir = './results/pump_slider_fan_valve/all_ids/{}'.format(nowTime)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
def save_model(model, optimizer, step, path):
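    # Create the target directory if needed, then save the model, its weights, the optimizer state and the step counter.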
if len(os.path.dirname(path)) > 0 and not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
torch.save({
'model': model,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'step': step,
# 'amp': amp.state_dict(),
}, path)
def load_model(model, optimizer, path):
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['model_state_dict'])
if optimizer is not None:
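        # Reset the stored learning rate before restoring the optimizer state.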
checkpoint['optimizer_state_dict']['param_groups'][0]['lr'] = 0.001
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
step = checkpoint['step']
return step
def validate(model, criterion, val_loader):
model.eval()
with torch.no_grad():
total_loss = 0.0
for i, (mix, mix_, status) in enumerate(val_loader):
mix = mix.cuda()
mix_ = mix_.cuda()
status = status.cuda()
features, features_ = model(mix, mix_)
loss = criterion(features, features_, status)
total_loss += loss.item()
return total_loss
def get_files(path):
wav_files = []
for _, _, files in os.walk(path):
for f in files:
if f.split('.')[-1] == 'wav':
wav_files.append(f)
return wav_files
def get_ids(total_path):
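    # For each machine id directory, split its normal recordings into train (80%) and validation (20%) file lists.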
train_normal_ids_files = []
val_normal_ids_files = []
for dir in os.listdir(total_path):
if dir.split('_')[0] != "id":
continue
normal_path = total_path + dir + '/normal'
normal_files = get_files(normal_path)
train_normals, val_normal = train_test_split(normal_files, random_state=42, test_size=0.2)
train_normal_ids_files.append(train_normals)
val_normal_ids_files.append(val_normal)
return train_normal_ids_files, val_normal_ids_files
def main(args):
print(args)
# dataset and dataloader
s1_path = '../MIMII/pump/'
s2_path = '../MIMII/slider/'
s3_path = '../MIMII/fan/'
s4_path = '../MIMII/valve/'
sources_path = [s1_path, s2_path, s3_path, s4_path]
train_s1_normal_ids_files, val_s1_normal_ids_files = get_ids(s1_path)
train_s2_normal_ids_files, val_s2_normal_ids_files = get_ids(s2_path)
train_s3_normal_ids_files, val_s3_normal_ids_files = get_ids(s3_path)
train_s4_normal_ids_files, val_s4_normal_ids_files = get_ids(s4_path)
train_normals = [train_s1_normal_ids_files, train_s2_normal_ids_files, train_s3_normal_ids_files, train_s4_normal_ids_files]
val_normals = [val_s1_normal_ids_files, val_s2_normal_ids_files, val_s3_normal_ids_files, val_s4_normal_ids_files]
train_dataset = IndustryDatasetIdentifyDiff(sources_path, train_normals)
val_dataset = IndustryDatasetIdentifyDiff(sources_path, val_normals, n=320)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
# generate the model
if args.load:
print("Continuing training full model from checkpoint " + str(args.load_model))
model = torch.load('./result_identify/pump_fan_valve/all_ids/model_best.pt')['model']
# model = torch.load('./result/2020-08-20-01/model_best.pt')['model']
else:
model = resnet50_1d(n_spk=4)
model = model.cuda()
# generate optimizer and learning rate scheduler
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), weight_decay=5e-4)
clr = lr_scheduler.CosineAnnealingLR(optimizer, 5, eta_min=1e-5)
# use mix precision training, may reduce the accuracy but increase the training speed
# model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
criterion = ContrastiveLoss_()
scaler = GradScaler()
# criterion = CosLoss()
# Set up training state dict that will also be saved into checkpoints
state = {"worse_epochs": 0,
"epochs": 0,
"best_loss": np.Inf,
'step': 0}
print('Start training...')
    for _ in range(args.epochs):
print("Training one epoch from iteration " + str(state["epochs"]))
model.train()
train_loss = 0.0
for i, (mix, mix_, status) in enumerate(train_loader):
mix = mix.cuda()
mix_ = mix_.cuda()
status = status.cuda()
cur_lr = optimizer.state_dict()['param_groups'][0]['lr']
optimizer.zero_grad()
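            # Mixed-precision forward pass; GradScaler scales the loss so fp16 gradients do not underflow.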
with autocast():
features, features_ = model(mix, mix_)
loss = criterion(features, features_, status)
train_loss += loss.item()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
state['step'] += 1
if i % 20 == 0:
print("{:4d}/{:4d} --- Loss: {:.6f} with learnig rate {:.6f}".format(i, len(train_dataset) // args.batch_size, loss.item(), cur_lr))
clr.step()
        val_loss = validate(model, criterion, val_loader)
print("Validation loss" + str(val_loss))
# EARLY STOPPING CHECK
checkpoint_path = args.model_path + str(state['epochs']) + '.pth'
print("Saving model...")
if val_loss >= state["best_loss"]:
state["worse_epochs"] += 1
else:
print("MODEL IMPROVED ON VALIDATION SET!")
state["worse_epochs"] = 0
state["best_loss"] = val_loss
state["best_checkpoint"] = checkpoint_path
best_checkpoint_path = args.model_path + 'best.pt'
best_state_dict_path = args.model_path + 'best_state_dict.pt'
save_model(model, optimizer, state, best_checkpoint_path)
torch.save(model.state_dict(), best_state_dict_path)
print(state)
state["epochs"] += 1
if state["worse_epochs"] > args.hold_step:
break
last_model = args.model_path + 'last_model.pt'
save_model(model, optimizer, state, last_model)
print("Training finished")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default='{}/model_'.format(result_dir),
help='Path to save model')
parser.add_argument('--lr', type=float, default=1e-4,
help='initial learning rate')
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--output_size', type=float, default=2.0,
help='Output duration')
parser.add_argument('--sr', type=int, default=16000,
help='Sampling rate')
parser.add_argument('--channels', type=int, default=1,
help="Input channel, mono or sterno, default mono")
parser.add_argument('--load_model', type=str, default='',
help="Path of hdf5 file")
parser.add_argument("--load", type=bool, default=False)
parser.add_argument("--epochs", type=int, default=200,
help="Epochs of half lr")
parser.add_argument("--hold_step", type=int, default=20,
help="Epochs of hold step")
args = parser.parse_args()
main(args)
| [
"os.path.exists",
"models.resnet.resnet50_1d",
"os.listdir",
"os.makedirs",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"argparse.ArgumentParser",
"losses.loss.ContrastiveLoss_",
"torch.load",
"sklearn.model_selection.train_test_split",
"datetime.datetime.now",
"torch.no_grad",
"os.path.dirn... | [((654, 680), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (668, 680), False, 'import os\n'), ((686, 709), 'os.makedirs', 'os.makedirs', (['result_dir'], {}), '(result_dir)\n', (697, 709), False, 'import os\n'), ((1160, 1176), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1170, 1176), False, 'import torch\n'), ((1962, 1975), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1969, 1975), False, 'import os\n'), ((2204, 2226), 'os.listdir', 'os.listdir', (['total_path'], {}), '(total_path)\n', (2214, 2226), False, 'import os\n'), ((3452, 3508), 'dataset.industry_dataset_identify.IndustryDatasetIdentifyDiff', 'IndustryDatasetIdentifyDiff', (['sources_path', 'train_normals'], {}), '(sources_path, train_normals)\n', (3479, 3508), False, 'from dataset.industry_dataset_identify import IndustryDatasetIdentifyDiff, IndustryDatasetIdentify\n'), ((3527, 3588), 'dataset.industry_dataset_identify.IndustryDatasetIdentifyDiff', 'IndustryDatasetIdentifyDiff', (['sources_path', 'val_normals'], {'n': '(320)'}), '(sources_path, val_normals, n=320)\n', (3554, 3588), False, 'from dataset.industry_dataset_identify import IndustryDatasetIdentifyDiff, IndustryDatasetIdentify\n'), ((3609, 3676), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True)\n', (3619, 3676), False, 'from torch.utils.data import DataLoader\n'), ((3694, 3760), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=args.batch_size, shuffle=False)\n', (3704, 3760), False, 'from torch.utils.data import DataLoader\n'), ((4302, 4361), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'lr_scheduler.CosineAnnealingLR', (['optimizer', '(5)'], {'eta_min': '(1e-05)'}), '(optimizer, 5, eta_min=1e-05)\n', (4332, 4361), False, 'from torch.optim import lr_scheduler\n'), ((4542, 4560), 'losses.loss.ContrastiveLoss_', 'ContrastiveLoss_', ([], {}), '()\n', (4558, 4560), False, 'from losses.loss import ContrastiveLoss_\n'), ((4574, 4586), 'torch.cuda.amp.grad_scaler.GradScaler', 'GradScaler', ([], {}), '()\n', (4584, 4586), False, 'from torch.cuda.amp.grad_scaler import GradScaler\n'), ((6933, 6958), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6956, 6958), False, 'import argparse\n'), ((525, 548), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (546, 548), False, 'import datetime\n'), ((1530, 1545), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1543, 1545), False, 'import torch\n'), ((2420, 2482), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal_files'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(normal_files, random_state=42, test_size=0.2)\n', (2436, 2482), False, 'from sklearn.model_selection import train_test_split\n'), ((4091, 4111), 'models.resnet.resnet50_1d', 'resnet50_1d', ([], {'n_spk': '(4)'}), '(n_spk=4)\n', (4102, 4111), False, 'from models.resnet import resnet50, Model, resnet50_1d\n'), ((863, 884), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (878, 884), False, 'import os\n'), ((3909, 3977), 'torch.load', 'torch.load', (['"""./result_identify/pump_fan_valve/all_ids/model_best.pt"""'], {}), "('./result_identify/pump_fan_valve/all_ids/model_best.pt')\n", (3919, 3977), False, 'import torch\n'), ((769, 790), 'os.path.dirname', 'os.path.dirname', (['path'], {}), 
'(path)\n', (784, 790), False, 'import os\n'), ((819, 840), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (834, 840), False, 'import os\n'), ((5271, 5281), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (5279, 5281), False, 'from torch.cuda.amp import autocast\n')] |
import os
import glob
import string
import re
import pathlib
import numpy as np
import pandas as pd
import joblib
from tqdm import tqdm
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from spacy.lang.en import English
from unidecode import unidecode
nlp = spacy.load("en_core_web_md")
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
punctuations = string.punctuation
stopwords = list(STOP_WORDS)
parser = English()
columns = [
"EMAIL_TRACKING_ID",
"SNDR_ID",
"msg_body_txt"
]
sub_start = "sent from my iPhone"
sub_end = " am"
sentphone_1 = re.compile(r'{}.*?{}'.format(re.escape(sub_start),re.escape(sub_end)))
sub_start = "sent from my iPhone"
sub_end = " pm"
sentphone_2 = re.compile(r'{}.*?{}'.format(re.escape(sub_start),re.escape(sub_end)))
# Remove specific sent from android instances
sub_start = "get outlook for android"
sub_end = " from:"
sentphone_3 = re.compile(r'{}.*?{}'.format(re.escape(sub_start),re.escape(sub_end)))
def remove_non_ascii(text):
return unidecode(text)
## Removing specific ebay_wrote instances
def remove_ebay_not(msg):
sub_start = "ebay -"
sub_end = "<> wrote:"
ebay_wrote = re.compile(r'{}.*?{}'.format(re.escape(sub_start),re.escape(sub_end)))
sub_start = "ebay -"
sub_end = "<> sent:"
ebay_sent = re.compile(r'{}.*?{}'.format(re.escape(sub_start),re.escape(sub_end)))
txt = ebay_sent.sub('',msg)
txt = ebay_wrote.sub('',txt)
return txt
## Remvoing specific sent from iphone instances
def remove_sent_phone(msg):
txt = sentphone_1.sub('',msg)
txt = sentphone_2.sub('',txt)
txt = sentphone_3.sub('',txt)
return txt
# TODO: revise how to remove css message
def css_message(msg):
# quick and dirty
css_words = ('font-size','text-decoration','font-family','text-align','margin-top','margin-bottom','text-align','font-weight')
if all([re.search(w, msg) for w in css_words]):
return ''
else:
return msg
def strip_tags(html):
    # Strips HTML tags from a string, e.g.:
    # >>> str_text = strip_tags("<font color=red>hello</font>")
    # >>> print(str_text)
# hello
from html.parser import HTMLParser
html = html.strip()
html = html.strip("\n")
result = []
parser = HTMLParser()
parser.handle_data = result.append
parser.feed(html)
parser.close()
return ''.join(result)
# Clean all redundant message
def sentence_level_cleaning(msg):
txt = css_message(msg)
txt = remove_non_ascii(txt)
txt = remove_ebay_not(txt)
txt = remove_sent_phone(txt)
txt = strip_tags(txt)
return txt
def generate_vector(msg):
result = nlp(msg)
return result.vector
def spacy_tokenizer(sentence):
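    # Clean the raw message, strip punctuation, lowercase, drop stopwords, and cap the result at 500 tokens.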
sentence = sentence_level_cleaning(sentence)
sentence = re.sub('[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]',' ',sentence).strip()
mytokens = parser(sentence)
# lower case
mytokens = [ word.lower_ for word in mytokens ]
# stopwords & punctuations
mytokens = [ word for word in mytokens if word not in stopwords ]
# remove special characters
mytokens = [ re.sub('[^A-Za-z0-9]+', '', word) for word in mytokens]
# trim words length = 500
if len(mytokens)>=500:
mytokens = mytokens[:500]
mytokens = " ".join([i for i in mytokens if len(i)>0])
mytokens = mytokens.strip()
return mytokens
def _clean_impl(i, debug, path_data_clean, verbose=False):
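    # Process one parquet shard: drop null messages, tokenize and embed each message with spaCy, then write the result.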
data = pd.read_parquet(i)
# remove the null message
data = data[~data["msg_body_txt"].isnull()]
if debug:
data = data.sample(frac=0.001)
# Embed text into vector based on mytokens
if verbose:
tqdm.pandas()
data["msg_processed"] = data["msg_body_txt"].progress_apply(spacy_tokenizer)
else:
data["msg_processed"] = data["msg_body_txt"].apply(spacy_tokenizer)
if verbose:
tqdm.pandas()
data["vector"] = data["msg_processed"].progress_apply(generate_vector)
else:
data["vector"] = data["msg_processed"].apply(generate_vector)
path_dump = f'{path_data_clean}/{os.path.basename(i)}'
assert not os.path.exists(path_dump)
data.to_parquet(path_dump)
if debug:
print(f'{i} ==> {path_dump}')
def clean(path_data_raw, path_data_clean, n_jobs=8, debug=False, verbose=False):
if not os.path.isdir(path_data_clean):
os.makedirs(path_data_clean)
# Collect and read files
files = glob.glob(path_data_raw + '/*.parquet')
# data = pd.DataFrame()
with joblib.Parallel(n_jobs=n_jobs) as parallel:
_ = parallel(
joblib.delayed(_clean_impl)(
i, debug, path_data_clean, verbose=verbose
)
for i in tqdm(files, desc='cleaning')
)
pathlib.Path(f'{path_data_clean}/_SUCCESS').touch()
| [
"os.path.exists",
"re.escape",
"pandas.read_parquet",
"os.makedirs",
"pathlib.Path",
"spacy.load",
"spacy.lang.en.English",
"tqdm.tqdm",
"joblib.delayed",
"joblib.Parallel",
"os.path.isdir",
"html.parser.HTMLParser",
"os.path.basename",
"unidecode.unidecode",
"re.sub",
"tqdm.tqdm.panda... | [((273, 301), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (283, 301), False, 'import spacy\n'), ((439, 448), 'spacy.lang.en.English', 'English', ([], {}), '()\n', (446, 448), False, 'from spacy.lang.en import English\n'), ((1024, 1039), 'unidecode.unidecode', 'unidecode', (['text'], {}), '(text)\n', (1033, 1039), False, 'from unidecode import unidecode\n'), ((2246, 2258), 'html.parser.HTMLParser', 'HTMLParser', ([], {}), '()\n', (2256, 2258), False, 'from html.parser import HTMLParser\n'), ((3418, 3436), 'pandas.read_parquet', 'pd.read_parquet', (['i'], {}), '(i)\n', (3433, 3436), True, 'import pandas as pd\n'), ((4416, 4455), 'glob.glob', 'glob.glob', (["(path_data_raw + '/*.parquet')"], {}), "(path_data_raw + '/*.parquet')\n", (4425, 4455), False, 'import glob\n'), ((617, 637), 're.escape', 're.escape', (['sub_start'], {}), '(sub_start)\n', (626, 637), False, 'import re\n'), ((638, 656), 're.escape', 're.escape', (['sub_end'], {}), '(sub_end)\n', (647, 656), False, 'import re\n'), ((753, 773), 're.escape', 're.escape', (['sub_start'], {}), '(sub_start)\n', (762, 773), False, 'import re\n'), ((774, 792), 're.escape', 're.escape', (['sub_end'], {}), '(sub_end)\n', (783, 792), False, 'import re\n'), ((942, 962), 're.escape', 're.escape', (['sub_start'], {}), '(sub_start)\n', (951, 962), False, 'import re\n'), ((963, 981), 're.escape', 're.escape', (['sub_end'], {}), '(sub_end)\n', (972, 981), False, 'import re\n'), ((3084, 3117), 're.sub', 're.sub', (['"""[^A-Za-z0-9]+"""', '""""""', 'word'], {}), "('[^A-Za-z0-9]+', '', word)\n", (3090, 3117), False, 'import re\n'), ((3644, 3657), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (3655, 3657), False, 'from tqdm import tqdm\n'), ((3854, 3867), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (3865, 3867), False, 'from tqdm import tqdm\n'), ((4102, 4127), 'os.path.exists', 'os.path.exists', (['path_dump'], {}), '(path_dump)\n', (4116, 4127), False, 'import os\n'), ((4305, 4335), 'os.path.isdir', 'os.path.isdir', (['path_data_clean'], {}), '(path_data_clean)\n', (4318, 4335), False, 'import os\n'), ((4345, 4373), 'os.makedirs', 'os.makedirs', (['path_data_clean'], {}), '(path_data_clean)\n', (4356, 4373), False, 'import os\n'), ((4493, 4523), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (4508, 4523), False, 'import joblib\n'), ((1206, 1226), 're.escape', 're.escape', (['sub_start'], {}), '(sub_start)\n', (1215, 1226), False, 'import re\n'), ((1227, 1245), 're.escape', 're.escape', (['sub_end'], {}), '(sub_end)\n', (1236, 1245), False, 'import re\n'), ((1344, 1364), 're.escape', 're.escape', (['sub_start'], {}), '(sub_start)\n', (1353, 1364), False, 'import re\n'), ((1365, 1383), 're.escape', 're.escape', (['sub_end'], {}), '(sub_end)\n', (1374, 1383), False, 'import re\n'), ((1891, 1908), 're.search', 're.search', (['w', 'msg'], {}), '(w, msg)\n', (1900, 1908), False, 'import re\n'), ((2765, 2826), 're.sub', 're.sub', (['"""[!"#$%&\'()*+,-./:;<=>?@[\\\\]^_`{|}~]"""', '""" """', 'sentence'], {}), '(\'[!"#$%&\\\'()*+,-./:;<=>?@[\\\\]^_`{|}~]\', \' \', sentence)\n', (2771, 2826), False, 'import re\n'), ((4065, 4084), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (4081, 4084), False, 'import os\n'), ((4737, 4780), 'pathlib.Path', 'pathlib.Path', (['f"""{path_data_clean}/_SUCCESS"""'], {}), "(f'{path_data_clean}/_SUCCESS')\n", (4749, 4780), False, 'import pathlib\n'), ((4571, 4598), 'joblib.delayed', 'joblib.delayed', 
(['_clean_impl'], {}), '(_clean_impl)\n', (4585, 4598), False, 'import joblib\n'), ((4694, 4722), 'tqdm.tqdm', 'tqdm', (['files'], {'desc': '"""cleaning"""'}), "(files, desc='cleaning')\n", (4698, 4722), False, 'from tqdm import tqdm\n')] |
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file which has required functions to embed the charts in html page
from ..fusioncharts import FusionCharts
from ..fusioncharts import FusionTable
from ..fusioncharts import TimeSeries
import requests
# Loading Data and schema from a Static JSON String url
# The `chart` method is defined to load chart data from an JSON string.
def chart(request):
data = requests.get('https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/single-event-overlay-data.json').text
schema = requests.get('https://s3.eu-central-1.amazonaws.com/fusion.store/ft/schema/single-event-overlay-schema.json').text
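    # Wrap the fetched schema and data in a FusionTable, then build the TimeSeries the chart will render.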
fusionTable = FusionTable(schema, data)
timeSeries = TimeSeries(fusionTable)
timeSeries.AddAttribute("caption", """{
text: 'Interest Rate Analysis'
}""")
timeSeries.AddAttribute("subCaption", """{
text: 'Federal Reserve (USA)'
}""")
timeSeries.AddAttribute("yAxis", """[{
plot: 'Interest Rate',
format:{
suffix: '%'
},
title: 'Interest Rate'
}]""")
timeSeries.AddAttribute("xAxis", """{
plot: 'Time',
timemarker: [{
start: 'Mar-1980',
label: 'US inflation peaked at 14.8%.',
timeFormat: ' %b -%Y',
style: {
marker:
{
fill: '#D0D6F4'
}
}
}, {
start: 'May-1981',
label: 'To control inflation, the Fed started {br} raising interest rates to over {br} 20%.',
timeFormat: '%b-%Y'
}, {
start: 'Jun-1983',
label: 'By proactive actions of Mr.Volcker, {br} the inflation falls to 2.4% {br} from the peak of over 14% {br} just three years ago.',
timeFormat: '%b-%Y',
style: {
marker: {
fill: '#D0D6F4'
}
}
}, {
start: 'Oct-1987',
label: 'The Dow Jones Industrial Average lost {br} about 30% of it’s value.',
timeFormat: '%b-%Y',
style: {
marker: {
fill: '#FBEFCC'
}
}
}, {
start: 'Jan-1989',
label: '<NAME> becomes {br} the 41st president of US!',
timeFormat: '%b-%Y'
}, {
start: 'Aug-1990',
label: 'The oil prices spiked to $35 {br} per barrel from $15 per barrel {br} because of the Gulf War.',
timeFormat: '%b-%Y'
}, {
start: 'Dec-1996',
label: '<NAME> warns of the dangers {br} of \"irrational exuberance\" in financial markets, {br} an admonition that goes unheeded',
timeFormat: '%b-%Y'
}, {
start: 'Sep-2008',
label: '<NAME> collapsed!',
timeFormat: '%b-%Y'
},{
start: 'Mar-2009',
label: 'The net worth of US households {br} stood at a trough of $55 trillion.',
timeFormat: '%b-%Y',
style: {
marker: {
fill: '#FBEFCC'
}
}
}, {
start: 'Oct-2009',
label: 'Unemployment rate peaked {br} in given times to 10%.',
timeFormat: '%b-%Y'
}]
}""")
# Create an object for the chart using the FusionCharts class constructor
fcChart = FusionCharts("timeseries", "ex1", 700, 450, "chart-1", "json", timeSeries)
# returning complete JavaScript and HTML code, which is used to generate chart in the browsers.
return render(request, 'index.html', {'output' : fcChart.render(),'chartTitle': "Single event overlay"}) | [
"requests.get"
] | [((468, 583), 'requests.get', 'requests.get', (['"""https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/single-event-overlay-data.json"""'], {}), "(\n 'https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/single-event-overlay-data.json'\n )\n", (480, 583), False, 'import requests\n'), ((592, 711), 'requests.get', 'requests.get', (['"""https://s3.eu-central-1.amazonaws.com/fusion.store/ft/schema/single-event-overlay-schema.json"""'], {}), "(\n 'https://s3.eu-central-1.amazonaws.com/fusion.store/ft/schema/single-event-overlay-schema.json'\n )\n", (604, 711), False, 'import requests\n')] |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='pymixconsole',
version='0.0.1',
description='Headless multitrack mixing console in Python',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/csteinmetz1/pymixconsole',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
package_data={'pymixconsole': ['irs/*.wav']},
include_package_data=True,
install_requires=['scipy>=1.0.1',
'numpy>=1.14.2',
'numba>=0.46.0',
'graphviz>=0.13.2'],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
)
) | [
"setuptools.find_packages"
] | [((449, 464), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (462, 464), False, 'from setuptools import setup, find_packages\n')] |
"""
This script was modified from https://github.com/ZhaoJ9014/face.evoLVe.PyTorch
"""
import os
import cv2
import bcolz
import numpy as np
import tqdm
from sklearn.model_selection import KFold
from scipy import interpolate
import math
from .utils import l2_norm
def get_val_pair(path, name):
carray = bcolz.carray(rootdir=os.path.join(path, name), mode='r')
issame = np.load('{}/{}_list.npy'.format(path, name))
return carray, issame
def get_val_data(lfw_data_path=None, agedb_path=None, cfp_path=None):
lfw, lfw_issame, agedb_30, agedb_30_issame, cfp_fp, cfp_fp_issame = None,None,None,None,None,None
if lfw_data_path:
lfw, lfw_issame = get_val_pair(lfw_data_path, 'lfw')
if agedb_path:
agedb_30, agedb_30_issame = get_val_pair(agedb_path, 'agedb_30')
if cfp_path:
cfp_fp, cfp_fp_issame = get_val_pair(cfp_path, 'cfp_fp')
return lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame
def ccrop_batch(imgs):
assert len(imgs.shape) == 4
resized_imgs = np.array([cv2.resize(img, (128, 128)) for img in imgs])
ccropped_imgs = resized_imgs[:, 8:-8, 8:-8, :]
return ccropped_imgs
def hflip_batch(imgs):
assert len(imgs.shape) == 4
return imgs[:, :, ::-1, :]
def distance(embeddings1, embeddings2, distance_metric=0):
if distance_metric==0:
# Euclidian distance
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff),1)
elif distance_metric==1:
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
else:
        raise ValueError('Undefined distance metric %d' % distance_metric)
return dist
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame),
np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame,
nrof_folds=10, distance_metric=0):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
best_thresholds = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
dist = distance(embeddings1, embeddings2, distance_metric)
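    # For each fold, pick the threshold that maximizes accuracy on the training split, then score the test split.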
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
best_thresholds[fold_idx] = thresholds[best_threshold_index]
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = \
calculate_accuracy(threshold,
dist[test_set],
actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index],
dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy, best_thresholds
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
dist = distance(embeddings1, embeddings2, distance_metric)
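    # For each fold, interpolate the threshold that hits the target FAR on the training split, then measure VAL/FAR on the test split.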
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def evaluate(embeddings, actual_issame, nrof_folds=10, distance_metric=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, best_thresholds = calculate_roc(
thresholds, embeddings1, embeddings2, np.asarray(actual_issame),
nrof_folds=nrof_folds, distance_metric=distance_metric)
val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds, distance_metric=distance_metric)
return tpr, fpr, accuracy, best_thresholds, val, val_std, far
def perform_val(embedding_size, batch_size, model,
carray, issame, nrof_folds=10, is_ccrop=False, is_flip=True):
"""perform val"""
embeddings = np.zeros([len(carray), embedding_size])
for idx in tqdm.tqdm(range(0, len(carray), batch_size)):
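        # Convert NCHW to NHWC and rescale from [-1, 1] to [0, 1] before feeding the model.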
batch = carray[idx:idx + batch_size]
batch = np.transpose(batch, [0, 2, 3, 1]) * 0.5 + 0.5
batch = batch[:, :, :, ::-1] # convert BGR to RGB
if is_ccrop:
batch = ccrop_batch(batch)
if is_flip:
            flipped = hflip_batch(batch)
            emb_batch = model(batch) + model(flipped)
embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
else:
emb_batch = model(batch)
embeddings[idx:idx + batch_size] = l2_norm(emb_batch)
tpr, fpr, accuracy, best_thresholds, val, val_std, far = evaluate(
embeddings, issame, nrof_folds)
return accuracy.mean(), best_thresholds.mean(), accuracy.std(), val, val_std, far
| [
"numpy.arccos",
"numpy.logical_not",
"scipy.interpolate.interp1d",
"numpy.linalg.norm",
"sklearn.model_selection.KFold",
"numpy.arange",
"numpy.mean",
"numpy.less",
"numpy.multiply",
"numpy.asarray",
"numpy.subtract",
"numpy.max",
"numpy.argmax",
"numpy.square",
"numpy.std",
"cv2.resiz... | [((1952, 1976), 'numpy.less', 'np.less', (['dist', 'threshold'], {}), '(dist, threshold)\n', (1959, 1976), True, 'import numpy as np\n'), ((2867, 2908), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nrof_folds', 'shuffle': '(False)'}), '(n_splits=nrof_folds, shuffle=False)\n', (2872, 2908), False, 'from sklearn.model_selection import KFold\n'), ((2921, 2960), 'numpy.zeros', 'np.zeros', (['(nrof_folds, nrof_thresholds)'], {}), '((nrof_folds, nrof_thresholds))\n', (2929, 2960), True, 'import numpy as np\n'), ((2972, 3011), 'numpy.zeros', 'np.zeros', (['(nrof_folds, nrof_thresholds)'], {}), '((nrof_folds, nrof_thresholds))\n', (2980, 3011), True, 'import numpy as np\n'), ((3027, 3047), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (3035, 3047), True, 'import numpy as np\n'), ((3072, 3092), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (3080, 3092), True, 'import numpy as np\n'), ((3109, 3130), 'numpy.arange', 'np.arange', (['nrof_pairs'], {}), '(nrof_pairs)\n', (3118, 3130), True, 'import numpy as np\n'), ((4165, 4181), 'numpy.mean', 'np.mean', (['tprs', '(0)'], {}), '(tprs, 0)\n', (4172, 4181), True, 'import numpy as np\n'), ((4192, 4208), 'numpy.mean', 'np.mean', (['fprs', '(0)'], {}), '(fprs, 0)\n', (4199, 4208), True, 'import numpy as np\n'), ((4333, 4357), 'numpy.less', 'np.less', (['dist', 'threshold'], {}), '(dist, threshold)\n', (4340, 4357), True, 'import numpy as np\n'), ((4532, 4553), 'numpy.sum', 'np.sum', (['actual_issame'], {}), '(actual_issame)\n', (4538, 4553), True, 'import numpy as np\n'), ((5085, 5126), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nrof_folds', 'shuffle': '(False)'}), '(n_splits=nrof_folds, shuffle=False)\n', (5090, 5126), False, 'from sklearn.model_selection import KFold\n'), ((5142, 5162), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (5150, 5162), True, 'import numpy as np\n'), ((5173, 5193), 'numpy.zeros', 'np.zeros', (['nrof_folds'], {}), '(nrof_folds)\n', (5181, 5193), True, 'import numpy as np\n'), ((5213, 5234), 'numpy.arange', 'np.arange', (['nrof_pairs'], {}), '(nrof_pairs)\n', (5222, 5234), True, 'import numpy as np\n'), ((5998, 6010), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (6005, 6010), True, 'import numpy as np\n'), ((6026, 6038), 'numpy.mean', 'np.mean', (['far'], {}), '(far)\n', (6033, 6038), True, 'import numpy as np\n'), ((6053, 6064), 'numpy.std', 'np.std', (['val'], {}), '(val)\n', (6059, 6064), True, 'import numpy as np\n'), ((6232, 6253), 'numpy.arange', 'np.arange', (['(0)', '(4)', '(0.01)'], {}), '(0, 4, 0.01)\n', (6241, 6253), True, 'import numpy as np\n'), ((1385, 1422), 'numpy.subtract', 'np.subtract', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (1396, 1422), True, 'import numpy as np\n'), ((1993, 2038), 'numpy.logical_and', 'np.logical_and', (['predict_issame', 'actual_issame'], {}), '(predict_issame, actual_issame)\n', (2007, 2038), True, 'import numpy as np\n'), ((3340, 3365), 'numpy.zeros', 'np.zeros', (['nrof_thresholds'], {}), '(nrof_thresholds)\n', (3348, 3365), True, 'import numpy as np\n'), ((3597, 3617), 'numpy.argmax', 'np.argmax', (['acc_train'], {}), '(acc_train)\n', (3606, 3617), True, 'import numpy as np\n'), ((4383, 4428), 'numpy.logical_and', 'np.logical_and', (['predict_issame', 'actual_issame'], {}), '(predict_issame, actual_issame)\n', (4397, 4428), True, 'import numpy as np\n'), ((4574, 4603), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), 
'(actual_issame)\n', (4588, 4603), True, 'import numpy as np\n'), ((5465, 5490), 'numpy.zeros', 'np.zeros', (['nrof_thresholds'], {}), '(nrof_thresholds)\n', (5473, 5490), True, 'import numpy as np\n'), ((6427, 6452), 'numpy.asarray', 'np.asarray', (['actual_issame'], {}), '(actual_issame)\n', (6437, 6452), True, 'import numpy as np\n'), ((6602, 6627), 'numpy.asarray', 'np.asarray', (['actual_issame'], {}), '(actual_issame)\n', (6612, 6627), True, 'import numpy as np\n'), ((329, 353), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (341, 353), False, 'import os\n'), ((1043, 1070), 'cv2.resize', 'cv2.resize', (['img', '(128, 128)'], {}), '(img, (128, 128))\n', (1053, 1070), False, 'import cv2\n'), ((1445, 1460), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (1454, 1460), True, 'import numpy as np\n'), ((2087, 2116), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (2101, 2116), True, 'import numpy as np\n'), ((2150, 2180), 'numpy.logical_not', 'np.logical_not', (['predict_issame'], {}), '(predict_issame)\n', (2164, 2180), True, 'import numpy as np\n'), ((2213, 2242), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (2227, 2242), True, 'import numpy as np\n'), ((2276, 2306), 'numpy.logical_not', 'np.logical_not', (['predict_issame'], {}), '(predict_issame)\n', (2290, 2306), True, 'import numpy as np\n'), ((4487, 4516), 'numpy.logical_not', 'np.logical_not', (['actual_issame'], {}), '(actual_issame)\n', (4501, 4516), True, 'import numpy as np\n'), ((5679, 5696), 'numpy.max', 'np.max', (['far_train'], {}), '(far_train)\n', (5685, 5696), True, 'import numpy as np\n'), ((5726, 5785), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['far_train', 'thresholds'], {'kind': '"""slinear"""'}), "(far_train, thresholds, kind='slinear')\n", (5746, 5785), False, 'from scipy import interpolate\n'), ((1560, 1597), 'numpy.multiply', 'np.multiply', (['embeddings1', 'embeddings2'], {}), '(embeddings1, embeddings2)\n', (1571, 1597), True, 'import numpy as np\n'), ((1622, 1657), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings1'], {'axis': '(1)'}), '(embeddings1, axis=1)\n', (1636, 1657), True, 'import numpy as np\n'), ((1660, 1695), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings2'], {'axis': '(1)'}), '(embeddings2, axis=1)\n', (1674, 1695), True, 'import numpy as np\n'), ((1743, 1764), 'numpy.arccos', 'np.arccos', (['similarity'], {}), '(similarity)\n', (1752, 1764), True, 'import numpy as np\n'), ((7090, 7123), 'numpy.transpose', 'np.transpose', (['batch', '[0, 2, 3, 1]'], {}), '(batch, [0, 2, 3, 1])\n', (7102, 7123), True, 'import numpy as np\n')] |
import logging
import random
import gzip
import filetype
from moonstone.normalization.reads.base import BaseDownsizing
logger = logging.getLogger(__name__)
class DownsizePair(BaseDownsizing):
"""Normalization for the purposes of assessing diversity. Reads are downsized by random selection of raw reads
generating a subset from which alpha diversity can be calculated.
Note that removal of data, while useful for diversity assessment, is no longer considered good practice.
https://doi.org/10.1371/journal.pcbi.1003531
"""
def __init__(self, raw_file_f, raw_file_r, read_info=None, in_dir='./', out_dir='./', n=1000, seed=62375):
"""Paired reads assumes forward and reverse FASTQ files.
n is the number of reads that will be randomly picked, with a default of 1000.
A random seed is preset to 62375 to allow for reproducibility"""
super().__init__(raw_file_f, raw_file_r)
self.downsize_to = n
self.seed = seed
self.in_dir = in_dir
self.out_dir = out_dir
self.read_info = read_info # If provided, contains [header, F/R, Number of reads, format]
# e.g. ['@A00709:44:HYG57DSXX:2:1101:10737:1266', '1', 100257, 'Uncompressed/FASTQ']
self.starting_reads = None
self.file_type = None
self._file_type = False # In case no info is provided this remains false
self._starting_reads = False
if self.raw_file_f == self.raw_file_r:
logger.error(f"Files {self.raw_file_f} and {self.raw_file_r} are the same! Expected Forward and Reverse!")
if read_info:
self.starting_reads = read_info[2]
self._starting_reads = True
self.file_type = read_info[3]
self._file_type = True # If file type has been provided.
@property
def find_file_type(self):
"""Simple function to determine the filetype. This is generally uncompressed FASTQ or GZipped compressed.
If the filetype is provided, that that value is returned.
"""
if self._file_type:
return self.file_type
else:
            # filetype.guess returns None for plain text (uncompressed FASTQ) and a type whose
            # mime is 'application/gzip' for gzip-compressed files.
            kind = filetype.guess(self.in_dir + self.raw_file_f)
            self.file_type = kind.mime if kind is not None else None
if not self.file_type:
self.file_type = 'Uncompressed/FASTQ' # Assumed here, but could add more checks for real FASTQ.
self._file_type = True
logger.info('File type for %s and its pair is %s' % (self.raw_file_f, self.file_type))
return self.file_type
@property
def count_starting_reads(self):
"""The function first checks to see if the starting_reads variable has already been set. If not, the filetype
is determined and then the appropriate means of opening the file applied. Read # is determined by counting
lines and dividing by 4, as per the FASTQ format. In all cases, the number of starting reads is returned.
"""
if self._starting_reads:
return self.starting_reads
else:
if not self._file_type:
                self.find_file_type  # property access runs the detection and caches self.file_type
if self.file_type == 'Uncompressed/FASTQ':
self.starting_reads: int = sum(1 for _ in open(self.in_dir + self.raw_file_f)) // 4
if self.file_type == 'application/gzip':
self.starting_reads: int = sum(1 for _ in gzip.open(self.in_dir + self.raw_file_f)) // 4
logger.info('Found %i reads' % self.starting_reads)
self._starting_reads = True
return self.starting_reads
def downsize_pair_uncompressed(self):
"""Selects a pseudo-random list of reads from the sequence file and returns the downsized file in the
same format. The seed for generating the list of reads to select is set during instantiation.
"""
random.seed(self.seed)
if not self._starting_reads:
            self.starting_reads = self.count_starting_reads
rand_reads: list = sorted([random.randint(0, self.starting_reads - 1) for _ in range(self.downsize_to)])
forward_reads = open(self.in_dir + self.raw_file_f, 'r')
reverse_reads = open(self.in_dir + self.raw_file_r, 'r')
downsized_forward = open(self.out_dir+"downsized."+self.raw_file_f, "w+")
downsized_reverse = open(self.out_dir+"downsized."+self.raw_file_r, "w+")
rec_no = -1
for rr in rand_reads:
# Read records in the file (groups of 4 for fastq)
# Until the record being read is no longer less that one of the ordered random (oxymoron?) records
while rec_no < rr:
rec_no += 1
for i in range(4):
forward_reads.readline()
reverse_reads.readline()
# Once rec_no == rr (we find a matching record), we write that record, forward and reverse.
for i in range(4):
downsized_forward.write(forward_reads.readline())
downsized_reverse.write(reverse_reads.readline())
rec_no += 1
# Close raw read files.
forward_reads.close()
reverse_reads.close()
# Reset file objects of downsized reads so we can count them. Count them.
downsized_forward.seek(0)
downsized_reverse.seek(0)
downsized_forward_count = sum(1 for _ in downsized_forward) / 4
downsized_reverse_count = sum(1 for _ in downsized_reverse) / 4
# Close the downsized files.
downsized_forward.close()
downsized_reverse.close()
logger.info('Wrote %i reads to to %s.\nWrote %i reads to %s' % (downsized_forward_count, downsized_forward.name,
downsized_reverse_count, downsized_reverse.name)
)
def downsize_pair_gzip(self):
"""Same as 'downsize_pair' module, but made for gzip compressed files. This module returns files using the
same compression"""
random.seed(self.seed)
if not self._starting_reads:
            self.starting_reads = self.count_starting_reads
rand_reads: list = sorted([random.randint(0, self.starting_reads - 1) for _ in range(self.downsize_to)])
forward_reads = gzip.open(self.in_dir + self.raw_file_f, 'rb')
reverse_reads = gzip.open(self.in_dir + self.raw_file_r, 'rb')
downsized_forward = gzip.open(self.out_dir+"downsized."+self.raw_file_f, "wb+")
downsized_reverse = gzip.open(self.out_dir+"downsized."+self.raw_file_r, "wb+")
rec_no = -1
reads_written = 0
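        # Walk both gzip streams one FASTQ record (4 lines) at a time, copying only the randomly selected records.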
for rr in rand_reads:
while rec_no < rr:
rec_no += 1
for i in range(4):
forward_reads.readline()
reverse_reads.readline()
for i in range(4):
downsized_forward.write(forward_reads.readline())
downsized_reverse.write(reverse_reads.readline())
reads_written += 1
rec_no += 1
forward_reads.close()
reverse_reads.close()
downsized_forward.close()
downsized_reverse.close()
with gzip.open(self.out_dir+"downsized."+self.raw_file_f, "rb") as f:
downsized_forward_count = sum(1 for _ in f) / 4
with gzip.open(self.out_dir+"downsized."+self.raw_file_r, "rb") as r:
downsized_reverse_count = sum(1 for _ in r) / 4
logger.info('\nWrote %i reads to to %s.\nWrote %i reads to %s' % (downsized_forward_count,
downsized_forward.name,
downsized_reverse_count,
downsized_reverse.name)
)
def downsize_single(self):
pass
def downsize_pair(self):
self.find_file_type
self.count_starting_reads
if self.file_type == 'Uncompressed/FASTQ':
            logger.info('Running uncompressed downsizing')
self.downsize_pair_uncompressed()
if self.file_type == 'application/gzip':
logger.info('Running gzip downsizing')
self.downsize_pair_gzip()
| [
"logging.getLogger",
"filetype.guess",
"gzip.open",
"random.seed",
"logging.info",
"random.randint"
] | [((130, 157), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (147, 157), False, 'import logging\n'), ((3892, 3914), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (3903, 3914), False, 'import random\n'), ((6067, 6089), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (6078, 6089), False, 'import random\n'), ((6329, 6375), 'gzip.open', 'gzip.open', (['(self.in_dir + self.raw_file_f)', '"""rb"""'], {}), "(self.in_dir + self.raw_file_f, 'rb')\n", (6338, 6375), False, 'import gzip\n'), ((6400, 6446), 'gzip.open', 'gzip.open', (['(self.in_dir + self.raw_file_r)', '"""rb"""'], {}), "(self.in_dir + self.raw_file_r, 'rb')\n", (6409, 6446), False, 'import gzip\n'), ((6475, 6538), 'gzip.open', 'gzip.open', (["(self.out_dir + 'downsized.' + self.raw_file_f)", '"""wb+"""'], {}), "(self.out_dir + 'downsized.' + self.raw_file_f, 'wb+')\n", (6484, 6538), False, 'import gzip\n'), ((6563, 6626), 'gzip.open', 'gzip.open', (["(self.out_dir + 'downsized.' + self.raw_file_r)", '"""wb+"""'], {}), "(self.out_dir + 'downsized.' + self.raw_file_r, 'wb+')\n", (6572, 6626), False, 'import gzip\n'), ((7248, 7310), 'gzip.open', 'gzip.open', (["(self.out_dir + 'downsized.' + self.raw_file_f)", '"""rb"""'], {}), "(self.out_dir + 'downsized.' + self.raw_file_f, 'rb')\n", (7257, 7310), False, 'import gzip\n'), ((7386, 7448), 'gzip.open', 'gzip.open', (["(self.out_dir + 'downsized.' + self.raw_file_r)", '"""rb"""'], {}), "(self.out_dir + 'downsized.' + self.raw_file_r, 'rb')\n", (7395, 7448), False, 'import gzip\n'), ((8129, 8176), 'logging.info', 'logging.info', (['"""Running uncompressed downsizing"""'], {}), "('Running uncompressed downsizing')\n", (8141, 8176), False, 'import logging\n'), ((2155, 2186), 'filetype.guess', 'filetype.guess', (['self.raw_file_f'], {}), '(self.raw_file_f)\n', (2169, 2186), False, 'import filetype\n'), ((4051, 4093), 'random.randint', 'random.randint', (['(0)', '(self.starting_reads - 1)'], {}), '(0, self.starting_reads - 1)\n', (4065, 4093), False, 'import random\n'), ((6226, 6268), 'random.randint', 'random.randint', (['(0)', '(self.starting_reads - 1)'], {}), '(0, self.starting_reads - 1)\n', (6240, 6268), False, 'import random\n'), ((3426, 3466), 'gzip.open', 'gzip.open', (['(self.in_dir + self.raw_file_f)'], {}), '(self.in_dir + self.raw_file_f)\n', (3435, 3466), False, 'import gzip\n')] |
#!/usr/bin/env python
import time
from pyplanter.lib.light import Light
from pyplanter.logger import logger
def main():
"""
Get the sunrise and sunset data for the day
Usage: python pyplanter/scripts/light.py
"""
while True:
logger.debug("Updating light data")
light = Light()
light.get_latest_data()
light.save()
time.sleep(60)
if __name__ == "__main__":
main()
| [
"pyplanter.logger.logger.debug",
"time.sleep",
"pyplanter.lib.light.Light"
] | [((257, 292), 'pyplanter.logger.logger.debug', 'logger.debug', (['"""Updating light data"""'], {}), "('Updating light data')\n", (269, 292), False, 'from pyplanter.logger import logger\n'), ((309, 316), 'pyplanter.lib.light.Light', 'Light', ([], {}), '()\n', (314, 316), False, 'from pyplanter.lib.light import Light\n'), ((379, 393), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (389, 393), False, 'import time\n')] |
from flask import current_app
from app.server import app_logger
from app.server import ContextEnvironment
from app.server import sms
from app.server.models.user import User
def send_sms(message: str, phone_number: str):
context_env = ContextEnvironment(current_app)
if context_env.is_development or context_env.is_testing:
app_logger.info(
"IS NOT PRODUCTION NOT ACTUALLY SENDING:\n"
f"Recipient: {phone_number}\n"
f"Message: {message}"
)
else:
# TODO: [Philip] Make SMS asynchronous
response = sms.send(message=message, recipients=[phone_number])
return response
def send_one_time_pin(user: User):
otp = user.set_otp_secret()
message = f"Hello {user.given_names}, your activation code is: {otp}"
send_sms(message=message, phone_number=user.phone)
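# Usage sketch (hypothetical; the surrounding view/route and query style are not part
# of this module):
#
#   user = get_user_by_phone(phone)          # however the app looks users up
#   send_one_time_pin(user)                  # texts the freshly generated OTP
#
# send_sms() can also be called directly for ad-hoc notifications:
#   send_sms(message="Your delivery is on the way", phone_number="+254700000000")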
| [
"app.server.ContextEnvironment",
"app.server.app_logger.info",
"app.server.sms.send"
] | [((241, 272), 'app.server.ContextEnvironment', 'ContextEnvironment', (['current_app'], {}), '(current_app)\n', (259, 272), False, 'from app.server import ContextEnvironment\n'), ((342, 460), 'app.server.app_logger.info', 'app_logger.info', (['f"""IS NOT PRODUCTION NOT ACTUALLY SENDING:\nRecipient: {phone_number}\nMessage: {message}"""'], {}), '(\n f"""IS NOT PRODUCTION NOT ACTUALLY SENDING:\nRecipient: {phone_number}\nMessage: {message}"""\n )\n', (357, 460), False, 'from app.server import app_logger\n'), ((578, 630), 'app.server.sms.send', 'sms.send', ([], {'message': 'message', 'recipients': '[phone_number]'}), '(message=message, recipients=[phone_number])\n', (586, 630), False, 'from app.server import sms\n')] |
from html.entities import name2codepoint
from numpy import isin
from MyGrammerParser import MyGrammerParser
from MyGrammerVisitor import MyGrammerVisitor
from MusicNodes import *
from music21 import *
import string
import copy
def appendInstrument(part, instru):
switcher = {
"clarinet": instrument.Clarinet(),
"flute": instrument.Flute(),
"organ": instrument.Organ(),
"piano": instrument.Piano(),
"recorder": instrument.Recorder(),
"saxophone": instrument.Saxophone(),
"trumpet": instrument.Trumpet(),
"guitar": instrument.Guitar(),
"acoustic guitar": instrument.AcousticGuitar(),
"electric guitar": instrument.ElectricGuitar(),
"ukulele": instrument.Ukulele(),
"violin": instrument.Violin(),
"xylophone": instrument.Xylophone(),
}
part.append(switcher[instru.getText().lower()])
def createTupletNote(num, accidental, pitch, dotted, quarterLength):
if accidental == "_":
accidental = "-"
elif accidental == "None":
accidental = ""
m_note = note.Note(pitch + accidental + num)
d = duration.Duration(quarterLength)
m_note.duration = d
print(pitch + accidental + num)
print(m_note, m_note.duration)
print(m_note.duration.type, m_note.duration.dots, m_note.duration.quarterLength)
print(m_note.fullName)
if dotted:
m_note.quarterLength = m_note.quarterLength + (m_note.quarterLength / 2)
return m_note
def getNoteDuration(note):
val = 0
if note == "quarter":
val = 1.0
if note == "eighth":
d = duration.Duration(type="eighth")
val = d.quarterLength
if note == "sixteenth":
val = 0.25
if note == "full":
d = duration.Duration(type="whole")
val = d.quarterLength
if note == "double":
val = 2.0
if note == "half":
d = duration.Duration(type="half")
val = d.quarterLength
return val
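# Quick reference for the mapping above (values are music21 quarterLengths), added
# for clarity:
#   getNoteDuration("sixteenth") -> 0.25
#   getNoteDuration("eighth")    -> 0.5
#   getNoteDuration("quarter")   -> 1.0
#   getNoteDuration("half")      -> 2.0
#   getNoteDuration("double")    -> 2.0   (same value as "half" in this helper)
#   getNoteDuration("full")      -> 4.0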
def createChord(note_arr, val, is_dotted):
arr = []
for num, pitch, accidental in note_arr:
if accidental == "_":
accidental = "-"
elif accidental == "None":
accidental = ""
arr.append(str(pitch) + str(accidental) + str(num))
print("chord arr", arr)
new_chord = chord.Chord(arr)
if val == "eighth":
d = duration.Duration(type="eighth")
new_chord.duration = d
if val == "sixteenth":
d = duration.Duration(type="16th")
new_chord.duration = d
if val == "thirtysecond":
d = duration.Duration(type="32nd")
new_chord.duration = d
if val == "full":
d = duration.Duration(type="whole")
new_chord.duration = d
if val == "double":
new_chord.quarterLength = 2.0
if val == "half":
d = duration.Duration(type="half")
new_chord.duration = d
if is_dotted:
new_chord.quarterLength = new_chord.quarterLength + (new_chord.quarterLength / 2)
return new_chord
def printExprNote(note: ExprNoteNode):
print(note.note_value, note.pitch, note.num, note.dotted)
def printExprRest(note: ExprRestNode):
print(note.note_value, note.dotted)
def createNote(num, accidental, pitch, val, dotted):
if accidental == "_":
accidental = "-"
elif accidental == "None":
accidental = ""
test = pitch + accidental + num
print("create note", test)
m_note = note.Note(pitch + accidental + num)
#update note duration
if val == "eighth":
d = duration.Duration(type="eighth")
m_note.duration = d
if val == "sixteenth":
d = duration.Duration(type="16th")
m_note.duration = d
if val == "thirtysecond":
d = duration.Duration(type="32nd")
m_note.duration = d
if val == "full":
d = duration.Duration(type="whole")
m_note.duration = d
if val == "double":
m_note.quarterLength = 2.0
if val == "half":
d = duration.Duration(type="half")
m_note.duration = d
if dotted:
        m_note.quarterLength = m_note.quarterLength + (m_note.quarterLength / 2)
return m_note
def createFixedChord(note_value, num, fixed_chord, dotted):
chords = ExprFixedChordNode.chords[fixed_chord.lower()]
chords = [chord[0] + str(chord[1] + int(str(num))) for chord in chords]
new_chord = chord.Chord(chords)
if note_value == "eighth":
d = duration.Duration(type="eighth")
new_chord.quarterLength = d.quarterLength
if note_value == "sixteenth":
new_chord.quarterLength = 0.25
if note_value == "thirtysecond":
new_chord.quarterLength = 0.125
if note_value == "full":
d = duration.Duration(type="whole")
new_chord.quarterLength = d.quarterLength
if note_value == "double":
new_chord.quarterLength = 2.0
if note_value == "half":
d = duration.Duration(type="half")
new_chord.quarterLength = d.quarterLength
if dotted:
new_chord.quarterLength = new_chord.quarterLength + (new_chord.quarterLength / 2)
return new_chord
def createRest(val, dotted):
m_rest = note.Rest()
#update rest duration
if val == "eighth":
d = duration.Duration(type="eighth")
m_rest.quarterLength = d.quarterLength
if val == "sixteenth":
m_rest.quarterLength = 0.25
if val == "thirtysecond":
m_rest.quarterLength = 0.125
if val == "full":
d = duration.Duration(type="whole")
m_rest.quarterLength = d.quarterLength
if val == "double":
m_rest.quarterLength = 2.0
if val == "half":
d = duration.Duration(type="half")
m_rest.quarterLength = d.quarterLength
if dotted:
m_rest.quarterLength = m_rest.quarterLength + (m_rest.quarterLength / 2)
return m_rest
def printExprChord(chord: ExprChordNode):
print("chord (")
for note in chord.notes:
printExprNote(note)
print(")")
def valToBeat(cur_val, bottom, dotted):
beat_num = 0
if cur_val == "thirtysecond":
beat_num = bottom / 32
if cur_val == "sixteenth":
beat_num = bottom / 16
elif cur_val == "eighth":
beat_num = bottom / 8
elif cur_val == "quarter":
beat_num = bottom / 4
elif cur_val == "half":
beat_num = bottom / 2
elif cur_val == "full":
beat_num = bottom
elif cur_val == "double":
beat_num = bottom * 2
if dotted:
beat_num += beat_num / 2
return beat_num
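# Worked examples, added for clarity. With a 4/4 staff (bottom == 4):
#   valToBeat("quarter", 4, False) -> 1.0 beat
#   valToBeat("eighth",  4, False) -> 0.5 beat
#   valToBeat("half",    4, True)  -> 3.0 beats (2 plus 1 for the dot)
#   valToBeat("full",    4, False) -> 4.0 beats (a whole measure)
# With a 6/8 staff (bottom == 8) an eighth note counts as exactly one beat.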
def processExprChord(chord_notes, type):
expected_note_val = ""
is_dotted = False
for idx, n in enumerate(
chord_notes
): # Checking if all notes in chord have same note_value
if type == "EXPR":
if bool(n.dotted):
is_dotted = True
if idx == 0:
expected_note_val = str(n.note_value)
else:
if str(n.note_value) != expected_note_val:
line = n.note_value.getSymbol().line
col = n.note_value.getSymbol().column
raise Exception(
"Mismatch in note values, all notes within a chord must have the same note value",
line, col)
else:
if bool(n[4]):
is_dotted = True
if idx == 0:
expected_note_val = str(n[0])
return expected_note_val, is_dotted
class Staff():
def __init__(self, beats_per_measure, note_value, melodyVariable):
self.melodyVariable = melodyVariable
self.beats_per_measure = beats_per_measure
self.note_value = note_value
self.expressions = []
class MusicEvaluator(MyGrammerVisitor):
bpm = None
instrument = None
checkInst = None
grandInst = ["organ", "piano"]
variables = {}
staffs = []
music_stream = stream.Score()
repeat_ctr = []
ending_ctr = []
ending_values = []
ending_id = []
right = stream.Part()
left = stream.Part()
def evaluateExprNoteNode(self, ctx: ExprNoteNode):
note_value = ctx.note_value.getText()
accidental = None
if ctx.accidental is not None:
accidental = ctx.accidental.getText()
pitch = ctx.pitch.getText()
num = ctx.num.getText()
dotted = ctx.dotted
return note_value, accidental, pitch, num, dotted
def evaluateDeclaredNotes(self, ctx: MyGrammerParser.Declare_noteContext):
# DECLARED NOTES
for note in ctx:
# Get all the DeclareNoteNodes
temp = MyGrammerVisitor().visitDeclare_note(note)
if temp.identifier.getText() not in self.variables:
note_value, accidental, pitch, num, dotted = self.evaluateExprNoteNode(
temp.note)
self.variables[temp.identifier.getText()] = ("NOTE",
note_value,
accidental, pitch,
num, dotted)
else:
line = temp.identifier.getSymbol().line
col = temp.identifier.getSymbol().column
raise Exception(
"Reassignment is not allowed. Use a different identifier",
line, col)
def evaluateDeclaredChords(self,
ctx: MyGrammerParser.Declare_chordContext):
for chord in ctx:
notes = []
temp = MyGrammerVisitor().visitDeclare_chord(chord)
if temp.identifier.getText() not in self.variables:
# TODO: CHECK IF ERROR WHEN DECLARING FIXED CHORD
if isinstance(temp.chord, ExprChordNode):
expected_note_val = ""
for idx2, x in enumerate(temp.chord.notes):
notes.append(self.evaluateExprNoteNode(x))
if idx2 == 0:
expected_note_val = x.note_value.getText()
else:
if str(x.note_value.getText()) != expected_note_val:
line = x.note_value.getSymbol().line
col = x.note_value.getSymbol().column
raise Exception(
"Mismatch in note values, all notes within a chord must have the same note value",
line, col)
self.variables[temp.identifier.getText()] = ("CHORD", notes)
else: # FIXED CHORD
self.variables[temp.identifier.getText()] = ("FIXED_CHORD", temp.chord)
else:
line = temp.identifier.getSymbol().line
col = temp.identifier.getSymbol().column
raise Exception(
"Reassignment is not allowed. Use a different identifier",
line, col)
def evaluateDeclaredStaffs(self, ctx: list, instru):
# print("Declaring Staff", len(ctx.getChildren(), " found"))
for idx, i in enumerate(ctx):
# Gets a staff from music sheet
first_staff = False
last_staff = False
if idx == 0:
first_staff = True
if idx == len(ctx) - 1:
last_staff = True
if i.__class__.__name__ == 'Declare_staffContext':
staff = MyGrammerVisitor().visitDeclare_staff(i)
top = staff.beats_per_measure
if int(top.getText()) <= 0:
line = top.getSymbol().line
col = top.getSymbol().column
raise Exception(
"Number of beats in staff must be greater than 0",
line, col)
bottom = staff.note_value
if int(bottom.getText()) <= 0:
line = bottom.getSymbol().line
col = bottom.getSymbol().column
raise Exception(
"Note value of whole beats in staff must be greater than 0",
line, col)
staffUp = Staff(top.getText(), bottom.getText(), None)
staffDown = Staff(top.getText(), bottom.getText(), None)
for expr in staff.expressions:
self.evaluateStaffBlock(expr, top.getText(),
bottom.getText(), staffUp,
staffDown, first_staff, last_staff, self.ending_id)
# for x in expr:
# newStaff.expressions.append(x)
for measure in staffUp.expressions:
self.right.append(measure)
for measure in staffDown.expressions:
self.left.append(measure)
# up_idx = None
# down_idx = None
# for id in self.ending_id:
# if id[0] == "UP_START":
# up_idx = self.right.index(id[1])
# elif id[0] == "UP_END":
# repeat.insertRepeatEnding(self.right, up_idx, self.right.index(id[1]), endingNumber=id[2])
# for id in self.ending_id:
# if id[0] == "DOWN_START":
# down_idx = self.left.index(id[1])
# elif id[0] == "DOWN_END":
# repeat.insertRepeatEnding(self.left, down_idx, self.left.index(id[1]), endingNumber=id[2])
# self.staffs.append(staff1)
else: # Variable Expression checking
melodyVariable = MyGrammerVisitor().visitExpr_var(i)
if (not self.checkInListContext(i)):
if self.variables[melodyVariable.getText()][0] == "NOTE": #note
line = i.IDENTIFIER().getSymbol().line
col = i.IDENTIFIER().getSymbol().column
raise Exception(
"Variable must be melody but a note is called",
line, col)
elif self.variables[melodyVariable.getText()][0] == "CHORD": #chord:
line = i.IDENTIFIER().getSymbol().line
col = i.IDENTIFIER().getSymbol().column
raise Exception("Variable must be melody but a chord is called", line, col)
elif self.variables[melodyVariable.getText()][0] == "FIXED_CHORD": #chord:
line = i.IDENTIFIER().getSymbol().line
col = i.IDENTIFIER().getSymbol().column
raise Exception("Variable must be melody but a chord is called", line, col)
for idx2, pair in enumerate(
self.variables[melodyVariable.getText()]):
# melodyStaffUp = self.variables[melodyVariable.getText()][0][0]
# melodyStaffDown = self.variables[melodyVariable.getText()][0][1]
for i in pair[0]:
self.right.append(copy.deepcopy(i))
for i in pair[1]:
self.left.append(copy.deepcopy(i))
# self.staffs.append(Staff(None, None, melodyVariable.getText()))
# self.music_stream.insert(0, right)
# if self.checkInst in self.grandInst:
# self.music_stream.insert(0, left)
def checkInListContext(self, ctx):
line = ctx.IDENTIFIER().getSymbol().line
col = ctx.IDENTIFIER().getSymbol().column
if ctx.IDENTIFIER().getText() not in self.variables:
raise Exception("Variable called but not declared", line, col)
return False
def checkInListNode(self, node):
line = node.getSymbol().line
col = node.getSymbol().column
if node.getText() not in self.variables:
raise Exception("Variable called but not declared", line, col)
return False
def evaluateDeclaredMelody(self,
ctx: MyGrammerParser.Declare_melodyContext, instru):
# print("Declaring Melody", len(ctx.getChildren(), " found"))
for i in ctx:
# Gets a staff from music sheet
melody = MyGrammerVisitor().visitDeclare_melody(i)
identifier = melody.identifier
staffs = melody.staffs
melodyStaffs = []
print(identifier)
if melody.identifier.getText(
) not in self.variables: # If not then store the corresponding notes of a chord in a list each note is stored as tuple of values for each property of a note
for staff in staffs:
top = staff.beats_per_measure
if int(top.getText()) <= 0:
line = top.getSymbol().line
col = top.getSymbol().column
raise Exception(
"Number of beats in staff must be greater than 0",
line, col)
bottom = staff.note_value
if int(bottom.getText()) <= 0:
line = bottom.getSymbol().line
col = bottom.getSymbol().column
raise Exception(
"Note value of whole beats in staff must be greater than 0",
line, col)
staffUp = Staff(top.getText(), bottom.getText(), None)
staffDown = Staff(top.getText(), bottom.getText(), None)
for expr in staff.expressions:
self.evaluateStaffBlock(expr, top.getText(),
bottom.getText(), staffUp,
staffDown, False, False, self.ending_id)
# for x in expr:
# newStaff.expressions.append(x)
right = stream.Part()
left = stream.Part()
for measure in staffUp.expressions:
right.append(measure)
for measure in staffDown.expressions:
left.append(measure)
up_idx = None
down_idx = None
for id in self.ending_id:
if id[0] == "UP_START":
up_idx = right.index(id[1])
elif id[0] == "UP_END":
repeat.insertRepeatEnding(right, up_idx, right.index(id[1]), endingNumber=id[2])
for id in self.ending_id:
if id[0] == "DOWN_START":
down_idx = left.index(id[1])
elif id[0] == "DOWN_END":
repeat.insertRepeatEnding(left, down_idx, left.index(id[1]), endingNumber=id[2])
melodyStaffs.append((right, left))
self.variables[melody.identifier.getText()] = melodyStaffs
else: # Else if reassignment of a chord variable is attempted raise an exception
line = melody.identifier.getSymbol().line
col = melody.identifier.getSymbol().column
raise Exception(
"Reassignment is not allowed. Use a different identifier",
line, col)
def evaluateStaffBlock(self, ctx: list, beats_per_measure, note_value,
staffUp, staffDown, first_staff,
last_staff, ending_id): # List of Expressions of a staff block
staff_accidentals = {}
first_measure = False
last_measure = False
cur_beats_up = 0
for idx, x in enumerate(ctx):
measureUp = stream.Measure()
measureDown = stream.Measure()
cur_beats = 0
if first_staff:
first_staff = False
first_measure = True
if last_staff:
if idx == len(ctx) - 1 or all(isinstance(y, AccidentalExpressionNode) for y in ctx[idx + 1:]):
last_staff = False
last_measure = True
if isinstance(x, DeclareMeasuresNode) or isinstance(
x, DeclareMeasuresGrandNode):
# measureUp = stream.Measure()
# measureDown = stream.Measure()
measureUp.insert(0, meter.TimeSignature(beats_per_measure + "/" + note_value))
measureDown.insert(0, meter.TimeSignature(beats_per_measure + "/" + note_value))
if x.ending_start is not None:
if isinstance(x, DeclareMeasuresNode) and len(self.ending_ctr) > 0 or isinstance(x, DeclareMeasuresGrandNode) and len(self.ending_ctr) > 1:
line = x.ending_start.ENDSTART().getSymbol().line
col = x.ending_start.ENDSTART().getSymbol().column
raise(Exception("Endings should be ended first before declaring another", line, col))
else:
if isinstance(
x,
DeclareMeasuresGrandNode):
if x.direction == "UP":
ending_id.append(("UP_START", measureUp, [int(ending.getText()) for ending in x.ending_start.INTEGER()]))
else:
ending_id.append(("DOWN_START", measureDown, [int(ending.getText()) for ending in x.ending_start.INTEGER()]))
self.ending_ctr.append(x.ending_start)
else:
ending_id.append(measureUp)
self.ending_ctr.append(x.ending_start)
# ending_id.append(measureUp)
# self.ending_ctr.append(x.ending_start)
if isinstance(
x,
DeclareMeasuresGrandNode) and x.direction == "UP":
expDown = ctx[idx + 1]
if expDown.ending_start is None:
line = expDown.expressions[0].note_value.getSymbol(
).line - 1
col = expDown.expressions[0].note_value.getSymbol(
).column
raise Exception(
"measureUp and measureDown pairs must both have endingstart",
line, col)
else:
up_numbers = [int(ending.getText()) for ending in x.ending_start.INTEGER()]
down_numbers = [int(ending.getText()) for ending in expDown.ending_start.INTEGER()]
up_numbers.sort()
down_numbers.sort()
if up_numbers != down_numbers:
line = expDown.expressions[0].note_value.getSymbol(
).line - 1
col = expDown.expressions[0].note_value.getSymbol(
).column
raise Exception(
"measureUp and measureDown pairs must both have the same ending numbers",
line, col)
else:
for i in up_numbers:
self.ending_values.append((i, x.ending_start.INTEGER()))
if x.ending_end is not None:
if len(self.ending_ctr) == 0:
line = x.ending_end.ENDEND().getSymbol().line
col = x.ending_end.ENDEND().getSymbol().column
raise Exception("Invalid ending placement", line, col)
if isinstance(
x,
DeclareMeasuresGrandNode):
if x.direction == "UP":
ending_id.append(("UP_END", measureUp))
else:
ending_id.append(("DOWN_END", measureDown))
else:
ending_id.append(measureUp)
if isinstance(
x,
DeclareMeasuresGrandNode) and x.direction == "UP":
expDown = ctx[idx + 1]
if expDown.ending_end is None:
line = expDown.expressions[0].note_value.getSymbol(
).line - 1
col = expDown.expressions[0].note_value.getSymbol(
).column
raise Exception(
"measureUp and measureDown pairs must both have endingend",
line, col)
del self.ending_ctr[-1]
if x.repeat_start is not None:
if isinstance(x, DeclareMeasuresNode):
measureUp.leftBarline = bar.Repeat(direction='start')
elif isinstance(x, DeclareMeasuresGrandNode) and x.direction == "UP":
measureUp.leftBarline = bar.Repeat(direction='start')
else:
measureDown.leftBarline = bar.Repeat(direction='start')
self.repeat_ctr.append(x.repeat_start)
if isinstance(
x,
DeclareMeasuresGrandNode) and x.direction == "UP":
expDown = ctx[idx + 1]
if expDown.repeat_start is None:
line = expDown.expressions[0].getSymbol(
).line - 1
col = expDown.expressions[0].getSymbol(
).column
raise Exception(
"measureUp and measureDown pairs must both have repstart",
line, col)
if x.repeat_end is not None:
repeat_times = None
if x.repeat_end.INTEGER() is None:
repeat_times = 1
else:
repeat_times = int(x.repeat_end.INTEGER().getText())
                if repeat_times < 0 or repeat_times > 10: # TODO: should we even check this? It is probably fine even without a restriction
line = x.repeat_end.INTEGER().getSymbol().line
col = x.repeat_end.INTEGER().getSymbol().column
raise Exception(
"Number of repeats must be less than or equal to 10",
line, col)
else:
if isinstance(x, DeclareMeasuresNode):
measureUp.rightBarline = bar.Repeat(
direction='end', times = repeat_times)
elif isinstance(x, DeclareMeasuresGrandNode) and x.direction == "UP":
measureUp.rightBarline = bar.Repeat(
direction='end', times = repeat_times)
else:
measureDown.rightBarline = bar.Repeat(
direction='end', times = repeat_times)
if len(self.repeat_ctr) > 0:
del self.repeat_ctr[-1]
if isinstance(
x,
DeclareMeasuresGrandNode) and x.direction == "UP":
expDown = ctx[idx + 1]
if expDown.repeat_end is None:
line = expDown.expressions[0].getSymbol(
).line - 1
col = expDown.expressions[0].getSymbol(
).column
raise Exception(
"measureUp and measureDown pairs must both have repend",
line, col)
repeat_times_down = None
if expDown.repeat_end.INTEGER() is None:
repeat_times_down = 1
else:
repeat_times_down = int(expDown.repeat_end.INTEGER().getText())
if repeat_times_down != repeat_times:
line = expDown.repeat_end.REPEND().getSymbol(
).line
col = expDown.repeat_end.REPEND().getSymbol(
).column
raise Exception(
"measureUp and measureDown pairs must both have the same number of repeats",
line, col)
if x.repeat_start is None and isinstance(x, DeclareMeasuresGrandNode) and x.direction == "UP":
expDown = ctx[idx + 1]
if expDown.repeat_start is not None:
if not isinstance(x.expressions[0], DeclarePatternNode):
line = x.expressions[0].note_value.getSymbol().line - 1
col = x.expressions[0].note_value.getSymbol().column
else:
print("error is ", type(x.expressions[0].expressions), len(x.expressions[0].expressions))
line = x.expressions[0].expressions[0].note_value.getSymbol().line - 1
col = x.expressions[0].expressions[0].note_value.getSymbol().column
raise Exception(
"measureUp and measureDown pairs must both have repstart",
line, col)
if x.repeat_end is None and isinstance(
x, DeclareMeasuresGrandNode) and x.direction == "UP":
expDown = ctx[idx + 1]
if expDown.repeat_end is not None:
line = x.expressions[0].note_value.getSymbol().line - 1
col = x.expressions[0].note_value.getSymbol().column
raise Exception(
"measureUp and measureDown pairs must both have repend",
line, col)
if isinstance(x, DeclareMeasuresGrandNode
) and self.checkInst not in self.grandInst:
line = x.expressions[0].note_value.getSymbol().line - 1
col = x.expressions[0].note_value.getSymbol().column
raise Exception(
"Grand staff directions are only allowed for keyboard instruments",
line, col)
elif isinstance(x, DeclareMeasuresNode
) and self.checkInst in self.grandInst:
line = x.expressions[0].note_value.getSymbol().line - 1
col = x.expressions[0].note_value.getSymbol().column
raise Exception(
"Grand staff directions are required for keyboard instruments",
line, col)
measure_accidentals = {}
print("--------- MEASURE -------")
for mIdx, m_expr in enumerate(x.expressions):
if isinstance(m_expr, ExprNoteNode):
cur_beats += valToBeat(str(m_expr.note_value),
float(note_value),
bool(m_expr.dotted))
if cur_beats > float(beats_per_measure):
line = m_expr.note_value.getSymbol().line
col = m_expr.note_value.getSymbol().column
raise Exception(
"Number of beats in measure has exceeded amount required within staff",
line, col)
else:
if isinstance(x, DeclareMeasuresGrandNode
) and x.direction == "DOWN":
print("down")
pitch = m_expr.pitch.getText()
octave = m_expr.num.getText()
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = m_expr.accidental
measureDown.append(
createNote(str(m_expr.num),
str(updated_acc),
str(m_expr.pitch),
str(m_expr.note_value),
bool(m_expr.dotted)))
else:
pitch = m_expr.pitch.getText()
octave = m_expr.num.getText()
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = m_expr.accidental
measureUp.append(
createNote(str(m_expr.num),
str(updated_acc),
str(m_expr.pitch),
str(m_expr.note_value),
bool(m_expr.dotted)))
# printExprNote(m_expr)
elif isinstance(m_expr, ExprChordNode):
expected_note_val, is_dotted = processExprChord(m_expr.notes, "EXPR")
cur_beats += valToBeat(expected_note_val,
float(note_value), is_dotted)
if cur_beats > float(beats_per_measure):
line = m_expr.notes[0].note_value.getSymbol().line
col = m_expr.notes[0].note_value.getSymbol().column
raise Exception(
"Number of beats in measure has exceeded amount required within staff",
line, col)
else:
new_notes = []
for n in m_expr.notes:
pitch = n.pitch.getText()
octave = n.num.getText()
if pitch in measure_accidentals:
n.accidental = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
n.accidental = staff_accidentals[(pitch)]
new_notes.append((str(n.num), str(n.pitch), str(n.accidental)))
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
measureDown.append(createChord(new_notes, expected_note_val, is_dotted))
else:
measureUp.append(createChord(new_notes, expected_note_val, is_dotted))
printExprChord(m_expr)
elif isinstance(m_expr, ExprFixedChordNode):
cur_beats += valToBeat(str(m_expr.note_value), float(note_value), bool(m_expr.dotted))
if cur_beats > float(beats_per_measure):
line = m_expr.note_value.getSymbol().line
col = m_expr.note_value.getSymbol().column
raise Exception("Number of beats in measure has exceeded amount required within staff", line, col)
else:
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
measureDown.append(createFixedChord(str(m_expr.note_value), str(m_expr.num), str(m_expr.fixed_chord), bool(m_expr.dotted)))
else:
measureUp.append(createFixedChord(str(m_expr.note_value), str(m_expr.num), str(m_expr.fixed_chord), bool(m_expr.dotted)))
print(m_expr)
elif isinstance(m_expr, ExprRestNode):
cur_beats += valToBeat(str(m_expr.note_value),
float(note_value),
bool(m_expr.dotted))
if cur_beats > float(beats_per_measure):
line = m_expr.note_value.getSymbol().line
col = m_expr.note_value.getSymbol().column
raise Exception(
"Number of beats in measure has exceeded amount required within staff",
line, col)
else:
if isinstance(x, DeclareMeasuresGrandNode
) and x.direction == "DOWN":
measureDown.append(
createRest(str(m_expr.note_value),
bool(m_expr.dotted)))
else:
measureUp.append(
createRest(str(m_expr.note_value),
bool(m_expr.dotted)))
printExprRest(m_expr)
elif isinstance(m_expr, AccidentalExpressionNode):
# print("accidental")
for i in m_expr.accidentals: # List of AccidentalNodes
print("measure accie")
                        measure_accidentals[i.pitch.getText()] = i.accidental.getText() if i.accidental is not None else ""  # keyed by pitch only, matching the lookups above
# print("axie",
# staff_accidentals[(i.pitch, i.octave)],
# i.pitch, i.octave)
elif isinstance(m_expr, DeclarePatternNode):
first = 1
note_val = ""
for tuplet_expr in m_expr.expressions:
if first ==1:
note_val = str(tuplet_expr.note_value)
first = 0
else:
#check if notevalue matches with initial (for homogeneity)
if str(tuplet_expr.note_value) != note_val:
line = tuplet_expr.note_value.getSymbol().line
col = tuplet_expr.note_value.getSymbol().column
raise Exception("Tuplet note does not match other notes", line, col)
# check noteval of those isnside tuple i.e. quarter => 1.0 quarter length
quarter_length = getNoteDuration(note_val)
# get total number of notes in tuplet
multiplier = (len(m_expr.expressions) - 1) / len(m_expr.expressions)
# divide quarterlength by total number of notes in tuplet
new_duration = quarter_length * multiplier
# modify each duration of each note in tuplet to the corresponding value
cur_beats += new_duration * 2
# print("NEW DURATION CURBEATS", cur_beats)
# input()
if cur_beats > float(beats_per_measure):
line = tuplet_expr.note_value.getSymbol(
).line
col = tuplet_expr.note_value.getSymbol(
).column
raise Exception(
"Number of beats in measure has exceeded amount required within staff",
line, col)
# add each note to measure
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
pitch = tuplet_expr.pitch.getText()
octave = tuplet_expr.num.getText()
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = tuplet_expr.accidental
d = duration.Duration(quarter_length * multiplier)
n = None
if str(updated_acc) == "_":
accidental = "-"
elif str(updated_acc) == "#":
accidental = "#"
else:
accidental = ""
n = note.Note(str(tuplet_expr.pitch) + str(accidental) + str(tuplet_expr.num))
n.duration = d
if tuplet_expr.dotted:
n.quarterLength = n.quarterLength + (n.quarterLength / 2)
measureDown.append(n)
else:
pitch = tuplet_expr.pitch.getText()
octave = tuplet_expr.num.getText()
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = tuplet_expr.accidental
d = duration.Duration(quarter_length * multiplier)
n = None
if str(updated_acc) == "_":
accidental = "-"
elif str(updated_acc) == "#":
accidental = "#"
else:
accidental = ""
n = note.Note(str(tuplet_expr.pitch) + str(accidental) + str(tuplet_expr.num))
n.duration = d
if tuplet_expr.dotted:
n.quarterLength = n.quarterLength + (n.quarterLength / 2)
measureUp.append(n)
# measureUp.append(
# createTupletNote(
# str(tuplet_expr.num),
# str(updated_acc
# ),
# str(tuplet_expr.pitch),
# str(tuplet_expr.dotted),
# new_duration
# ))
# printExprNote(tuplet_expr)
else:
if (not self.checkInListNode(m_expr)
): # Error checking identifier and if melody
if self.variables[m_expr.getText()][0] == "NOTE":
# print(self.variables[m_expr.getText()])
cur_beats += valToBeat(
str(self.variables[m_expr.getText()][1]),
float(note_value),
bool(self.variables[m_expr.getText()][5]))
if cur_beats > float(beats_per_measure):
line = m_expr.getSymbol().line
col = m_expr.getSymbol().column
raise Exception(
"Number of beats in measure has exceeded amount required within staff",
line, col)
else:
if isinstance(x, DeclareMeasuresGrandNode
) and x.direction == "DOWN":
pitch = self.variables[m_expr.getText()][3]
octave = self.variables[m_expr.getText()][4]
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = self.variables[m_expr.getText()][2]
measureDown.append(
createNote(
str(self.variables[
m_expr.getText()][4]),
str(updated_acc),
str(self.variables[
m_expr.getText()][3]),
str(self.variables[
m_expr.getText()][1]),
bool(self.variables[
m_expr.getText()][5])))
else:
pitch = self.variables[m_expr.getText()][3]
octave = self.variables[m_expr.getText()][4]
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = self.variables[m_expr.getText()][2]
measureUp.append(
createNote(
str(self.variables[m_expr.getText()][4]),
str(updated_acc),
str(self.variables[ m_expr.getText()][3]),
str(self.variables[m_expr.getText()][1]),
bool(self.variables[m_expr.getText()][5])))
elif self.variables[m_expr.getText()][0] == "CHORD":
expected_note_val, is_dotted = processExprChord(self.variables[m_expr.getText()][1], "VAR")
print("here1 " + str(self.variables[m_expr.getText()][1][0]))
print("here2 " + str(valToBeat(expected_note_val,
float(note_value),
is_dotted)))
cur_beats += valToBeat(expected_note_val,
float(note_value),
is_dotted)
if cur_beats > float(beats_per_measure):
line = m_expr.getSymbol().line
col = m_expr.getSymbol().column
raise Exception(
"Number of beats in measure has exceeded amount required within staff",
line, col)
else:
new_notes = []
for n in self.variables[m_expr.getText()][1]:
pitch = str(n[2])
octave = str(n[3])
if pitch in measure_accidentals:
updated_acc = measure_accidentals[(pitch)]
elif pitch in staff_accidentals:
updated_acc = staff_accidentals[(pitch)]
else:
updated_acc = str(n[1])
new_notes.append(( str(n[3]), str(n[2]), updated_acc))
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
measureDown.append(createChord(new_notes, expected_note_val, is_dotted))
else:
measureUp.append(createChord(new_notes, expected_note_val, is_dotted))
elif self.variables[m_expr.getText()][0] == "FIXED_CHORD":
fixed = self.variables[m_expr.getText()][1]
cur_beats += valToBeat(str(fixed.note_value), float(note_value), bool(fixed.dotted))
if cur_beats > float(beats_per_measure):
line = fixed.note_value.getSymbol().line
col = fixed.note_value.getSymbol().column
raise Exception("Number of beats in measure has exceeded amount required within staff", line, col)
else:
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
measureDown.append(createFixedChord(str(fixed.note_value), str(fixed.num), str(fixed.fixed_chord), bool(fixed.dotted)))
else:
measureUp.append(createFixedChord(str(fixed.note_value), str(fixed.num), str(fixed.fixed_chord), bool(fixed.dotted)))
else:
line = m_expr.getSymbol().line
col = m_expr.getSymbol().column
raise Exception(
"Variable must be note or chord but a melody is called",
line, col)
print(m_expr)
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "UP":
cur_beats_up = cur_beats
elif isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
# print("CURBREATS", cur_beats)
# print("CURBEATSUP", cur_beats_up)
if cur_beats != cur_beats_up:
if x.expressions[0].__class__.__name__ == "ExprChordNode":
line = x.expressions[0].notes[0].note_value.getSymbol().line - 1
col = x.expressions[0].notes[0].note_value.getSymbol().column
elif x.expressions[0].__class__.__name__ == "ExprNoteNode" or x.expressions[0].__class__.__name__ == "ExprRestNode":
line = x.expressions[0].note_value.getSymbol().line - 1
col = x.expressions[0].note_value.getSymbol().column
else:
line = x.expressions[0].getSymbol().line - 1
col = x.expressions[0].getSymbol().column
print("hereUp " + str(cur_beats_up))
print("hereDown " + str(cur_beats))
raise Exception("Number of beats are unequal between grand measures", line, col)
if first_measure:
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
first_measure = False
if last_measure:
if isinstance(x, DeclareMeasuresGrandNode) and x.direction == "DOWN":
last_measure = False
if len(measureUp) > 1:
staffUp.expressions.append(measureUp)
if len(measureDown) > 1:
staffDown.expressions.append(measureDown)
elif isinstance(x, AccidentalExpressionNode):
print("accidental")
for acc_expr in x.accidentals:
# Assign an accidental for a particular pitch and octave
pitch = acc_expr.pitch.getText()
staff_accidentals[(pitch)] = acc_expr.accidental.getText() if acc_expr.accidental is not None else ""
def evaluate(self, node):
self.bpm = node.bpm
self.instrument = node.instrument
self.checkInst = self.instrument.getText().lower()
if (int(self.bpm.getText()) > 300):
line = self.bpm.getSymbol().line
col = self.bpm.getSymbol().column
raise Exception("Invalid BPM value not in range 300", line, col)
elif (int(self.bpm.getText()) < 0):
line = self.bpm.getSymbol().line
col = self.bpm.getSymbol().column
raise Exception("Invalid BPM value, cannot be less than 0", line,
col)
else:
print("bpm (" + str(self.bpm) + ")")
print(node.instrument.getText())
appendInstrument(self.right, self.instrument)
appendInstrument(self.left, self.instrument)
self.music_stream.append(tempo.MetronomeMark(number=int(self.bpm.getText())))
self.evaluateDeclaredNotes(
node.notes) # Returns NoteExpression Objects
self.evaluateDeclaredChords(
node.chords) # Returns ChordExpression Objects
self.evaluateDeclaredMelody(node.melodies, self.instrument)
self.evaluateDeclaredStaffs(node.staffs, self.instrument)
self.music_stream.insert(0, self.right)
if self.checkInst in self.grandInst:
self.music_stream.insert(0, self.left)
            up_idx = None
endingNumber = []
print(self.ending_id)
# input()
for id in self.ending_id:
if id[0] == "UP_START":
up_idx = self.right.index(id[1])
endingNumber = id[2]
elif id[0] == "UP_END":
print(up_idx, self.right.index(id[1]))
# input()
repeat.insertRepeatEnding(self.right, up_idx, self.right.index(id[1]), endingNumber=endingNumber)
down_idx = None
endingNumber = []
for id in self.ending_id:
if id[0] == "DOWN_START":
down_idx = self.left.index(id[1])
endingNumber = id[2]
elif id[0] == "DOWN_END":
repeat.insertRepeatEnding(self.left, down_idx, self.left.index(id[1]), endingNumber=endingNumber)
if len(self.repeat_ctr):
rep = self.repeat_ctr[0]
line = rep.REPSTART().getSymbol().line
col = rep.REPSTART().getSymbol().column
raise Exception("Invalid repeat placement", line, col)
if len(self.ending_ctr):
end = self.ending_ctr[0]
            line = end.ENDSTART().getSymbol().line
            col = end.ENDSTART().getSymbol().column
raise Exception("Invalid ending placement", line, col)
end_value = 1
for i in self.ending_values:
if i[0] != 1 and i[0] != end_value:
line = i[1][0].getSymbol().line
col = i[1][0].getSymbol().column
raise Exception("Invalid ending number", line, col)
else:
if i[0] == 1:
end_value = 2
else:
end_value += 1
# self.music_stream.write('midi', fp='test.midi')
self.music_stream.show('t')
sp = midi.realtime.StreamPlayer(self.music_stream)
sp.play()
return "MIDI FILE"
| [
"MyGrammerVisitor.MyGrammerVisitor",
"copy.deepcopy"
] | [((8668, 8686), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (8684, 8686), False, 'from MyGrammerVisitor import MyGrammerVisitor\n'), ((9648, 9666), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (9664, 9666), False, 'from MyGrammerVisitor import MyGrammerVisitor\n'), ((16523, 16541), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (16539, 16541), False, 'from MyGrammerVisitor import MyGrammerVisitor\n'), ((11624, 11642), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (11640, 11642), False, 'from MyGrammerVisitor import MyGrammerVisitor\n'), ((13890, 13908), 'MyGrammerVisitor.MyGrammerVisitor', 'MyGrammerVisitor', ([], {}), '()\n', (13906, 13908), False, 'from MyGrammerVisitor import MyGrammerVisitor\n'), ((15351, 15367), 'copy.deepcopy', 'copy.deepcopy', (['i'], {}), '(i)\n', (15364, 15367), False, 'import copy\n'), ((15448, 15464), 'copy.deepcopy', 'copy.deepcopy', (['i'], {}), '(i)\n', (15461, 15464), False, 'import copy\n')] |
import time
import pytest
from brownie import ERC20Basic, network
from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account
@pytest.fixture
def deploy_erc20_exercise():
# Arrange
# Arrange / Act
erc20 = ERC20Basic.deploy(
10000000,
{"from": get_account()}
)
# Assert
assert erc20 is not None
return erc20
def test_erc20_totalSupply(deploy_erc20_exercise):
# Arrange
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip("Only for local testing")
erc20 = deploy_erc20_exercise
# Assert
assert erc20.totalSupply() > 0
def test_erc20_transfer(deploy_erc20_exercise):
# Arrange
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip("Only for local testing")
erc20 = deploy_erc20_exercise
tokensToTransfer = 100
receiverAddress = '0x517f7Fe4f8778be9ff2dcFab5A99d434a52B63a7'
senderCurrentTokenBalance = erc20.balanceOf(get_account())
# Act
transaction_receipt = erc20.transfer(receiverAddress,tokensToTransfer, {"from": get_account()})
newTokenBalance = erc20.balanceOf(get_account())
# Assert
assert newTokenBalance < senderCurrentTokenBalance
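# Optional, stricter check (sketch): for a plain ERC-20 with no fees or burns the
# sender balance should drop by exactly the transferred amount.
#   assert newTokenBalance == senderCurrentTokenBalance - tokensToTransfer
# A matching check on the receiver side would compare its balance before and after
# the transfer in the same way.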
| [
"brownie.network.show_active",
"pytest.skip",
"scripts.helpful_scripts.get_account"
] | [((445, 466), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (464, 466), False, 'from brownie import ERC20Basic, network\n'), ((513, 550), 'pytest.skip', 'pytest.skip', (['"""Only for local testing"""'], {}), "('Only for local testing')\n", (524, 550), False, 'import pytest\n'), ((705, 726), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (724, 726), False, 'from brownie import ERC20Basic, network\n'), ((773, 810), 'pytest.skip', 'pytest.skip', (['"""Only for local testing"""'], {}), "('Only for local testing')\n", (784, 810), False, 'import pytest\n'), ((987, 1000), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (998, 1000), False, 'from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account\n'), ((1151, 1164), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (1162, 1164), False, 'from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account\n'), ((292, 305), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (303, 305), False, 'from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account\n'), ((1097, 1110), 'scripts.helpful_scripts.get_account', 'get_account', ([], {}), '()\n', (1108, 1110), False, 'from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account\n')] |
import cv2
img1 = cv2.imread('img1.jpg')
height , width , layers = img1.shape
fourcc = cv2.VideoWriter_fourcc(*'X264')
video = cv2.VideoWriter('stitched_video.mp4',fourcc,7.0,(width,height))
frame_num = 1
while (frame_num <= 6552):
frame_name = "img" + str(frame_num) + ".jpg"
img = cv2.imread(frame_name)
video.write(img)
frame_num += 1
cv2.destroyAllWindows()
video.release()
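# Alternative frame discovery (sketch): instead of hard-coding 6552 frames, collect
# the files that actually exist and skip anything OpenCV cannot read. The glob
# pattern and numeric sort key are assumptions about the file naming.
#   import glob, re
#   for frame_name in sorted(glob.glob('img*.jpg'), key=lambda p: int(re.findall(r'\d+', p)[0])):
#       img = cv2.imread(frame_name)
#       if img is None:          # unreadable or missing frame
#           continue
#       video.write(img)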
| [
"cv2.imread",
"cv2.destroyAllWindows",
"cv2.VideoWriter_fourcc",
"cv2.VideoWriter"
] | [((19, 41), 'cv2.imread', 'cv2.imread', (['"""img1.jpg"""'], {}), "('img1.jpg')\n", (29, 41), False, 'import cv2\n'), ((90, 121), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'X264'"], {}), "(*'X264')\n", (112, 121), False, 'import cv2\n'), ((130, 197), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""stitched_video.mp4"""', 'fourcc', '(7.0)', '(width, height)'], {}), "('stitched_video.mp4', fourcc, 7.0, (width, height))\n", (145, 197), False, 'import cv2\n'), ((347, 370), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (368, 370), False, 'import cv2\n'), ((289, 311), 'cv2.imread', 'cv2.imread', (['frame_name'], {}), '(frame_name)\n', (299, 311), False, 'import cv2\n')] |
# Generated by Django 2.2.5 on 2019-09-21 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('machines', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='device',
name='serial',
field=models.CharField(blank=True, max_length=150, null=True, verbose_name='serial number'),
),
migrations.AlterField(
model_name='device',
name='invoice',
field=models.CharField(blank=True, max_length=150, null=True, verbose_name='invoice number'),
),
]
| [
"django.db.models.CharField"
] | [((324, 414), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(150)', 'null': '(True)', 'verbose_name': '"""serial number"""'}), "(blank=True, max_length=150, null=True, verbose_name=\n 'serial number')\n", (340, 414), False, 'from django.db import migrations, models\n'), ((532, 623), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(150)', 'null': '(True)', 'verbose_name': '"""invoice number"""'}), "(blank=True, max_length=150, null=True, verbose_name=\n 'invoice number')\n", (548, 623), False, 'from django.db import migrations, models\n')] |
from django.db import models
class Porteiro(models.Model):
usuario = models.OneToOneField('usuarios.Usuario', verbose_name='Usuário', on_delete=models.PROTECT)
nome_completo = models.CharField(verbose_name='Nome completo', max_length=194)
cpf = models.CharField(verbose_name='CPF', max_length=11)
telefone = models.CharField(verbose_name='Telefone de contato', max_length=11)
data_nascimento = models.DateField(verbose_name='Data de nascimento', auto_now=False, auto_now_add=False)
class Meta:
verbose_name = 'Porteiro'
verbose_name_plural = 'Porteiros'
db_table = 'porteiro'
def __str__(self):
return self.nome_completo
| [
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.CharField"
] | [((75, 170), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""usuarios.Usuario"""'], {'verbose_name': '"""Usuário"""', 'on_delete': 'models.PROTECT'}), "('usuarios.Usuario', verbose_name='Usuário', on_delete=\n models.PROTECT)\n", (95, 170), False, 'from django.db import models\n'), ((186, 248), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Nome completo"""', 'max_length': '(194)'}), "(verbose_name='Nome completo', max_length=194)\n", (202, 248), False, 'from django.db import models\n'), ((259, 310), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""CPF"""', 'max_length': '(11)'}), "(verbose_name='CPF', max_length=11)\n", (275, 310), False, 'from django.db import models\n'), ((326, 393), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""Telefone de contato"""', 'max_length': '(11)'}), "(verbose_name='Telefone de contato', max_length=11)\n", (342, 393), False, 'from django.db import models\n'), ((416, 507), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""Data de nascimento"""', 'auto_now': '(False)', 'auto_now_add': '(False)'}), "(verbose_name='Data de nascimento', auto_now=False,\n auto_now_add=False)\n", (432, 507), False, 'from django.db import models\n')] |
import logging
import torch
log = logging.getLogger(__name__)
def get_training_device(try_gpu=True):
"""
Retrieves the device used for training. It favours a CUDA-enabled device,
but reverts to CPU if it doesn't find one.
Keyword arguments:
try_gpu -- If true, this function will prefer a CUDA-enabled device. Otherwise,
it will always use the CPU.
Returns:
The training device.
"""
if try_gpu and torch.cuda.is_available():
log.info("CUDA is available; using it.")
device = torch.device("cuda:0")
else:
log.info("CUDA is not available; using CPU.")
device = torch.device("cpu")
return device
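# Typical usage (sketch; MyModel and loader are placeholder names):
#
#   device = get_training_device()
#   model = MyModel().to(device)
#   for batch in loader:
#       batch = batch.to(device)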
| [
"logging.getLogger",
"torch.cuda.is_available",
"torch.device"
] | [((36, 63), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (53, 63), False, 'import logging\n'), ((448, 473), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (471, 473), False, 'import torch\n'), ((541, 563), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (553, 563), False, 'import torch\n'), ((645, 664), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (657, 664), False, 'import torch\n')] |
import pandas as pd
import scipy.io
import os
filenames = []
for filename in os.listdir('.'):
if '.mat' in filename:
filenames.append(filename)
for filename in filenames:
print(f'Processing file: {filename}')
mat = scipy.io.loadmat(filename)
headings = [
'Timestamp', 'Voltage', 'Current', 'Ah',
'Wh', 'Power', 'Battery Temp',
'Time', 'Chamber Temp'
]
data = {}
for i, heading in enumerate(headings):
if heading == 'Timestamp':
continue
data[heading] = [point[0] for point in mat['meas'][0][0][i]]
df = pd.DataFrame(data)
csv_filename = filename.replace('.mat', '.csv')
    os.makedirs('csv-data', exist_ok=True)  # make sure the output folder exists before writing
    df.to_csv('csv-data/' + csv_filename, index=False)
| [
"pandas.DataFrame",
"os.listdir"
] | [((85, 100), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (95, 100), False, 'import os\n'), ((631, 649), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (643, 649), True, 'import pandas as pd\n')] |
# Generated by Django 3.0.4 on 2020-04-17 23:13
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Champion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lane', models.CharField(default='', help_text='한글로 입력해주세요. ex)上单/中路', max_length=20, verbose_name='라인')),
('keyword', models.CharField(default='', help_text='"/" 로 구분해주세요. ex)가렌/garen', max_length=255, verbose_name='검색어')),
('no', models.CharField(default='', max_length=3, verbose_name='角色번호')),
('name', models.CharField(default='', help_text='한글로 입력해주세요. ex)가렌', max_length=20, verbose_name='角色명')),
],
options={
'verbose_name': '角色',
'verbose_name_plural': '角色',
},
),
migrations.CreateModel(
name='Draft',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('match_name', models.CharField(max_length=255, verbose_name='매치 이름')),
('red_team_name', models.CharField(max_length=255, verbose_name='红队名称')),
('blue_team_name', models.CharField(max_length=255, verbose_name='蓝队名称')),
('password', models.CharField(max_length=255, verbose_name='패스워드')),
('banpick', models.CharField(blank=True, max_length=255, verbose_name='禁选Data')),
('timer', models.DateTimeField(blank=True, null=True, verbose_name='시작')),
('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='일시')),
],
options={
'verbose_name': '禁选',
'verbose_name_plural': '禁选',
},
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((333, 426), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (349, 426), False, 'from django.db import migrations, models\n'), ((450, 551), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""한글로 입력해주세요. ex)上单/中路"""', 'max_length': '(20)', 'verbose_name': '"""라인"""'}), "(default='', help_text='한글로 입력해주세요. ex)上单/中路', max_length=\n 20, verbose_name='라인')\n", (466, 551), False, 'from django.db import migrations, models\n'), ((577, 684), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '""""/" 로 구분해주세요. ex)가렌/garen"""', 'max_length': '(255)', 'verbose_name': '"""검색어"""'}), '(default=\'\', help_text=\'"/" 로 구분해주세요. ex)가렌/garen\',\n max_length=255, verbose_name=\'검색어\')\n', (593, 684), False, 'from django.db import migrations, models\n'), ((706, 769), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(3)', 'verbose_name': '"""角色번호"""'}), "(default='', max_length=3, verbose_name='角色번호')\n", (722, 769), False, 'from django.db import migrations, models\n'), ((797, 895), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""한글로 입력해주세요. ex)가렌"""', 'max_length': '(20)', 'verbose_name': '"""角色명"""'}), "(default='', help_text='한글로 입력해주세요. ex)가렌', max_length=20,\n verbose_name='角色명')\n", (813, 895), False, 'from django.db import migrations, models\n'), ((1142, 1235), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1158, 1235), False, 'from django.db import migrations, models\n'), ((1265, 1319), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""매치 이름"""'}), "(max_length=255, verbose_name='매치 이름')\n", (1281, 1319), False, 'from django.db import migrations, models\n'), ((1356, 1409), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""红队名称"""'}), "(max_length=255, verbose_name='红队名称')\n", (1372, 1409), False, 'from django.db import migrations, models\n'), ((1447, 1500), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""蓝队名称"""'}), "(max_length=255, verbose_name='蓝队名称')\n", (1463, 1500), False, 'from django.db import migrations, models\n'), ((1532, 1585), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""패스워드"""'}), "(max_length=255, verbose_name='패스워드')\n", (1548, 1585), False, 'from django.db import migrations, models\n'), ((1616, 1683), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'verbose_name': '"""禁选Data"""'}), "(blank=True, max_length=255, verbose_name='禁选Data')\n", (1632, 1683), False, 'from django.db import migrations, models\n'), ((1712, 1774), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""시작"""'}), "(blank=True, null=True, verbose_name='시작')\n", (1732, 1774), False, 'from django.db import migrations, models\n'), ((1802, 1876), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 
'verbose_name': '"""일시"""'}), "(default=django.utils.timezone.now, verbose_name='일시')\n", (1822, 1876), False, 'from django.db import migrations, models\n')] |
"""
This module contains helper functions
"""
import logging
import signal
from typing import Any
def handle_sigterm(*_args: Any) -> None:
"""
Sigterm handler: raise KeyboardInterrupt.
:param _args: unused arguments, accept any arguments
:raise KeyboardInterrupt: raises a keyboard interrupt
"""
raise KeyboardInterrupt()
def trim_string(string: str, max_length: int = 155) -> str:
"""
Trim string for debug purposes.
    :param string: the string to trim
:param max_length: the maximal output length of the trimmed string
:return: the trimmed string as '<first half> ... <second half>'
"""
half_length = int((max_length - 5) / 2)
return (
(string[:half_length] + " ... " + string[-half_length:])
if len(string) > max_length
else string
)
def init(name: str, logger_level: int = logging.INFO) -> logging.Logger:
"""
Initialize logger and sigterm handler.
:param name: name of the logger
:param logger_level: the logging level to use
:return: a logger instance
"""
signal.signal(signal.SIGTERM, handle_sigterm)
logger = logging.getLogger(name)
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger.setLevel(logger_level)
return logger
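# Example usage (sketch; the module name and level are illustrative):
#
#   logger = init(__name__, logging.DEBUG)   # also registers the SIGTERM handler
#   logger.debug(trim_string("x" * 500))     # logs "xxx ... xxx" capped at 155 chars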
| [
"logging.getLogger",
"signal.signal",
"logging.basicConfig"
] | [((1124, 1169), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'handle_sigterm'], {}), '(signal.SIGTERM, handle_sigterm)\n', (1137, 1169), False, 'import signal\n'), ((1186, 1209), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1203, 1209), False, 'import logging\n'), ((1215, 1302), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1234, 1302), False, 'import logging\n')] |
import os, random, numpy as np, copy
from torch.utils.data import Dataset
import torch
def seq_collate(data):
(past_traj, future_traj) = zip(*data)
past_traj = torch.stack(past_traj,dim=0)
future_traj = torch.stack(future_traj,dim=0)
data = {
'past_traj': past_traj,
'future_traj': future_traj,
'seq': 'nba',
}
return data
class NBADataset(Dataset):
"""Dataloder for the Trajectory datasets"""
def __init__(
self, obs_len=5, pred_len=10, training=True
):
super(NBADataset, self).__init__()
self.obs_len = obs_len
self.pred_len = pred_len
self.seq_len = self.obs_len + self.pred_len
if training:
data_root = 'datasets/nba/train.npy'
else:
data_root = 'datasets/nba/test.npy'
self.trajs = np.load(data_root)
self.trajs /= (94/28) # Turn to meters
if training:
self.trajs = self.trajs[:32500]
else:
self.trajs = self.trajs[:12500]
self.batch_len = len(self.trajs)
print(self.batch_len)
self.traj_abs = torch.from_numpy(self.trajs).type(torch.float)
self.traj_norm = torch.from_numpy(self.trajs-self.trajs[:,self.obs_len-1:self.obs_len]).type(torch.float)
self.traj_abs = self.traj_abs.permute(0,2,1,3)
self.traj_norm = self.traj_norm.permute(0,2,1,3)
# print(self.traj_abs.shape)
def __len__(self):
return self.batch_len
def __getitem__(self, index):
# print(self.traj_abs.shape)
past_traj = self.traj_abs[index, :, :self.obs_len, :]
future_traj = self.traj_abs[index, :, self.obs_len:, :]
out = [past_traj, future_traj]
return out
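# Example of wiring the dataset to a DataLoader with the custom collate function
# (sketch; batch size, shuffling and the agent count are assumptions):
#
#   from torch.utils.data import DataLoader
#   dset = NBADataset(obs_len=5, pred_len=10, training=True)
#   loader = DataLoader(dset, batch_size=32, shuffle=True, collate_fn=seq_collate)
#   for data in loader:
#       past, future = data['past_traj'], data['future_traj']  # (batch, agents, 5, 2) / (batch, agents, 10, 2)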
| [
"torch.stack",
"torch.from_numpy",
"numpy.load"
] | [((172, 201), 'torch.stack', 'torch.stack', (['past_traj'], {'dim': '(0)'}), '(past_traj, dim=0)\n', (183, 201), False, 'import torch\n'), ((219, 250), 'torch.stack', 'torch.stack', (['future_traj'], {'dim': '(0)'}), '(future_traj, dim=0)\n', (230, 250), False, 'import torch\n'), ((843, 861), 'numpy.load', 'np.load', (['data_root'], {}), '(data_root)\n', (850, 861), True, 'import os, random, numpy as np, copy\n'), ((1131, 1159), 'torch.from_numpy', 'torch.from_numpy', (['self.trajs'], {}), '(self.trajs)\n', (1147, 1159), False, 'import torch\n'), ((1203, 1278), 'torch.from_numpy', 'torch.from_numpy', (['(self.trajs - self.trajs[:, self.obs_len - 1:self.obs_len])'], {}), '(self.trajs - self.trajs[:, self.obs_len - 1:self.obs_len])\n', (1219, 1278), False, 'import torch\n')] |
from django import template
from core.models import *
register = template.Library()
import re
@register.filter(name="cimg")
def cimg(value):
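    # Drop the first four characters of the stored image name (presumably a fixed
    # prefix such as "img/") before handing the path to the template.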
if value.img.name != '':
return value.img.name[4:]
else:
return "" | [
"django.template.Library"
] | [((65, 83), 'django.template.Library', 'template.Library', ([], {}), '()\n', (81, 83), False, 'from django import template\n')] |
#! /usr/bin/python
# -*- coding: utf8 -*-
import os, time, random
import numpy as np
import scipy
import tensorflow as tf
import tensorlayer as tl
from model import *
from utils import *
from config import *
###====================== HYPER-PARAMETERS ===========================###
batch_size = config.train.batch_size
patch_size = config.train.in_patch_size
ni = int(np.sqrt(config.train.batch_size))
def compute_charbonnier_loss(tensor1, tensor2, is_mean=True):
epsilon = 1e-6
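    # Charbonnier penalty sqrt((x - y)^2 + eps): a smooth, differentiable approximation of L1.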
if is_mean:
loss = tf.reduce_mean(tf.reduce_mean(tf.sqrt(tf.square(tf.subtract(tensor1,tensor2))+epsilon), [1, 2, 3]))
else:
loss = tf.reduce_mean(tf.reduce_sum(tf.sqrt(tf.square(tf.subtract(tensor1,tensor2))+epsilon), [1, 2, 3]))
return loss
def load_file_list():
train_hr_file_list = []
train_lr_file_list = []
valid_hr_file_list = []
valid_lr_file_list = []
directory = config.train.hr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
train_hr_file_list.append("%s%s"%(directory,filename))
directory = config.train.lr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
train_lr_file_list.append("%s%s"%(directory,filename))
directory = config.valid.hr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
valid_hr_file_list.append("%s%s"%(directory,filename))
directory = config.valid.lr_folder_path
for filename in [y for y in os.listdir(directory) if os.path.isfile(os.path.join(directory,y))]:
valid_lr_file_list.append("%s%s"%(directory,filename))
return sorted(train_hr_file_list),sorted(train_lr_file_list),sorted(valid_hr_file_list),sorted(valid_lr_file_list)
def prepare_nn_data(hr_img_list, lr_img_list, idx_img=None):
i = np.random.randint(len(hr_img_list)) if (idx_img is None) else idx_img
input_image = get_imgs_fn(lr_img_list[i])
output_image = get_imgs_fn(hr_img_list[i])
scale = int(output_image.shape[0] / input_image.shape[0])
assert scale == config.model.scale
out_patch_size = patch_size * scale
input_batch = np.empty([batch_size,patch_size,patch_size,3])
output_batch = np.empty([batch_size,out_patch_size,out_patch_size,3])
for idx in range(batch_size):
in_row_ind = random.randint(0,input_image.shape[0]-patch_size)
in_col_ind = random.randint(0,input_image.shape[1]-patch_size)
input_cropped = augment_imgs_fn(input_image[in_row_ind:in_row_ind+patch_size,
in_col_ind:in_col_ind+patch_size])
input_cropped = normalize_imgs_fn(input_cropped)
input_cropped = np.expand_dims(input_cropped,axis=0)
input_batch[idx] = input_cropped
out_row_ind = in_row_ind * scale
out_col_ind = in_col_ind * scale
output_cropped = output_image[out_row_ind:out_row_ind+out_patch_size,
out_col_ind:out_col_ind+out_patch_size]
output_cropped = normalize_imgs_fn(output_cropped)
output_cropped = np.expand_dims(output_cropped,axis=0)
output_batch[idx] = output_cropped
return input_batch,output_batch
def train():
save_dir = "%s/%s_train"%(config.model.result_path,tl.global_flag['mode'])
checkpoint_dir = "%s"%(config.model.checkpoint_path)
tl.files.exists_or_mkdir(save_dir)
tl.files.exists_or_mkdir(checkpoint_dir)
###========================== DEFINE MODEL ============================###
t_image = tf.placeholder('float32', [batch_size, patch_size, patch_size, 3], name='t_image_input')
t_target_image = tf.placeholder('float32', [batch_size, patch_size*config.model.scale, patch_size*config.model.scale, 3], name='t_target_image')
t_target_image_down = tf.image.resize_images(t_target_image, size=[patch_size*2, patch_size*2], method=0, align_corners=False)
net_image2, net_grad2, net_image1, net_grad1 = LapSRN(t_image, is_train=True, reuse=False)
net_image2.print_params(False)
## test inference
net_image_test, net_grad_test, _, _ = LapSRN(t_image, is_train=False, reuse=True)
###========================== DEFINE TRAIN OPS ==========================###
loss2 = compute_charbonnier_loss(net_image2.outputs, t_target_image, is_mean=True)
loss1 = compute_charbonnier_loss(net_image1.outputs, t_target_image_down, is_mean=True)
g_loss = loss1 + loss2 * 4
g_vars = tl.layers.get_variables_with_name('LapSRN', True, True)
with tf.variable_scope('learning_rate'):
lr_v = tf.Variable(config.train.lr_init, trainable=False)
g_optim = tf.train.AdamOptimizer(lr_v, beta1=config.train.beta1).minimize(g_loss, var_list=g_vars)
###========================== RESTORE MODEL =============================###
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/params_{}.npz'.format(tl.global_flag['mode']), network=net_image2)
###========================== PRE-LOAD DATA ===========================###
train_hr_list,train_lr_list,valid_hr_list,valid_lr_list = load_file_list()
###========================== INTERMEDIATE RESULT ===============================###
sample_ind = 37
sample_input_imgs,sample_output_imgs = prepare_nn_data(valid_hr_list,valid_lr_list,sample_ind)
tl.vis.save_images(truncate_imgs_fn(sample_input_imgs), [ni, ni], save_dir+'/train_sample_input.png')
tl.vis.save_images(truncate_imgs_fn(sample_output_imgs), [ni, ni], save_dir+'/train_sample_output.png')
###========================== TRAINING ====================###
sess.run(tf.assign(lr_v, config.train.lr_init))
print(" ** learning rate: %f" % config.train.lr_init)
for epoch in range(config.train.n_epoch):
## update learning rate
if epoch != 0 and (epoch % config.train.decay_iter == 0):
lr_decay = config.train.lr_decay ** (epoch // config.train.decay_iter)
lr = config.train.lr_init * lr_decay
sess.run(tf.assign(lr_v, lr))
print(" ** learning rate: %f" % (lr))
epoch_time = time.time()
total_g_loss, n_iter = 0, 0
## load image data
idx_list = np.random.permutation(len(train_hr_list))
for idx_file in range(len(idx_list)):
step_time = time.time()
batch_input_imgs,batch_output_imgs = prepare_nn_data(train_hr_list,train_lr_list,idx_file)
errM, _ = sess.run([g_loss, g_optim], {t_image: batch_input_imgs, t_target_image: batch_output_imgs})
total_g_loss += errM
n_iter += 1
print("[*] Epoch: [%2d/%2d] time: %4.4fs, loss: %.8f" % (epoch, config.train.n_epoch, time.time() - epoch_time, total_g_loss/n_iter))
## save model and evaluation on sample set
if (epoch >= 0):
tl.files.save_npz(net_image2.all_params, name=checkpoint_dir+'/params_{}.npz'.format(tl.global_flag['mode']), sess=sess)
if config.train.dump_intermediate_result is True:
sample_out, sample_grad_out = sess.run([net_image_test.outputs,net_grad_test.outputs], {t_image: sample_input_imgs})#; print('gen sub-image:', out.shape, out.min(), out.max())
tl.vis.save_images(truncate_imgs_fn(sample_out), [ni, ni], save_dir+'/train_predict_%d.png' % epoch)
tl.vis.save_images(truncate_imgs_fn(np.abs(sample_grad_out)), [ni, ni], save_dir+'/train_grad_predict_%d.png' % epoch)
def test(file):
try:
img = get_imgs_fn(file)
except IOError:
print('cannot open %s'%(file))
else:
checkpoint_dir = config.model.checkpoint_path
save_dir = "%s/%s"%(config.model.result_path,tl.global_flag['mode'])
input_image = normalize_imgs_fn(img)
size = input_image.shape
print('Input size: %s,%s,%s'%(size[0],size[1],size[2]))
t_image = tf.placeholder('float32', [None,size[0],size[1],size[2]], name='input_image')
net_g, _, _, _ = LapSRN(t_image, is_train=False, reuse=False)
###========================== RESTORE G =============================###
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir+'/params_train.npz', network=net_g)
###======================= TEST =============================###
start_time = time.time()
out = sess.run(net_g.outputs, {t_image: [input_image]})
print("took: %4.4fs" % (time.time() - start_time))
tl.files.exists_or_mkdir(save_dir)
tl.vis.save_image(truncate_imgs_fn(out[0,:,:,:]), save_dir+'/test_out.png')
tl.vis.save_image(input_image, save_dir+'/test_input.png')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', choices=['train','test'], default='train', help='select mode')
parser.add_argument('-f','--file', help='input file')
args = parser.parse_args()
tl.global_flag['mode'] = args.mode
if tl.global_flag['mode'] == 'train':
train()
elif tl.global_flag['mode'] == 'test':
if (args.file is None):
raise Exception("Please enter input file name for test mode")
test(args.file)
else:
raise Exception("Unknow --mode")
| [
"tensorflow.image.resize_images",
"numpy.sqrt",
"tensorlayer.layers.initialize_global_variables",
"tensorlayer.files.load_and_assign_npz",
"os.listdir",
"argparse.ArgumentParser",
"tensorflow.placeholder",
"tensorlayer.layers.get_variables_with_name",
"tensorflow.assign",
"tensorlayer.vis.save_ima... | [((371, 403), 'numpy.sqrt', 'np.sqrt', (['config.train.batch_size'], {}), '(config.train.batch_size)\n', (378, 403), True, 'import numpy as np\n'), ((2259, 2308), 'numpy.empty', 'np.empty', (['[batch_size, patch_size, patch_size, 3]'], {}), '([batch_size, patch_size, patch_size, 3])\n', (2267, 2308), True, 'import numpy as np\n'), ((2325, 2382), 'numpy.empty', 'np.empty', (['[batch_size, out_patch_size, out_patch_size, 3]'], {}), '([batch_size, out_patch_size, out_patch_size, 3])\n', (2333, 2382), True, 'import numpy as np\n'), ((3499, 3533), 'tensorlayer.files.exists_or_mkdir', 'tl.files.exists_or_mkdir', (['save_dir'], {}), '(save_dir)\n', (3523, 3533), True, 'import tensorlayer as tl\n'), ((3538, 3578), 'tensorlayer.files.exists_or_mkdir', 'tl.files.exists_or_mkdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3562, 3578), True, 'import tensorlayer as tl\n'), ((3673, 3766), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[batch_size, patch_size, patch_size, 3]'], {'name': '"""t_image_input"""'}), "('float32', [batch_size, patch_size, patch_size, 3], name=\n 't_image_input')\n", (3687, 3766), True, 'import tensorflow as tf\n'), ((3783, 3919), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[batch_size, patch_size * config.model.scale, patch_size * config.model.\n scale, 3]'], {'name': '"""t_target_image"""'}), "('float32', [batch_size, patch_size * config.model.scale, \n patch_size * config.model.scale, 3], name='t_target_image')\n", (3797, 3919), True, 'import tensorflow as tf\n'), ((3937, 4050), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['t_target_image'], {'size': '[patch_size * 2, patch_size * 2]', 'method': '(0)', 'align_corners': '(False)'}), '(t_target_image, size=[patch_size * 2, patch_size * 2\n ], method=0, align_corners=False)\n', (3959, 4050), True, 'import tensorflow as tf\n'), ((4594, 4649), 'tensorlayer.layers.get_variables_with_name', 'tl.layers.get_variables_with_name', (['"""LapSRN"""', '(True)', '(True)'], {}), "('LapSRN', True, True)\n", (4627, 4649), True, 'import tensorlayer as tl\n'), ((5060, 5103), 'tensorlayer.layers.initialize_global_variables', 'tl.layers.initialize_global_variables', (['sess'], {}), '(sess)\n', (5097, 5103), True, 'import tensorlayer as tl\n'), ((9180, 9205), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9203, 9205), False, 'import argparse\n'), ((2438, 2490), 'random.randint', 'random.randint', (['(0)', '(input_image.shape[0] - patch_size)'], {}), '(0, input_image.shape[0] - patch_size)\n', (2452, 2490), False, 'import os, time, random\n'), ((2511, 2563), 'random.randint', 'random.randint', (['(0)', '(input_image.shape[1] - patch_size)'], {}), '(0, input_image.shape[1] - patch_size)\n', (2525, 2563), False, 'import os, time, random\n'), ((2816, 2853), 'numpy.expand_dims', 'np.expand_dims', (['input_cropped'], {'axis': '(0)'}), '(input_cropped, axis=0)\n', (2830, 2853), True, 'import numpy as np\n'), ((3225, 3263), 'numpy.expand_dims', 'np.expand_dims', (['output_cropped'], {'axis': '(0)'}), '(output_cropped, axis=0)\n', (3239, 3263), True, 'import numpy as np\n'), ((4664, 4698), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""learning_rate"""'], {}), "('learning_rate')\n", (4681, 4698), True, 'import tensorflow as tf\n'), ((4715, 4765), 'tensorflow.Variable', 'tf.Variable', (['config.train.lr_init'], {'trainable': '(False)'}), '(config.train.lr_init, trainable=False)\n', (4726, 4765), True, 'import tensorflow as tf\n'), 
((5903, 5940), 'tensorflow.assign', 'tf.assign', (['lr_v', 'config.train.lr_init'], {}), '(lr_v, config.train.lr_init)\n', (5912, 5940), True, 'import tensorflow as tf\n'), ((6391, 6402), 'time.time', 'time.time', ([], {}), '()\n', (6400, 6402), False, 'import os, time, random\n'), ((8198, 8283), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[None, size[0], size[1], size[2]]'], {'name': '"""input_image"""'}), "('float32', [None, size[0], size[1], size[2]], name='input_image'\n )\n", (8212, 8283), True, 'import tensorflow as tf\n'), ((8540, 8583), 'tensorlayer.layers.initialize_global_variables', 'tl.layers.initialize_global_variables', (['sess'], {}), '(sess)\n', (8577, 8583), True, 'import tensorlayer as tl\n'), ((8592, 8693), 'tensorlayer.files.load_and_assign_npz', 'tl.files.load_and_assign_npz', ([], {'sess': 'sess', 'name': "(checkpoint_dir + '/params_train.npz')", 'network': 'net_g'}), "(sess=sess, name=checkpoint_dir +\n '/params_train.npz', network=net_g)\n", (8620, 8693), True, 'import tensorlayer as tl\n'), ((8783, 8794), 'time.time', 'time.time', ([], {}), '()\n', (8792, 8794), False, 'import os, time, random\n'), ((8931, 8965), 'tensorlayer.files.exists_or_mkdir', 'tl.files.exists_or_mkdir', (['save_dir'], {}), '(save_dir)\n', (8955, 8965), True, 'import tensorlayer as tl\n'), ((9058, 9118), 'tensorlayer.vis.save_image', 'tl.vis.save_image', (['input_image', "(save_dir + '/test_input.png')"], {}), "(input_image, save_dir + '/test_input.png')\n", (9075, 9118), True, 'import tensorlayer as tl\n'), ((975, 996), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (985, 996), False, 'import os, time, random\n'), ((1184, 1205), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1194, 1205), False, 'import os, time, random\n'), ((1393, 1414), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1403, 1414), False, 'import os, time, random\n'), ((1602, 1623), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1612, 1623), False, 'import os, time, random\n'), ((4781, 4835), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr_v'], {'beta1': 'config.train.beta1'}), '(lr_v, beta1=config.train.beta1)\n', (4803, 4835), True, 'import tensorflow as tf\n'), ((4985, 5054), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (4999, 5054), True, 'import tensorflow as tf\n'), ((6598, 6609), 'time.time', 'time.time', ([], {}), '()\n', (6607, 6609), False, 'import os, time, random\n'), ((1015, 1041), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1027, 1041), False, 'import os, time, random\n'), ((1224, 1250), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1236, 1250), False, 'import os, time, random\n'), ((1433, 1459), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1445, 1459), False, 'import os, time, random\n'), ((1642, 1668), 'os.path.join', 'os.path.join', (['directory', 'y'], {}), '(directory, y)\n', (1654, 1668), False, 'import os, time, random\n'), ((6298, 6317), 'tensorflow.assign', 'tf.assign', (['lr_v', 'lr'], {}), '(lr_v, lr)\n', (6307, 6317), True, 'import tensorflow as tf\n'), ((8461, 8530), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', 
(8475, 8530), True, 'import tensorflow as tf\n'), ((8891, 8902), 'time.time', 'time.time', ([], {}), '()\n', (8900, 8902), False, 'import os, time, random\n'), ((6987, 6998), 'time.time', 'time.time', ([], {}), '()\n', (6996, 6998), False, 'import os, time, random\n'), ((7682, 7705), 'numpy.abs', 'np.abs', (['sample_grad_out'], {}), '(sample_grad_out)\n', (7688, 7705), True, 'import numpy as np\n'), ((568, 597), 'tensorflow.subtract', 'tf.subtract', (['tensor1', 'tensor2'], {}), '(tensor1, tensor2)\n', (579, 597), True, 'import tensorflow as tf\n'), ((692, 721), 'tensorflow.subtract', 'tf.subtract', (['tensor1', 'tensor2'], {}), '(tensor1, tensor2)\n', (703, 721), True, 'import tensorflow as tf\n')] |
import lxml.html
from billy.scrape.committees import CommitteeScraper, Committee
from apiclient import ApiClient
from .utils import get_with_increasing_timeout
from scrapelib import HTTPError
class INCommitteeScraper(CommitteeScraper):
jurisdiction = 'in'
def process_special_members(self,comm,comm_json,role_name):
role_dict = {"chair":"Chair",
"viceChair": "Vice Chair",
"rankingMinMember":"Ranking Minority Member"}
try:
mem = comm_json[role_name]
except KeyError:
return
if mem:
person = mem["firstName"]+" "+mem["lastName"]
comm.add_member(person,role=role_dict[role_name])
return person
return None
def get_subcommittee_info(self,session):
#api gives NO way of finding out who owns
        #a subcommittee. It can be found based on indentation(!)
#here: http://iga.in.gov/legislative/2015/committees/standing
#so we're going to hit that and make a dictionary. yuck
#but this is less important than some other stuff
#so we're going to be OK if we timeout.
link = "http://iga.in.gov/legislative/{}/committees/standing".format(session)
html = get_with_increasing_timeout(self,link,fail=False)
sc_dict = {}
if html:
doc = lxml.html.fromstring(html.text)
committees = doc.xpath("//li[@class='committee-item']")
for c in committees:
comm_name = c.xpath("./a")[0]
comm_name = comm_name.text_content().strip()
subcomms = c.xpath(".//li[@class='subcommittee-item']")
for s in subcomms:
subcom_name = s.text_content().strip()
sc_dict[subcom_name] = comm_name
return sc_dict
def scrape(self,term,chambers):
t = next((item for item in self.metadata["terms"] if item["name"] == term),None)
session = max(t["sessions"])
subcomms = self.get_subcommittee_info(session)
api_base_url = "https://api.iga.in.gov"
html_base_url = "http://iga.in.gov/legislative/{}/committees/".format(session)
client = ApiClient(self)
r = client.get("committees",session=session)
all_pages = client.unpaginate(r)
for comm_info in all_pages:
#this is kind of roundabout, but needed in order
#to take advantage of all of our machinery to make
#sure we're not overloading their api
comm_link = comm_info["link"]
comm_name = comm_link.split("/")[-1]
if "withdrawn" in comm_name or "conference" in comm_name:
continue
try:
comm_json = client.get("committee",committee_link=comm_link[1:])
except HTTPError:
self.logger.warning("Page does not exist")
continue
try:
chamber = comm_json["chamber"]["name"]
except KeyError:
chamber = 'joint'
else:
if chamber == "Senate":
chamber = "upper"
elif chamber == "House":
chamber = "lower"
else:
raise AssertionError("Unknown committee chamber {}".format(chamber))
name = comm_json["name"]
try:
owning_comm = subcomms[name]
except KeyError:
name = name.replace("Statutory Committee on","").strip()
comm = Committee(chamber,name)
else:
name = name.replace("Statutory Committee on","").replace("Subcommittee","").strip()
comm = Committee(chamber,owning_comm,subcommittee=name)
chair = self.process_special_members(comm,comm_json,"chair")
vicechair = self.process_special_members(comm,comm_json,"viceChair")
ranking = self.process_special_members(comm,comm_json,"rankingMinMember")
#leadership is also listed in membership
#so we have to make sure we haven't seen them yet
comm_members = [m for m in [chair,vicechair,ranking] if m]
for mem in comm_json["members"]:
mem_name = mem["firstName"]+" "+mem["lastName"]
if mem_name not in comm_members:
comm_members.append(mem_name)
comm.add_member(mem_name)
api_source = api_base_url + comm_link
if comm_name[:10] == "committee_":
html_source = html_base_url + comm_name[10:]
comm.add_source(html_source)
comm.add_source(api_source)
self.save_committee(comm)
| [
"apiclient.ApiClient",
"billy.scrape.committees.Committee"
] | [((2230, 2245), 'apiclient.ApiClient', 'ApiClient', (['self'], {}), '(self)\n', (2239, 2245), False, 'from apiclient import ApiClient\n'), ((3771, 3821), 'billy.scrape.committees.Committee', 'Committee', (['chamber', 'owning_comm'], {'subcommittee': 'name'}), '(chamber, owning_comm, subcommittee=name)\n', (3780, 3821), False, 'from billy.scrape.committees import CommitteeScraper, Committee\n'), ((3606, 3630), 'billy.scrape.committees.Committee', 'Committee', (['chamber', 'name'], {}), '(chamber, name)\n', (3615, 3630), False, 'from billy.scrape.committees import CommitteeScraper, Committee\n')] |
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
import requests
from lxml import html
from . import lesson
USER_TYPE = {"student": "elev", "teacher": "laerer"}
LESSON_STATUS = {None: "normal", "Ændret!": "changed", "Aflyst!": "cancelled"}
class UserDoesNotExistError(Exception):
""" Attempted to get a non-existing user from Lectio. """
class IdNotFoundInLinkError(Exception):
""" All lessons with a link should include an ID. """
class InvalidStatusError(Exception):
""" Lesson status can only take the values Ændret!, Aflyst! and None. """
class InvalidTimeLineError(Exception):
""" The line doesn't include any valid formatting of time. """
class InvalidLocationError(Exception):
""" The line doesn't include any location. """
def _get_user_page(school_id, user_type, user_id, week=""):
URL_TEMPLATE = "https://www.lectio.dk/lectio/{0}/" \
"SkemaNy.aspx?type={1}&{1}id={2}&week={3}"
r = requests.get(URL_TEMPLATE.format(school_id,
USER_TYPE[user_type],
user_id,
week),
allow_redirects=False)
return r
def _get_lectio_weekformat_with_offset(offset):
today = datetime.date.today()
future_date = today + datetime.timedelta(weeks=offset)
week_number = "{0:02d}".format(future_date.isocalendar()[1])
year_number = str(future_date.isocalendar()[0])
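    # Lectio expects the week as a zero-padded two-digit week number followed by the four-digit year, e.g. "082016".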
lectio_week = week_number + year_number
return lectio_week
def _get_id_from_link(link):
match = re.search("(?:absid|ProeveholdId|outboundCensorID)=(\d+)", link)
if match is None:
raise IdNotFoundInLinkError("Couldn't find id in link: {}".format(
link))
return match.group(1)
def _get_complete_link(link):
return "https://www.lectio.dk" + link.split("&prevurl=", 1)[0]
def _is_status_line(line):
match = re.search("Ændret!|Aflyst!", line)
return match is not None
def _is_location_line(line):
match = re.search("Lokaler?: ", line)
return match is not None
def _is_time_line(line):
# Search for one of the following formats:
# 14/3-2016 Hele dagen
# 14/3-2016 15:20 til 16:50
# 8/4-2016 17:30 til 9/4-2016 01:00
# 7/12-2015 10:00 til 11:30
# 17/12-2015 10:00 til 11:30
match = re.search("\d{1,2}/\d{1,2}-\d{4} (?:Hele dagen|\d{2}:\d{2} til "
"(?:\d{1,2}/\d{1,2}-\d{4} )?\d{2}:\d{2})", line)
return match is not None
def _get_status_from_line(line):
try:
return LESSON_STATUS[line]
except KeyError:
raise InvalidStatusError("Line: '{}' has no valid status".format(line))
def _get_location_from_line(line):
match = re.search("Lokaler?: (.*)", line)
if match is None:
raise InvalidLocationError("No location found in line: '{}'"
.format(line))
return match.group(1)
def _get_date_from_match(match):
if match:
return datetime.datetime.strptime(match, "%d/%m-%Y").date()
else:
return None
def _get_time_from_match(match):
if match:
return datetime.datetime.strptime(match, "%H:%M").time()
else:
return None
def _get_time_from_line(line):
# Extract the following information in capture groups:
# 1 - start date
# 2 - start time
# 3 - end date
# 4 - end time
match = re.search("(\d{1,2}/\d{1,2}-\d{4})(?: (\d{2}:\d{2}) til "
"(\d{1,2}/\d{1,2}-\d{4})? ?(\d{2}:\d{2}))?", line)
if match is None:
raise InvalidTimeLineError("No time found in line: '{}'".format(line))
start_date = _get_date_from_match(match.group(1))
start_time = _get_time_from_match(match.group(2))
if start_time:
start = datetime.datetime.combine(start_date, start_time)
else:
start = start_date
end_date = _get_date_from_match(match.group(3))
end_time = _get_time_from_match(match.group(4))
if not end_date:
end_date = start_date
if end_time:
end = datetime.datetime.combine(end_date, end_time)
else:
end = end_date
return start, end
def _add_line_to_text(line, text):
if text != "":
text += "\n"
text += line
return text
def _add_section_to_summary(section, summary):
if summary != "" and section != "":
summary += " " + u"\u2022" + " "
summary += section
return summary
def _get_info_from_title(title):
summary = description = ""
status = start_time = end_time = location = None
lines = title.splitlines()
headerSection = True
for line in lines:
if headerSection:
if line == '':
headerSection = False
continue
if _is_status_line(line):
status = _get_status_from_line(line)
elif _is_time_line(line):
start_time, end_time = _get_time_from_line(line)
elif _is_location_line(line):
location = _get_location_from_line(line)
else:
summary = _add_section_to_summary(line, summary)
else:
description = _add_line_to_text(line, description)
return summary, status, start_time, end_time, location, description
def _parse_element_to_lesson(element):
link = element.get("href")
id = None
if link:
id = _get_id_from_link(link)
link = _get_complete_link(link)
summary, status, start_time, end_time, location, description = \
_get_info_from_title(element.get("data-additionalinfo"))
return lesson.Lesson(id, summary, status, start_time, end_time, location, description, link)
def _parse_page_to_lessons(page):
tree = html.fromstring(page)
# Find all a elements with class s2skemabrik in page
lesson_elements = tree.xpath("//a[contains(concat("
"' ', normalize-space(@class), ' '),"
"' s2skemabrik ')]")
lessons = []
for element in lesson_elements:
lessons.append(_parse_element_to_lesson(element))
return lessons
def _retreive_week_schedule(school_id, user_type, user_id, week):
r = _get_user_page(school_id, user_type, user_id, week)
schedule = _parse_page_to_lessons(r.content)
return schedule
def _filter_for_duplicates(schedule):
filtered_schedule = []
for lesson in schedule:
if lesson not in filtered_schedule:
filtered_schedule.append(lesson)
return filtered_schedule
def _retreive_user_schedule(school_id, user_type, user_id, n_weeks):
schedule = []
for week_offset in range(n_weeks + 1):
week = _get_lectio_weekformat_with_offset(week_offset)
week_schedule = _retreive_week_schedule(school_id,
user_type,
user_id,
week)
schedule += week_schedule
filtered_schedule = _filter_for_duplicates(schedule)
return filtered_schedule
def _user_exists(school_id, user_type, user_id):
r = _get_user_page(school_id, user_type, user_id)
return r.status_code == requests.codes.ok
def get_schedule(school_id, user_type, user_id, n_weeks):
if not _user_exists(school_id, user_type, user_id):
raise UserDoesNotExistError("Couldn't find user - school: {}, "
"type: {}, id: {} - in Lectio.".format(
school_id, user_type, user_id))
return _retreive_user_schedule(school_id, user_type, user_id, n_weeks)
| [
"datetime.datetime.strptime",
"lxml.html.fromstring",
"datetime.date.today",
"datetime.timedelta",
"datetime.datetime.combine",
"re.search"
] | [((1821, 1842), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1840, 1842), False, 'import datetime\n'), ((2129, 2194), 're.search', 're.search', (['"""(?:absid|ProeveholdId|outboundCensorID)=(\\\\d+)"""', 'link'], {}), "('(?:absid|ProeveholdId|outboundCensorID)=(\\\\d+)', link)\n", (2138, 2194), False, 'import re\n'), ((2499, 2533), 're.search', 're.search', (['"""Ændret!|Aflyst!"""', 'line'], {}), "('Ændret!|Aflyst!', line)\n", (2508, 2533), False, 'import re\n'), ((2606, 2635), 're.search', 're.search', (['"""Lokaler?: """', 'line'], {}), "('Lokaler?: ', line)\n", (2615, 2635), False, 'import re\n'), ((2915, 3045), 're.search', 're.search', (['"""\\\\d{1,2}/\\\\d{1,2}-\\\\d{4} (?:Hele dagen|\\\\d{2}:\\\\d{2} til (?:\\\\d{1,2}/\\\\d{1,2}-\\\\d{4} )?\\\\d{2}:\\\\d{2})"""', 'line'], {}), "(\n '\\\\d{1,2}/\\\\d{1,2}-\\\\d{4} (?:Hele dagen|\\\\d{2}:\\\\d{2} til (?:\\\\d{1,2}/\\\\d{1,2}-\\\\d{4} )?\\\\d{2}:\\\\d{2})'\n , line)\n", (2924, 3045), False, 'import re\n'), ((3309, 3342), 're.search', 're.search', (['"""Lokaler?: (.*)"""', 'line'], {}), "('Lokaler?: (.*)', line)\n", (3318, 3342), False, 'import re\n'), ((3985, 4110), 're.search', 're.search', (['"""(\\\\d{1,2}/\\\\d{1,2}-\\\\d{4})(?: (\\\\d{2}:\\\\d{2}) til (\\\\d{1,2}/\\\\d{1,2}-\\\\d{4})? ?(\\\\d{2}:\\\\d{2}))?"""', 'line'], {}), "(\n '(\\\\d{1,2}/\\\\d{1,2}-\\\\d{4})(?: (\\\\d{2}:\\\\d{2}) til (\\\\d{1,2}/\\\\d{1,2}-\\\\d{4})? ?(\\\\d{2}:\\\\d{2}))?'\n , line)\n", (3994, 4110), False, 'import re\n'), ((6314, 6335), 'lxml.html.fromstring', 'html.fromstring', (['page'], {}), '(page)\n', (6329, 6335), False, 'from lxml import html\n'), ((1869, 1901), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': 'offset'}), '(weeks=offset)\n', (1887, 1901), False, 'import datetime\n'), ((4362, 4411), 'datetime.datetime.combine', 'datetime.datetime.combine', (['start_date', 'start_time'], {}), '(start_date, start_time)\n', (4387, 4411), False, 'import datetime\n'), ((4638, 4683), 'datetime.datetime.combine', 'datetime.datetime.combine', (['end_date', 'end_time'], {}), '(end_date, end_time)\n', (4663, 4683), False, 'import datetime\n'), ((3574, 3619), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['match', '"""%d/%m-%Y"""'], {}), "(match, '%d/%m-%Y')\n", (3600, 3619), False, 'import datetime\n'), ((3721, 3763), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['match', '"""%H:%M"""'], {}), "(match, '%H:%M')\n", (3747, 3763), False, 'import datetime\n')] |
import pandas as pd
import numpy as np
from typing import Tuple
from itertools import product
from tqdm import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
"""
Using cross validation on the random forest and returning
the average and standard deviation of the results.
"""
def rf_cross_val(train_data: pd.DataFrame, tags: pd.Series, rfparams: dict, fold=5):
f1_mac_lst = []
f1_mic_lst = []
# running the random forest "fold" times.
for i in range(fold):
train_and_val = train_test_split(train_data, tags, test_size=1 / fold)
f1_mac, f1_mic = rf_single_hyperparams(*train_and_val, rfparams=rfparams)
f1_mac_lst.append(f1_mac)
f1_mic_lst.append(f1_mic)
# computing average and std.
f1_mac_lst = np.array(f1_mac_lst)
mean_mac_f1 = f1_mac_lst.mean(axis=0)
std_mac_f1 = f1_mac_lst.std(axis=0)
f1_mic_lst = np.array(f1_mic_lst)
mean_mic_f1 = f1_mic_lst.mean(axis=0)
std_mic_f1 = f1_mic_lst.std(axis=0)
return mean_mac_f1, std_mac_f1, mean_mic_f1, std_mic_f1
"""
A single run of the random forest.
"""
def rf_single_hyperparams(train_data: pd.DataFrame, test: pd.DataFrame, train_tag: pd.Series,
test_tag: pd.Series, rfparams: dict) -> Tuple[np.ndarray, np.ndarray]:
# first creating the singular model, then using it in a one VS rest model.
rf_single_model = RandomForestClassifier(bootstrap=True, n_jobs=-1, **rfparams)
rf_model = OneVsRestClassifier(rf_single_model).fit(train_data, train_tag)
# predicting and evaluating the results.
prediction = rf_model.predict(test)
return evaluate(prediction, test_tag)
"""
Calculating f1 scores, both macro and micro, as evaluation.
"""
def evaluate(prediction, tag) -> Tuple[np.ndarray, np.ndarray]:
f1_mac = f1_score(prediction, tag, labels=range(9), average="macro")
f1_mic = f1_score(prediction, tag, labels=range(9), average="micro")
return f1_mac, f1_mic
"""
Choosing the optimal parameters for the random forest using grid search.
"""
def choose_rf_params(df: pd.DataFrame, tags: pd.Series):
# the five parameters we are using to maximize.
n_estimators_lst = [int(x) for x in np.linspace(start=100, stop=1300, num=7)]
max_features_lst = ['log2', 'sqrt']
max_depth_lst = [int(x) for x in np.linspace(10, 100, num=10)]
min_split_lst = [2, 5, 10]
min_leaf_lst = [1, 2, 4]
maxmacf1 = 0
maxmicf1 = 0
# running on all possible combinations.
for n_est, max_feat, max_depth, min_splt, min_leaf in tqdm(product(n_estimators_lst,
max_features_lst, max_depth_lst, min_split_lst, min_leaf_lst), total=1260):
paramsgrid = {"n_estimators": n_est, "max_features": max_feat, "max_depth": max_depth,
"min_samples_split": min_splt, "min_samples_leaf": min_leaf}
# running the model with cross validation, to get a more accurate score.
mean_mac_f1, std_mac_f1, mean_mic_f1, std_mic_f1 = rf_cross_val(df, tags, paramsgrid)
# saving the best parameters and their score.
if mean_mic_f1 > maxmicf1:
maxmicf1 = mean_mic_f1
micf1std = std_mic_f1
f1_mic_params = paramsgrid
if mean_mac_f1 > maxmacf1:
maxmacf1 = mean_mac_f1
macf1std = std_mac_f1
f1_mac_params = paramsgrid
    # returning both the best f1 micro and f1 macro scores, and their respective parameters.
return maxmicf1, micf1std, f1_mic_params, maxmacf1, macf1std, f1_mac_params
| [
"sklearn.model_selection.train_test_split",
"itertools.product",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"numpy.linspace",
"sklearn.multiclass.OneVsRestClassifier"
] | [((924, 944), 'numpy.array', 'np.array', (['f1_mac_lst'], {}), '(f1_mac_lst)\n', (932, 944), True, 'import numpy as np\n'), ((1045, 1065), 'numpy.array', 'np.array', (['f1_mic_lst'], {}), '(f1_mic_lst)\n', (1053, 1065), True, 'import numpy as np\n'), ((1544, 1605), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'bootstrap': '(True)', 'n_jobs': '(-1)'}), '(bootstrap=True, n_jobs=-1, **rfparams)\n', (1566, 1605), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((668, 722), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_data', 'tags'], {'test_size': '(1 / fold)'}), '(train_data, tags, test_size=1 / fold)\n', (684, 722), False, 'from sklearn.model_selection import train_test_split\n'), ((2700, 2791), 'itertools.product', 'product', (['n_estimators_lst', 'max_features_lst', 'max_depth_lst', 'min_split_lst', 'min_leaf_lst'], {}), '(n_estimators_lst, max_features_lst, max_depth_lst, min_split_lst,\n min_leaf_lst)\n', (2707, 2791), False, 'from itertools import product\n'), ((1621, 1657), 'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['rf_single_model'], {}), '(rf_single_model)\n', (1640, 1657), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((2349, 2389), 'numpy.linspace', 'np.linspace', ([], {'start': '(100)', 'stop': '(1300)', 'num': '(7)'}), '(start=100, stop=1300, num=7)\n', (2360, 2389), True, 'import numpy as np\n'), ((2468, 2496), 'numpy.linspace', 'np.linspace', (['(10)', '(100)'], {'num': '(10)'}), '(10, 100, num=10)\n', (2479, 2496), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import datetime as dt
import operator
import time
import os
def most_common_day(dataframe):
start = time.time()
most_common_day_dict = dataframe['Day of the Week'].value_counts().to_dict()
popular_day_of_the_week = max(most_common_day_dict.items() , key=operator.itemgetter(1))[0]
print( 'Day :' , popular_day_of_the_week , 'Counts: ' , most_common_day_dict[popular_day_of_the_week])
print(time.time() - start)
def most_common_hour(dataframe):
start = time.time()
most_common_hour_dict = dataframe['Hour'].value_counts().to_dict()
popular_hour_of_the_day = max(most_common_hour_dict.items() , key=operator.itemgetter(1))[0]
print( 'Hour :' , popular_hour_of_the_day , 'Counts: ' , most_common_hour_dict[popular_hour_of_the_day])
print(time.time() - start)
def most_common_start_and_end_station(dataframe):
start = time.time()
most_common_start_station_of_the_month_dict = dataframe['Start station'].value_counts().to_dict()
popular_start_station_of_the_month = max(most_common_start_station_of_the_month_dict.items() , key=operator.itemgetter(1))[0]
print( 'Start Station :' , popular_start_station_of_the_month , 'Counts: ' , most_common_start_station_of_the_month_dict[popular_start_station_of_the_month])
most_common_end_station_of_the_month_dict = dataframe['End station'].value_counts().to_dict()
popular_end_station_of_the_month = max(most_common_end_station_of_the_month_dict.items() , key=operator.itemgetter(1))[0]
print( 'End Station :' , popular_end_station_of_the_month , 'Counts: ' , most_common_end_station_of_the_month_dict[popular_end_station_of_the_month])
print(time.time() - start)
def most_common_route(dataframe):
start = time.time()
routing_df_temp = dataframe.groupby(['Start station' , 'End station']).size().reset_index(name='Times')
routing_temp_dict = routing_df_temp['Times'].sort_values().to_dict()
index_of_popular_route = max(routing_temp_dict.items() , key=operator.itemgetter(1))[0]
print( 'Most popular route: ' ,routing_df_temp.loc[index_of_popular_route])
print(time.time() - start)
def trip_duration(dataframe):
start = time.time()
print('total: ' , sum(dataframe['Duration']))
print('mean: ' , dataframe['Duration'].mean())
print(time.time() - start)
def user_type(dataframe):
start = time.time()
print(dataframe['Member type'].value_counts())
print(time.time() - start)
def gender(dataframe ,city_name):
start = time.time()
if city_name == 'washington':
return
print(dataframe['Gender'].value_counts())
print(time.time() - start)
def birthyear(dataframe , city_name):
start = time.time()
if city_name == 'washington':
return
dict_brithyear_frequent = dataframe['Birthyear'].value_counts().to_dict()
del dict_brithyear_frequent[0]
frequented_birth_year = int(max(dict_brithyear_frequent.items() , key=operator.itemgetter(1))[0])
min_birthyaer = int(min(dict_brithyear_frequent.keys()))
max_birthyaer = int(max(dict_brithyear_frequent.keys()))
print('most common birth year: ', frequented_birth_year , 'min birth year: ' , min_birthyaer , 'max birth year: ' , max_birthyaer)
print(time.time() - start)
def main():
user_input = 'Y'
while user_input == 'Y':
city_name = input('Welcome plz enter your city name: ')
if os.path.exists(city_name + '.csv'):
name_of_city = pd.read_csv(city_name + '.csv')
else:
            print("this city dataset doesn't exist!")
continue
        how_filtering = input('please enter your filter type: both, month, day, none: ')
if how_filtering == 'month':
filtering_month = input('which month : ')
#month filtering
by_month_filter_df = name_of_city[name_of_city['Month'] == filtering_month]
#most common day in the month
most_common_day(by_month_filter_df)
#most common hour in the month
most_common_hour(by_month_filter_df)
#most common start and end station in the month
most_common_start_and_end_station(by_month_filter_df)
#most common route
most_common_route(by_month_filter_df)
#trip duration
trip_duration(by_month_filter_df)
#user type
user_type(by_month_filter_df)
#gender
gender(by_month_filter_df , city_name)
#birthyear
birthyear(by_month_filter_df ,city_name)
elif how_filtering == 'day':
filtering_day = int(input('which day : '))
#day filtering
by_day_filter_df = name_of_city[name_of_city['Day'] == filtering_day]
#most common hour of the day
most_common_hour(by_day_filter_df)
#most common start and end station in the day
most_common_start_and_end_station(by_day_filter_df)
#most common route
most_common_route(by_day_filter_df)
#trip duration
trip_duration(by_day_filter_df)
#user type
user_type(by_day_filter_df)
#gender
gender(by_day_filter_df,city_name)
#birthyear
birthyear(by_day_filter_df , city_name)
elif how_filtering == 'both':
filtering_month = input('which month : ')
filtering_day = int(input('which day : '))
#month and day filtering
by_month_and_day_filter_df = name_of_city[(name_of_city['Month'] == filtering_month) & (name_of_city['Day']==filtering_day)]
#most common hour of the day
most_common_hour(by_month_and_day_filter_df)
#most common start and end station in the day
most_common_start_and_end_station(by_month_and_day_filter_df)
#most common route
most_common_route(by_month_and_day_filter_df)
#trip duration
trip_duration(by_month_and_day_filter_df)
#user type
user_type(by_month_and_day_filter_df)
#gender
gender(by_month_and_day_filter_df,city_name)
#birthyear
birthyear(by_month_and_day_filter_df , city_name)
elif how_filtering == 'none':
most_common_day(name_of_city)
#most common hour of the day
most_common_hour(name_of_city)
#most common start and end station in the day
most_common_start_and_end_station(name_of_city)
#most common route
most_common_route(name_of_city)
#trip duration
trip_duration(name_of_city)
#user type
user_type(name_of_city)
#gender
gender(name_of_city,city_name)
#birthyear
birthyear(name_of_city, city_name)
user_input = input('do you want to continue Y/N: ')
if __name__ == '__main__':
main() | [
"os.path.exists",
"time.time",
"operator.itemgetter",
"pandas.read_csv"
] | [((145, 156), 'time.time', 'time.time', ([], {}), '()\n', (154, 156), False, 'import time\n'), ((519, 530), 'time.time', 'time.time', ([], {}), '()\n', (528, 530), False, 'import time\n'), ((902, 913), 'time.time', 'time.time', ([], {}), '()\n', (911, 913), False, 'import time\n'), ((1765, 1776), 'time.time', 'time.time', ([], {}), '()\n', (1774, 1776), False, 'import time\n'), ((2204, 2215), 'time.time', 'time.time', ([], {}), '()\n', (2213, 2215), False, 'import time\n'), ((2387, 2398), 'time.time', 'time.time', ([], {}), '()\n', (2396, 2398), False, 'import time\n'), ((2529, 2540), 'time.time', 'time.time', ([], {}), '()\n', (2538, 2540), False, 'import time\n'), ((2719, 2730), 'time.time', 'time.time', ([], {}), '()\n', (2728, 2730), False, 'import time\n'), ((3421, 3455), 'os.path.exists', 'os.path.exists', (["(city_name + '.csv')"], {}), "(city_name + '.csv')\n", (3435, 3455), False, 'import os\n'), ((451, 462), 'time.time', 'time.time', ([], {}), '()\n', (460, 462), False, 'import time\n'), ((818, 829), 'time.time', 'time.time', ([], {}), '()\n', (827, 829), False, 'import time\n'), ((1697, 1708), 'time.time', 'time.time', ([], {}), '()\n', (1706, 1708), False, 'import time\n'), ((2140, 2151), 'time.time', 'time.time', ([], {}), '()\n', (2149, 2151), False, 'import time\n'), ((2327, 2338), 'time.time', 'time.time', ([], {}), '()\n', (2336, 2338), False, 'import time\n'), ((2460, 2471), 'time.time', 'time.time', ([], {}), '()\n', (2469, 2471), False, 'import time\n'), ((2646, 2657), 'time.time', 'time.time', ([], {}), '()\n', (2655, 2657), False, 'import time\n'), ((3262, 3273), 'time.time', 'time.time', ([], {}), '()\n', (3271, 3273), False, 'import time\n'), ((3484, 3515), 'pandas.read_csv', 'pd.read_csv', (["(city_name + '.csv')"], {}), "(city_name + '.csv')\n", (3495, 3515), True, 'import pandas as pd\n'), ((307, 329), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (326, 329), False, 'import operator\n'), ((672, 694), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (691, 694), False, 'import operator\n'), ((1119, 1141), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1138, 1141), False, 'import operator\n'), ((1506, 1528), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1525, 1528), False, 'import operator\n'), ((2023, 2045), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2042, 2045), False, 'import operator\n'), ((2967, 2989), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2986, 2989), False, 'import operator\n')] |
#!/usr/bin/env python3
"""
"""
import liblo
from pythonosc import udp_client
from ptpython.python_input import PythonInput
PORT = 7000
client = udp_client.SimpleUDPClient('127.0.0.1', PORT)
def send(data):
liblo.send(liblo.Address(PORT), data)
def send_prefix(prefix, data):
client.send_message(prefix, data)
def main():
prompt = PythonInput(vi_mode=True)
while True:
try:
text = prompt.app.run()
except KeyboardInterrupt:
continue
except EOFError:
break
else:
is_statement = False
try:
code = compile(text, '<stdin>', 'eval')
except SyntaxError:
is_statement = True
code = compile(text, '<stdin>', 'exec')
if is_statement:
send(text)
else:
send_prefix('/eval', text)
print("goodbye")
if __name__ == "__main__":
main() | [
"pythonosc.udp_client.SimpleUDPClient",
"ptpython.python_input.PythonInput",
"liblo.Address"
] | [((147, 192), 'pythonosc.udp_client.SimpleUDPClient', 'udp_client.SimpleUDPClient', (['"""127.0.0.1"""', 'PORT'], {}), "('127.0.0.1', PORT)\n", (173, 192), False, 'from pythonosc import udp_client\n'), ((349, 374), 'ptpython.python_input.PythonInput', 'PythonInput', ([], {'vi_mode': '(True)'}), '(vi_mode=True)\n', (360, 374), False, 'from ptpython.python_input import PythonInput\n'), ((225, 244), 'liblo.Address', 'liblo.Address', (['PORT'], {}), '(PORT)\n', (238, 244), False, 'import liblo\n')] |
from functools import partial
import torch
import dphsir.solvers.fns.inpaint as task
from dphsir.degrades.inpaint import FastHyStripe
from dphsir.denoisers.wrapper import GRUNetDenoiser
from dphsir.solvers import callbacks
from dphsir.solvers.base import ADMMSolver
from dphsir.solvers.params import admm_log_descent
from dphsir.utils.io import loadmat
from dphsir.metrics import mpsnr
path = 'Lehavim_0910-1717.mat'
data = loadmat(path)
gt = data['gt']
degrade = FastHyStripe()
low, mask = degrade(gt)
mask = mask.astype('float')
device = torch.device('cuda:0')
model_path = 'unet_qrnn3d.pth'
denoiser = GRUNetDenoiser(model_path).to(device)
init = partial(task.inits.none, mask=mask)
prox = task.Prox(mask).to(device)
denoise = denoiser
solver = ADMMSolver(init, prox, denoise).to(device)
iter_num = 24
rhos, sigmas = admm_log_descent(sigma=max(0.255/255., 0),
iter_num=iter_num,
modelSigma1=5, modelSigma2=4,
w=1,
lam=0.6)
pred = solver.restore(low, iter_num=iter_num, rhos=rhos, sigmas=sigmas,
callbacks=[callbacks.ProgressBar(iter_num)])
print(pred.shape)
print(mpsnr(init(low), gt))
print(mpsnr(pred, gt))
# Expected: 74.88
| [
"dphsir.solvers.callbacks.ProgressBar",
"dphsir.utils.io.loadmat",
"dphsir.degrades.inpaint.FastHyStripe",
"functools.partial",
"dphsir.solvers.fns.inpaint.Prox",
"dphsir.metrics.mpsnr",
"dphsir.denoisers.wrapper.GRUNetDenoiser",
"dphsir.solvers.base.ADMMSolver",
"torch.device"
] | [((427, 440), 'dphsir.utils.io.loadmat', 'loadmat', (['path'], {}), '(path)\n', (434, 440), False, 'from dphsir.utils.io import loadmat\n'), ((468, 482), 'dphsir.degrades.inpaint.FastHyStripe', 'FastHyStripe', ([], {}), '()\n', (480, 482), False, 'from dphsir.degrades.inpaint import FastHyStripe\n'), ((545, 567), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (557, 567), False, 'import torch\n'), ((657, 692), 'functools.partial', 'partial', (['task.inits.none'], {'mask': 'mask'}), '(task.inits.none, mask=mask)\n', (664, 692), False, 'from functools import partial\n'), ((1255, 1270), 'dphsir.metrics.mpsnr', 'mpsnr', (['pred', 'gt'], {}), '(pred, gt)\n', (1260, 1270), False, 'from dphsir.metrics import mpsnr\n'), ((611, 637), 'dphsir.denoisers.wrapper.GRUNetDenoiser', 'GRUNetDenoiser', (['model_path'], {}), '(model_path)\n', (625, 637), False, 'from dphsir.denoisers.wrapper import GRUNetDenoiser\n'), ((700, 715), 'dphsir.solvers.fns.inpaint.Prox', 'task.Prox', (['mask'], {}), '(mask)\n', (709, 715), True, 'import dphsir.solvers.fns.inpaint as task\n'), ((755, 786), 'dphsir.solvers.base.ADMMSolver', 'ADMMSolver', (['init', 'prox', 'denoise'], {}), '(init, prox, denoise)\n', (765, 786), False, 'from dphsir.solvers.base import ADMMSolver\n'), ((1168, 1199), 'dphsir.solvers.callbacks.ProgressBar', 'callbacks.ProgressBar', (['iter_num'], {}), '(iter_num)\n', (1189, 1199), False, 'from dphsir.solvers import callbacks\n')] |
import re
import sys
import json
import supybot.world as world
import supybot.utils as utils
from supybot import httpserver
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
if sys.version_info[0] < 3:
from urllib import urlencode
else:
from urllib.parse import urlencode
_ = PluginInternationalization('Realhook')
class RealhookCallback(httpserver.SupyHTTPServerCallback):
name = 'Realraum website callback'
defaultResponse = _("""
You shouldn't be there, this subfolder is not for you. Go back to the
index and try out other plugins (if any).""")
def doPost(self, handler, path, form):
try:
self.plugin.announce.onPayload(form)
except Exception as e:
raise e
finally:
self.send_response(200)
self.end_headers()
instance = None
bold = ircutils.bold
@internationalizeDocstring
class Realhook(callbacks.Plugin):
"""Add the help for "@plugin help Website" here
This should describe *how* to use this plugin."""
threaded = True
def __init__(self, irc):
global instance
self.__parent = super(Realhook, self)
callbacks.Plugin.__init__(self, irc)
instance = self
callback = RealhookCallback()
callback.plugin = self
httpserver.hook('realhook', callback)
class announce(callbacks.Commands):
def onPayload(self, form):
for irc in world.ircs:
if irc.network == 'oftc':
text = form#['text'].value
irc.queueMsg(ircmsgs.privmsg('#realraum', text))
def die(self):
self.__parent.die()
httpserver.unhook('realhook')
Class = Realhook
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| [
"supybot.callbacks.Plugin.__init__",
"supybot.httpserver.hook",
"supybot.ircmsgs.privmsg",
"supybot.i18n.PluginInternationalization",
"supybot.httpserver.unhook"
] | [((488, 526), 'supybot.i18n.PluginInternationalization', 'PluginInternationalization', (['"""Realhook"""'], {}), "('Realhook')\n", (514, 526), False, 'from supybot.i18n import PluginInternationalization, internationalizeDocstring\n'), ((1353, 1389), 'supybot.callbacks.Plugin.__init__', 'callbacks.Plugin.__init__', (['self', 'irc'], {}), '(self, irc)\n', (1378, 1389), True, 'import supybot.callbacks as callbacks\n'), ((1492, 1529), 'supybot.httpserver.hook', 'httpserver.hook', (['"""realhook"""', 'callback'], {}), "('realhook', callback)\n", (1507, 1529), False, 'from supybot import httpserver\n'), ((1868, 1897), 'supybot.httpserver.unhook', 'httpserver.unhook', (['"""realhook"""'], {}), "('realhook')\n", (1885, 1897), False, 'from supybot import httpserver\n'), ((1775, 1809), 'supybot.ircmsgs.privmsg', 'ircmsgs.privmsg', (['"""#realraum"""', 'text'], {}), "('#realraum', text)\n", (1790, 1809), True, 'import supybot.ircmsgs as ircmsgs\n')] |
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests TPU specfic parts of ModularGAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import parameterized
from compare_gan import datasets
from compare_gan import test_utils
from compare_gan.gans import consts as c
from compare_gan.gans.modular_gan import ModularGAN
import tensorflow as tf
FLAGS = flags.FLAGS
class ModularGanTpuTest(parameterized.TestCase, test_utils.CompareGanTestCase):
def setUp(self):
super(ModularGanTpuTest, self).setUp()
self.model_dir = self._get_empty_model_dir()
self.run_config = tf.contrib.tpu.RunConfig(
model_dir=self.model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
@parameterized.parameters([1, 2, 5])
def testBatchSize(self, disc_iters, use_tpu=True):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, disc_iters + 1) # D steps, G step.
self.assertLen(disc_args, disc_iters + 1) # D steps, G step.
for args in gen_args:
self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])
@parameterized.parameters([1, 2, 5])
def testBatchSizeSplitDiscCalls(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
deprecated_split_disc_calls=True,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, disc_iters + 1) # D steps, G step.
# Each D and G step calls discriminator twice: for real and fake images.
self.assertLen(disc_args, 2 * (disc_iters + 1))
for args in gen_args:
self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [8, 32, 32, 3])
@parameterized.parameters([1, 2, 5])
def testBatchSizeExperimentalJointGenForDisc(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
experimental_joint_gen_for_disc=True,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, 2)
self.assertLen(disc_args, disc_iters + 1)
self.assertAllEqual(gen_args[0]["z"].shape.as_list(), [8 * disc_iters, 128])
self.assertAllEqual(gen_args[1]["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])
if __name__ == "__main__":
tf.test.main()
| [
"compare_gan.gans.modular_gan.ModularGAN",
"absl.testing.parameterized.parameters",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.test.main",
"compare_gan.datasets.get_dataset"
] | [((1372, 1407), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[1, 2, 5]'], {}), '([1, 2, 5])\n', (1396, 1407), False, 'from absl.testing import parameterized\n'), ((2356, 2391), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[1, 2, 5]'], {}), '([1, 2, 5])\n', (2380, 2391), False, 'from absl.testing import parameterized\n'), ((3444, 3479), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[1, 2, 5]'], {}), '([1, 2, 5])\n', (3468, 3479), False, 'from absl.testing import parameterized\n'), ((4521, 4535), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (4533, 4535), True, 'import tensorflow as tf\n'), ((1635, 1666), 'compare_gan.datasets.get_dataset', 'datasets.get_dataset', (['"""cifar10"""'], {}), "('cifar10')\n", (1655, 1666), False, 'from compare_gan import datasets\n'), ((1677, 1753), 'compare_gan.gans.modular_gan.ModularGAN', 'ModularGAN', ([], {'dataset': 'dataset', 'parameters': 'parameters', 'model_dir': 'self.model_dir'}), '(dataset=dataset, parameters=parameters, model_dir=self.model_dir)\n', (1687, 1753), False, 'from compare_gan.gans.modular_gan import ModularGAN\n'), ((2619, 2650), 'compare_gan.datasets.get_dataset', 'datasets.get_dataset', (['"""cifar10"""'], {}), "('cifar10')\n", (2639, 2650), False, 'from compare_gan import datasets\n'), ((2661, 2775), 'compare_gan.gans.modular_gan.ModularGAN', 'ModularGAN', ([], {'dataset': 'dataset', 'parameters': 'parameters', 'deprecated_split_disc_calls': '(True)', 'model_dir': 'self.model_dir'}), '(dataset=dataset, parameters=parameters,\n deprecated_split_disc_calls=True, model_dir=self.model_dir)\n', (2671, 2775), False, 'from compare_gan.gans.modular_gan import ModularGAN\n'), ((3720, 3751), 'compare_gan.datasets.get_dataset', 'datasets.get_dataset', (['"""cifar10"""'], {}), "('cifar10')\n", (3740, 3751), False, 'from compare_gan import datasets\n'), ((3762, 3880), 'compare_gan.gans.modular_gan.ModularGAN', 'ModularGAN', ([], {'dataset': 'dataset', 'parameters': 'parameters', 'experimental_joint_gen_for_disc': '(True)', 'model_dir': 'self.model_dir'}), '(dataset=dataset, parameters=parameters,\n experimental_joint_gen_for_disc=True, model_dir=self.model_dir)\n', (3772, 3880), False, 'from compare_gan.gans.modular_gan import ModularGAN\n'), ((1319, 1366), 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': '(1)'}), '(iterations_per_loop=1)\n', (1343, 1366), True, 'import tensorflow as tf\n')] |
import os
import requests
import us
from django.core.management.base import BaseCommand
from geography.models import Division, DivisionLevel
from government.models import Body, Jurisdiction, Office
from tqdm import tqdm
BASE_URL = 'https://api.propublica.org/congress/v1/'
class Command(BaseCommand):
help = (
'Scrapes the ProPublica Congress API for federal Congress offices'
)
fed = Jurisdiction.objects.get(name="U.S. Federal Government")
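    # Note: this is a class attribute, so the query runs once when the class is
    # defined (i.e. at module import time).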
def build_congressional_offices(self, chamber):
r = requests.get(
'{0}{1}/{2}/members.json'.format(BASE_URL, '115', chamber),
headers={
'X-API-Key': os.environ.get('PROPUBLICA_CONGRESS_API_KEY')
}
)
members = r.json()
print('Loading U.S. {0} offices'.format(chamber.title()))
for member in tqdm(members['results'][0]['members']):
full_state = us.states.lookup(member['state'])
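            # Skip territories (FIPS > 56) and the District of Columbia (FIPS 11),
            # presumably because no matching Division records exist for them.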
if int(full_state.fips) > 56 or int(full_state.fips) == 11:
continue
if chamber == 'senate':
for class_tup in Office.SENATE_CLASSES:
if class_tup[0] == member['senate_class']:
senate_class = class_tup[0]
name = 'U.S. Senate, {0}, Class {1}'.format(
full_state.name,
senate_class
)
division_level = DivisionLevel.objects.get(
name='state'
)
division = Division.objects.get(
level=division_level,
code_components__postal=member['state']
)
elif chamber == 'house':
senate_class = None
name = 'U.S. House, {0}, District {1}'.format(
full_state.name,
member['district']
)
division_level = DivisionLevel.objects.get(
name='district'
)
code = ('00'
if member['at_large']
else member['district'].zfill(2)
)
division = Division.objects.get(
level=division_level,
parent__code_components__postal=member['state'],
code=code
)
body = Body.objects.get(
slug=chamber,
jurisdiction=self.fed
)
Office.objects.get_or_create(
name=name,
label=name,
jurisdiction=self.fed,
division=division,
body=body,
senate_class=senate_class
)
def build_governorships(self):
state_level = DivisionLevel.objects.get(name='state')
state_jurisdictions = Jurisdiction.objects.filter(
division__level=state_level
)
print('Loading governorships')
for jurisdiction in tqdm(state_jurisdictions):
name = '{0} Governor'.format(jurisdiction.division.name)
Office.objects.get_or_create(
name=name,
label=name,
jurisdiction=jurisdiction,
division=jurisdiction.division,
)
def build_presidency(self):
USA = Division.objects.get(
code='00',
level__name='country'
)
print('Loading presidency')
Office.objects.get_or_create(
slug="president",
name="President of the United States",
label="U.S. President",
short_label="President",
jurisdiction=self.fed,
division=USA,
)
def handle(self, *args, **options):
print('Loading offices')
for chamber in ['senate', 'house']:
self.build_congressional_offices(chamber)
self.build_governorships()
self.build_presidency()
| [
"government.models.Jurisdiction.objects.filter",
"tqdm.tqdm",
"os.environ.get",
"government.models.Jurisdiction.objects.get",
"geography.models.DivisionLevel.objects.get",
"government.models.Office.objects.get_or_create",
"government.models.Body.objects.get",
"us.states.lookup",
"geography.models.Di... | [((410, 466), 'government.models.Jurisdiction.objects.get', 'Jurisdiction.objects.get', ([], {'name': '"""U.S. Federal Government"""'}), "(name='U.S. Federal Government')\n", (434, 466), False, 'from government.models import Body, Jurisdiction, Office\n'), ((856, 894), 'tqdm.tqdm', 'tqdm', (["members['results'][0]['members']"], {}), "(members['results'][0]['members'])\n", (860, 894), False, 'from tqdm import tqdm\n'), ((2837, 2876), 'geography.models.DivisionLevel.objects.get', 'DivisionLevel.objects.get', ([], {'name': '"""state"""'}), "(name='state')\n", (2862, 2876), False, 'from geography.models import Division, DivisionLevel\n'), ((2908, 2964), 'government.models.Jurisdiction.objects.filter', 'Jurisdiction.objects.filter', ([], {'division__level': 'state_level'}), '(division__level=state_level)\n', (2935, 2964), False, 'from government.models import Body, Jurisdiction, Office\n'), ((3055, 3080), 'tqdm.tqdm', 'tqdm', (['state_jurisdictions'], {}), '(state_jurisdictions)\n', (3059, 3080), False, 'from tqdm import tqdm\n'), ((3401, 3455), 'geography.models.Division.objects.get', 'Division.objects.get', ([], {'code': '"""00"""', 'level__name': '"""country"""'}), "(code='00', level__name='country')\n", (3421, 3455), False, 'from geography.models import Division, DivisionLevel\n'), ((3536, 3717), 'government.models.Office.objects.get_or_create', 'Office.objects.get_or_create', ([], {'slug': '"""president"""', 'name': '"""President of the United States"""', 'label': '"""U.S. President"""', 'short_label': '"""President"""', 'jurisdiction': 'self.fed', 'division': 'USA'}), "(slug='president', name=\n 'President of the United States', label='U.S. President', short_label=\n 'President', jurisdiction=self.fed, division=USA)\n", (3564, 3717), False, 'from government.models import Body, Jurisdiction, Office\n'), ((921, 954), 'us.states.lookup', 'us.states.lookup', (["member['state']"], {}), "(member['state'])\n", (937, 954), False, 'import us\n'), ((2424, 2477), 'government.models.Body.objects.get', 'Body.objects.get', ([], {'slug': 'chamber', 'jurisdiction': 'self.fed'}), '(slug=chamber, jurisdiction=self.fed)\n', (2440, 2477), False, 'from government.models import Body, Jurisdiction, Office\n'), ((2537, 2672), 'government.models.Office.objects.get_or_create', 'Office.objects.get_or_create', ([], {'name': 'name', 'label': 'name', 'jurisdiction': 'self.fed', 'division': 'division', 'body': 'body', 'senate_class': 'senate_class'}), '(name=name, label=name, jurisdiction=self.fed,\n division=division, body=body, senate_class=senate_class)\n', (2565, 2672), False, 'from government.models import Body, Jurisdiction, Office\n'), ((3164, 3279), 'government.models.Office.objects.get_or_create', 'Office.objects.get_or_create', ([], {'name': 'name', 'label': 'name', 'jurisdiction': 'jurisdiction', 'division': 'jurisdiction.division'}), '(name=name, label=name, jurisdiction=\n jurisdiction, division=jurisdiction.division)\n', (3192, 3279), False, 'from government.models import Body, Jurisdiction, Office\n'), ((1443, 1482), 'geography.models.DivisionLevel.objects.get', 'DivisionLevel.objects.get', ([], {'name': '"""state"""'}), "(name='state')\n", (1468, 1482), False, 'from geography.models import Division, DivisionLevel\n'), ((1548, 1636), 'geography.models.Division.objects.get', 'Division.objects.get', ([], {'level': 'division_level', 'code_components__postal': "member['state']"}), "(level=division_level, code_components__postal=member[\n 'state'])\n", (1568, 1636), False, 'from 
geography.models import Division, DivisionLevel\n'), ((669, 714), 'os.environ.get', 'os.environ.get', (['"""PROPUBLICA_CONGRESS_API_KEY"""'], {}), "('PROPUBLICA_CONGRESS_API_KEY')\n", (683, 714), False, 'import os\n'), ((1955, 1997), 'geography.models.DivisionLevel.objects.get', 'DivisionLevel.objects.get', ([], {'name': '"""district"""'}), "(name='district')\n", (1980, 1997), False, 'from geography.models import Division, DivisionLevel\n'), ((2223, 2330), 'geography.models.Division.objects.get', 'Division.objects.get', ([], {'level': 'division_level', 'parent__code_components__postal': "member['state']", 'code': 'code'}), "(level=division_level, parent__code_components__postal=\n member['state'], code=code)\n", (2243, 2330), False, 'from geography.models import Division, DivisionLevel\n')] |
"""
Supporting file for Sala 2019 trawl model
Contains a function that accepts a fishery model as input and returns the appropriate "cluster" from the
Sala study. Unfortunately, the Mediterranean trawlers are considerably smaller than the largest-scale models, so
large-scale trawl models will be inaccurately represented here.
"""
from collections import namedtuple
from .fishery import EFF_MAP
ClusterModel = namedtuple('ClusterModel', ('name', 'LOA', 'hp', 'GRT'))
clusters_list = [ClusterModel('Cluster 1', 8.19, 368.3, 42.88),
ClusterModel('Cluster 2', 15.33, 634.1, 87.53),
ClusterModel('Cluster 3', 19.77, 1345, 219.1),
ClusterModel('Cluster 4', 14.44, 1102, 65.11)]
mapping = {
'Vessel length in meters': ('LOA', 1.0),
'Gross Tonnage of vessel': ('GRT', 1.0),
'Kilowatt of engine capacity': ('hp', 1.34)
}
for k, v in EFF_MAP.items():
mapping[k] = mapping[v]
def _order_clusters(arg):
if arg not in ('LOA', 'hp', 'GRT'):
raise ValueError(arg)
return sorted(clusters_list, key=lambda x: getattr(x, arg), reverse=True)
def get_Sala_cluster(scaling_unit, scaling_value):
try:
arg, scl = mapping[scaling_unit]
except KeyError:
arg, scl = mapping[scaling_unit.name]
clusters = _order_clusters(arg)
for cluster in clusters:
if getattr(cluster, arg) < scaling_value * scl:
return cluster.name
return clusters[-1].name
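# Example read, derived from the cluster data above:
#   get_Sala_cluster('Vessel length in meters', 16.0) -> 'Cluster 2'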
| [
"collections.namedtuple"
] | [((417, 473), 'collections.namedtuple', 'namedtuple', (['"""ClusterModel"""', "('name', 'LOA', 'hp', 'GRT')"], {}), "('ClusterModel', ('name', 'LOA', 'hp', 'GRT'))\n", (427, 473), False, 'from collections import namedtuple\n')] |
'''
Copyright (c) 2017-2018, wezu (<EMAIL>)
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE
FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from __future__ import print_function
import sys
if sys.version_info >= (3, 0):
import builtins
else:
import __builtin__ as builtins
import math
import random
import string
import datetime
import os
import copy
from contextlib import contextmanager
#this is the template string for formating the info in the 'stats' frame
stat_template='''
Armor Class: {ac:>3} Hit Points: {max_hp:>3} Crit. Power/CC:{crit_power:>3}/{crit_power_cc:>3} Bonus Damage: {bonus_dmg:>2}% HEX for 95% vs AC50 (eye aim):
Action Points: {ap:>3} HP/Level: {hp_per_level:>3} Critical Res.: {crit_res:>3} Bonus Fire Dmg.:{bonus_fire_dmg:>2}% SG: {sg_range:>4}({sg_range_aim:>3})
Carry Weight: {carry_weight:>3} Skill Points: {sp:>3} Crit. Res. Head: {crit_res_head:>3} Target DR: {target_dr:>2} SG long range: {sg_longrange:>4}({sg_longrange_aim:>3})
Melee Damage: {melee:>3} SP/Level: {sp_per_level:>3} Normal DT/DR: {normal_dt}/{normal_dr:>3}% Bonus XP: {bonus_xp:>2}% BG: {bg_range:>4}({bg_range_aim:>3})
Poison Res.: {poision_res:>3} Party Points: {pp:>3} Laser DT/DR: {laser_dt}/{laser_dr:>3}% Drug time: {drug_duration:>3}% BG long range: {bg_longrange:>4}({bg_longrange_aim:>3})
Radiation Res.: {radiation_res:>3} Total Perks: {max_perks:>3} Fire DT/DR: {fire_dt}/{fire_dr:>3}% Drug heal: {drug_heal:>3}% EW: {ew_range:>4}({ew_range_aim:>3})
Healing Rate: {healing_rate:>3} Unused Perks: {free_perks:>3} Plasma DT/DR: {plasma_dt}/{plasma_dr:>3}% FA healed: ~{fa_healed:>3} EW long range: {ew_longrange:>4}({ew_longrange_aim:>3})
Crit. Chance: {crit_chance:>3} Sight: {sight_a}/{sight_b}/{sight_c}/{sight_d:>2} Explode DT/DR: {explode_dt}/{explode_dr:>3}% FA cooldown: {fa_cooldown:>3} Throwing: {tw_range:>4}({tw_range_aim:>3})
CC. Crit. Chance:{crit_chance_cc:>3} Level: {lvl:>3} Electric DT/DR: {electrical_dt}/{electrical_dr:>3}% Doc cooldown: {doc_cooldown:>3}
'''
#this is the template string for formating the info in the 'target' frame
target_template='''Armor Class: 30
DT: DR:
NORMAL: {normal_dt:>2} {normal_dr:>2}
LASER: {laser_dt:>2} {laser_dr:>2}
FIRE: {fire_dt:>2} {fire_dr:>2}
PLASMA: {plasma_dt:>2} {plasma_dr:>2}
EXPLODE: {explode_dt:>2} {explode_dr:>2}
ELECTRIC: {electric_dt:>2} {electric_dr:>2}
CRITICAL RESISTANCE:
POWER: {crit_pow:>2}
CHANCE: {crit_c:>2}
ENDURANCE: {e:>2}
LUCK: {l:>2}
STRENGTH: {s:>2}
'''
class Stats():
'''
    This class handles calculating all FOnline stats
'''
def __init__(self):
#create some data structures used later
self.memory=[]
self.bonuses={}
self.perks={}
self.traits={}
self.skills={}
self.last_level_skills={}
self.tags={}
#fill in some data structures
self.current_gun=None
self.level=1
self.aim_mode='UNAIMED'
self.special={'s':5,'p':5,'e':5,'c':5,'i':5,'a':5,'l':5,'none':5}
self.target_stats={ 'ac':0, 'normal_dr':0, 'normal_dt':0, 'laser_dr':0,
'laser_dt': 0, 'fire_dr':0, 'fire_dt':0,
'plasma_dr':0, 'plasma_dt': 0, 'explode_dr': 0,
'explode_dt': 0, 'electric_dr':0, 'electric_dt':0,
'crit_c':0 ,'crit_pow':0, 'l':5, 's':5, 'e':5, None:0}
        #critical-chance bonus for each aim mode; also used as the to-hit penalty
self.aim_bonus={'EYE':60,
'HEAD':40,
'TORSO':0,
'GROIN':30,
'HANDS':30,
'LEGS':20,
'UNAIMED':0,
'BURST':0}
self.derived={'max_hp':0,'hp_per_level':0, 'sp':0, 'sp_per_level':0,'pp':0,
'max_perks':8, 'free_perks':0, 'sight':0, 'sequence':0,'lvl':0,
'ac':0, 'ap':0, 'carry_weight':0, 'melee':0, 'poision_res':0,
'radiation_res':0, 'healing_rate':0, 'crit_chance':0}
#some traits change SPECIAL, they are listed here
self.trait_special={'Bruiser':{'s':4},'Bonehead':{'i':-1}}
#maximum level of skills
self.skill_limits={'Small Guns':300,'Big Guns':300,'Energy Guns':300,
'Close Combat':300,'Throwing':300,'First Aid':200,
'Doctor':200,'Lockpick':150,'Repair':125,'Science':125,
'Outdoorsman':175,'Scavenging':0,'Sneak':270,'Steal':150,
'Traps':150,'Speech':300,'Gambling':150,'Barter':150}
#requirements for perks:
        # perk_point - 1 or 0; if 0 then the perk is a 'free' support perk
# level - minimum character level to buy perk
# skill - dictionary of minimum required skill levels to buy perk
# min_special - dictionary of minimum required special to buy perk (empty dict for no requirements)
# max_special- dictionary of maximum special to buy perk (empty dict for no requirements)
# perks - list of perk names required to buy this perk
self.perk_req={
'More Critical':{'perk_point':1, 'level':3, 'skill':{'Small Guns':100,'Big Guns':100,'Energy Guns':100,'Close Combat':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Quick Pockets':{'perk_point':1, 'level':3, 'skill':{}, 'min_special':{'a':5}, 'max_special':{}, 'perks':[]},
'Adrenaline Rush':{'perk_point':1, 'level':3, 'skill':{}, 'min_special':{'s':5}, 'max_special':{}, 'perks':[]},
'Quick Recovery':{'perk_point':1, 'level':3, 'skill':{}, 'min_special':{'s':6}, 'max_special':{}, 'perks':[]},
'Weapon Handling':{'perk_point':1, 'level':3, 'skill':{'Small Guns':100,'Big Guns':100,'Energy Guns':100,'Close Combat':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'In Your Face!':{'perk_point':1, 'level':6, 'skill':{'Close Combat':125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Even More Criticals':{'perk_point':1, 'level':6, 'skill':{'Small Guns':125,'Big Guns':125,'Energy Guns':125,'Close Combat':125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Silent Running':{'perk_point':1, 'level':6, 'skill':{'Sneak':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Toughness':{'perk_point':1, 'level':6, 'skill':{}, 'min_special':{'e':4}, 'max_special':{}, 'perks':[]},
'Sharpshooter':{'perk_point':1, 'level':9, 'skill':{'Small Guns':150,'Big Guns':150,'Energy Guns':150,'Close Combat':150}, 'min_special':{'i':3}, 'max_special':{}, 'perks':[]},
'Pyromaniac':{'perk_point':1, 'level':9, 'skill':{'Small Guns':100,'Big Guns':100,'Energy Guns':100,'Close Combat':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Close Combat Master':{'perk_point':1, 'level':9, 'skill':{'Close Combat':150}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Even Tougher':{'perk_point':1, 'level':9, 'skill':{}, 'min_special':{'e':6}, 'max_special':{}, 'perks':[]},
'Stonewall':{'perk_point':1, 'level':9, 'skill':{}, 'min_special':{'s':6}, 'max_special':{}, 'perks':[]},
'Medic':{'perk_point':1, 'level':9, 'skill':{'Doctor':125, 'First Aid':125}, 'min_special':{'i':3}, 'max_special':{}, 'perks':[]},
'Heave Ho!':{'perk_point':1, 'level':9, 'skill':{'Throwing':125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Bonus Ranged Dmg.':{'perk_point':1, 'level':9, 'skill':{'Small Guns':150,'Big Guns':150,'Energy Guns':150}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Lifegiver 1':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Gain ST':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'s':9}, 'perks':[]},
'Gain PE':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'p':9}, 'perks':[]},
'Gain EN':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'e':9}, 'perks':[]},
'Gain CH':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'c':9}, 'perks':[]},
'Gain IN':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'i':9}, 'perks':[]},
'Gain AG':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'a':9}, 'perks':[]},
'Gain LK':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{}, 'max_special':{'l':9}, 'perks':[]},
'Better Critical':{'perk_point':1, 'level':12, 'skill':{'Small Guns':175,'Big Guns':175,'Energy Guns':175,'Close Combat':175}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Ghost':{'perk_point':1, 'level':12, 'skill':{'Sneak':150}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Action Boy 1':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{'a':6}, 'max_special':{}, 'perks':[]},
'Action Boy 2':{'perk_point':1, 'level':12, 'skill':{}, 'min_special':{'a':6}, 'max_special':{}, 'perks':[]},
'Lifegiver 2':{'perk_point':1, 'level':15, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Dodger 1':{'perk_point':1, 'level':15, 'skill':{'Close Combat':150 }, 'min_special':{'a':8}, 'max_special':{}, 'perks':[]},
'Dodger 2':{'perk_point':1, 'level':18, 'skill':{'Close Combat':175 }, 'min_special':{'a':10}, 'max_special':{}, 'perks':['Dodger 1']},
'Livewire':{'perk_point':1, 'level':15, 'skill':{}, 'min_special':{'a':6}, 'max_special':{}, 'perks':[]},
            'Man of Steel':{'perk_point':1, 'level':15, 'skill':{}, 'min_special':{'e':8}, 'max_special':{}, 'perks':[]},
'Field Medic':{'perk_point':1, 'level':15, 'skill':{'Doctor':175, 'First Aid':175}, 'min_special':{}, 'max_special':{}, 'perks':['Medic']},
'Iron Limbs':{'perk_point':1, 'level':15, 'skill':{}, 'min_special':{'s':6, 'e':6}, 'max_special':{}, 'perks':[]},
'Silent Death':{'perk_point':1, 'level':15, 'skill':{'Sneak':175}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'More Ranged Dmg.':{'perk_point':1, 'level':15, 'skill':{'Small Guns':200,'Big Guns':200,'Energy Guns':200}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Lifegiver 3':{'perk_point':1, 'level':18, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Bonus Rate of Attack':{'perk_point':1, 'level':18, 'skill':{'Small Guns':180,'Big Guns':180,'Energy Guns':180,'Close Combat':180}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Boneyard Guard SG':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Boneyard Guard BG':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Boneyard Guard EW':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Boneyard Guard CC':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Boneyard Guard THW':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Cautious Nature':{'perk_point':0, 'level':2, 'skill':{'Outdoorsman':100}, 'min_special':{'p':6}, 'max_special':{}, 'perks':[]},
'Dead Man Walking':{'perk_point':0, 'level':2, 'skill':{'Doctor':50}, 'min_special':{'i':5}, 'max_special':{}, 'perks':[]},
'Demolition Expert':{'perk_point':0, 'level':2, 'skill':{'Traps':125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Dismantler':{'perk_point':0, 'level':2, 'skill':{'Science':120}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Educated':{'perk_point':0, 'level':2, 'skill':{'Science':100}, 'min_special':{'i':8}, 'max_special':{}, 'perks':[]},
'Explorer':{'perk_point':0, 'level':2, 'skill':{'Outdoorsman':150}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Faster Healing':{'perk_point':0, 'level':2, 'skill':{'Doctor':75}, 'min_special':{'i':6}, 'max_special':{}, 'perks':[]},
'Gecko Skinning':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Harmless':{'perk_point':0, 'level':2, 'skill':{'Steal':125}, 'min_special':{'c':6}, 'max_special':{}, 'perks':[]},
'Light Step':{'perk_point':0, 'level':2, 'skill':{'Traps': 100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Magnetic Personality':{'perk_point':0, 'level':2, 'skill':{'Speech':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Master Thief':{'perk_point':0, 'level':2, 'skill':{'Steal':125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Mr. Fixit':{'perk_point':0, 'level':2, 'skill':{'Repair':120}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Negotiator':{'perk_point':0, 'level':2, 'skill':{'Barter': 125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Pack Rat':{'perk_point':0, 'level':3, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Pathfinder':{'perk_point':0, 'level':2, 'skill':{'Outdoorsman':150}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Pickpocket':{'perk_point':0, 'level':2, 'skill':{'Steal': 125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Rad Resistance':{'perk_point':0, 'level':2, 'skill':{'Doctor':100}, 'min_special':{'i':7}, 'max_special':{}, 'perks':[]},
'Ranger':{'perk_point':0, 'level':2, 'skill':{'Outdoorsman':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Scout':{'perk_point':0, 'level':2, 'skill':{'Outdoorsman':150}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Sex Appeal':{'perk_point':0, 'level':2, 'skill':{'Speech':75}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Snakeater':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{'e':6}, 'max_special':{}, 'perks':[]},
'Speaker':{'perk_point':0, 'level':2, 'skill':{'Speech': 125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Stealth Girl':{'perk_point':0, 'level':2, 'skill':{'Sneak':100, 'Repair':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Strong Back':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{'e':6}, 'max_special':{}, 'perks':[]},
'Swift Learner':{'perk_point':0, 'level':2, 'skill':{'Science':50}, 'min_special':{'i':6}, 'max_special':{}, 'perks':[]},
'Thief':{'perk_point':0, 'level':2, 'skill':{'Steal':100}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Treasure Hunter':{'perk_point':0, 'level':2, 'skill':{'Lockpick':125}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Repair x10':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'First Aid x10':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Small Guns x10':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Outdoorsman x10':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Barter x10':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]},
'Science x10':{'perk_point':0, 'level':2, 'skill':{}, 'min_special':{}, 'max_special':{}, 'perks':[]}
}
        #some perks give a skill bonus; this dict tells how much and of what skill
#the values are in 'skill points' not raw points
self.perk_skill_bonus={'Repair x10':{'Repair':60},
'First Aid x10':{'First Aid':60},
'Small Guns x10':{'Small Guns':60},
'Outdoorsman x10':{'Outdoorsman':60},
'Barter x10':{'Barter':60},
'Science x10':{'Science':60},
'Boneyard Guard SG':{'Small Guns':10},
'Boneyard Guard BG':{'Big Guns':10},
'Boneyard Guard CC':{'Close Combat':10},
'Boneyard Guard THW':{'Throwing':10},
'Boneyard Guard EW':{'Energy Guns':10}}
        #some perks give special bonuses; this dict tells how much
self.perk_special_bonus={'Gain ST':{'s':2},
'Gain PE':{'p':2},
'Gain EN':{'e':2},
'Gain CH':{'c':2},
'Gain IN':{'i':2},
'Gain AG':{'a':2},
'Gain LK':{'l':2}}
#known guns (or other weapons)
#min_dmg and max_dmg - damage dealt by gun
#max_range - maximum gun range
#min_burst - minimum number of bullets hitting a target when using burst mode, or 0 if gun can't burst
#max_burst - maximum number of bullets hitting a target when using burst mode, or 0 if gun can't burst
#skill - name of the skill used by this gun
#ammo_dr - ammunition DR modifier (should be negative)
#ammo_ac- ammunition AC modifier
        #ammo_dmg - ammunition damage modifier (damage adjustment), should be a float if !=1 (eg. 1.5 not 3/2 unless running py3+)
#min_st - minimum strength required by gun
#ap - action points needed to fire (in single mode)
#dmg_type - type of damage dealt by weapon ('normal','laser','fire', 'plasma','explode','electric')
#hands - number of hands needed to use gun
#perks - list of perks the gun(and ammo!) has
self.guns={
'.223 Pistol':{'min_dmg':30, 'max_dmg':40, 'max_range':30, 'min_burst':0, 'max_burst':0,
'skill':'Small Guns', 'ammo_dr':-30, 'ammo_ac':-20, 'ammo_dmg':1,'min_st':5, 'ap':4,
'dmg_type':'normal','hands':1, 'perks':{'Penetrate'}},
'Assault Rifle':{'min_dmg':25, 'max_dmg':35, 'max_range':50, 'min_burst':4, 'max_burst':6,
'skill':'Small Guns', 'ammo_dr':-35, 'ammo_ac':0, 'ammo_dmg':0.667,'min_st':5, 'ap':5,
'dmg_type':'normal','hands':2, 'perks':{'Long Range','Penetrate'}},
'Sniper Rifle':{'min_dmg':20, 'max_dmg':40, 'max_range':50, 'min_burst':0, 'max_burst':0,
'skill':'Small Guns', 'ammo_dr':-30, 'ammo_ac':-20, 'ammo_dmg':1,'min_st':5, 'ap':5,
'dmg_type':'normal','hands':2, 'perks':{'Long Range'}},
'Needler Pistol':{'min_dmg':15, 'max_dmg':30, 'max_range':24, 'min_burst':0, 'max_burst':0,
'skill':'Small Guns', 'ammo_dr':0, 'ammo_ac':-35, 'ammo_dmg':1.5,'min_st':3, 'ap':4,
'dmg_type':'normal','hands':1, 'perks':{'Penetrate'}},
'H&K CAWS':{'min_dmg':22, 'max_dmg':30, 'max_range':25, 'min_burst':5, 'max_burst':5,
'skill':'Small Guns', 'ammo_dr':-25, 'ammo_ac':-10, 'ammo_dmg':1,'min_st':6, 'ap':5,
'dmg_type':'normal','hands':2, 'perks':{'Penetrate','Knockback'}},
'H&K P90c':{'min_dmg':15, 'max_dmg':25, 'max_range':30, 'min_burst':5, 'max_burst':7,
'skill':'Small Guns', 'ammo_dr':-35, 'ammo_ac':0, 'ammo_dmg':0.8571,'min_st':4, 'ap':4,
'dmg_type':'normal','hands':1, 'perks':{'Penetrate'}},
'FN FAL':{'min_dmg':22, 'max_dmg':33, 'max_range':35, 'min_burst':10, 'max_burst':10,
'skill':'Small Guns', 'ammo_dr':-10, 'ammo_ac':-5, 'ammo_dmg':1.2,'min_st':5, 'ap':5,
'dmg_type':'normal','hands':2, 'perks':{'Fast Reload'}},
'M79':{'min_dmg':50, 'max_dmg':90, 'max_range':25, 'min_burst':0, 'max_burst':0,
'skill':'Small Guns', 'ammo_dr':-15, 'ammo_ac':0, 'ammo_dmg':1,'min_st':5, 'ap':6,
'dmg_type':'explode','hands':2, 'perks':{}},
'Avenger':{'min_dmg':12, 'max_dmg':15, 'max_range':35, 'min_burst':14, 'max_burst':20,
'skill':'Big Guns', 'ammo_dr':-35, 'ammo_ac':0, 'ammo_dmg':0.667,'min_st':7, 'ap':7,
'dmg_type':'normal','hands':2, 'perks':{'Accurate','Penetrate'}},
'Incinerator':{'min_dmg':100, 'max_dmg':125, 'max_range':10, 'min_burst':0, 'max_burst':0,
'skill':'Big Guns', 'ammo_dr':0, 'ammo_ac':-20, 'ammo_dmg':1,'min_st':7, 'ap':7,
'dmg_type':'fire','hands':1, 'perks':{}},
'Rocket Launcher':{'min_dmg':40, 'max_dmg':100, 'max_range':40, 'min_burst':0, 'max_burst':0,
'skill':'Big Guns', 'ammo_dr':-25, 'ammo_ac':-15, 'ammo_dmg':1,'min_st':7, 'ap':7,
'dmg_type':'explode','hands':1, 'perks':{'Long Range','Penetrate'}},
'LSW':{'min_dmg':22, 'max_dmg':37, 'max_range':45, 'min_burst':5, 'max_burst':5,
'skill':'Big Guns', 'ammo_dr':-30, 'ammo_ac':-20, 'ammo_dmg':1, 'min_st':6, 'ap':5,
'dmg_type':'normal','hands':1, 'perks':{'Long Range'}},
'Laser Rifle':{'min_dmg':50, 'max_dmg':60, 'max_range':47, 'min_burst':0, 'max_burst':0,
'skill':'Energy Guns', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':6, 'ap':5,
'dmg_type':'laser','hands':2, 'perks':{'Long Range'}},
'Plasma Pistol':{'min_dmg':30, 'max_dmg':45, 'max_range':32, 'min_burst':0, 'max_burst':0,
'skill':'Energy Guns', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':4,
'dmg_type':'plasma','hands':1, 'perks':{}},
'Plasma Rifle':{'min_dmg':30, 'max_dmg':45, 'max_range':32, 'min_burst':0, 'max_burst':0,
'skill':'Energy Guns', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':4,
'dmg_type':'plasma','hands':2, 'perks':{'Long Range'}},
'Gatling Laser':{'min_dmg':64, 'max_dmg':84, 'max_range':40, 'min_burst':4, 'max_burst':6,
'skill':'Energy Guns', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':7, 'ap':7,
'dmg_type':'laser','hands':2, 'perks':{'Long Range'}},
'Pulse Pistol':{'min_dmg':35, 'max_dmg':50, 'max_range':30, 'min_burst':0, 'max_burst':0,
'skill':'Energy Guns', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':3, 'ap':4,
'dmg_type':'electric','hands':1, 'perks':{'Penetrate'}},
'Mega Power Fist':{'min_dmg':21, 'max_dmg':41, 'max_range':1, 'min_burst':0, 'max_burst':0,
'skill':'Close Combat', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':1, 'ap':3,
'dmg_type':'electric','hands':1, 'perks':{'Penetrate'}},
'Wakizashi Blade':{'min_dmg':15, 'max_dmg':27, 'max_range':1, 'min_burst':0, 'max_burst':0,
'skill':'Close Combat', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':2, 'ap':4,
'dmg_type':'normal','hands':1, 'perks':{'Penetrate'}},
'Super Sledge':{'min_dmg':36, 'max_dmg':72, 'max_range':2, 'min_burst':0, 'max_burst':0,
'skill':'Close Combat', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':5, 'ap':4,
'dmg_type':'normal','hands':2, 'perks':{'Knockback'}},
'Super Cattle Prod':{'min_dmg':40, 'max_dmg':62, 'max_range':1, 'min_burst':0, 'max_burst':0,
'skill':'Close Combat', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':3,
'dmg_type':'electric','hands':1, 'perks':{'Accurate'}},
'Louisville Slugger':{'min_dmg':24, 'max_dmg':60, 'max_range':1, 'min_burst':0, 'max_burst':0,
'skill':'Close Combat', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':4,
'dmg_type':'normal','hands':1, 'perks':{'Knockback','Knockout'}},
'Ripper':{'min_dmg':16, 'max_dmg':33, 'max_range':1, 'min_burst':0, 'max_burst':0,
'skill':'Close Combat', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':3,
'dmg_type':'normal','hands':1, 'perks':{'Penetrate'}},
'Frag Grenade':{'min_dmg':35, 'max_dmg':60, 'max_range':15, 'min_burst':0, 'max_burst':0,
'skill':'Throwing', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':3, 'ap':5,
'dmg_type':'explode','hands':1, 'perks':{}},
'Plasma Grenade':{'min_dmg':60, 'max_dmg':120, 'max_range':15, 'min_burst':0, 'max_burst':0,
'skill':'Throwing', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':4,
'dmg_type':'plasma','hands':1, 'perks':{}},
'Fire Grenade':{'min_dmg':40, 'max_dmg':80, 'max_range':15, 'min_burst':0, 'max_burst':0,
'skill':'Throwing', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':4, 'ap':4,
'dmg_type':'fire','hands':1, 'perks':{}},
'Dynacord':{'min_dmg':60, 'max_dmg':100, 'max_range':15, 'min_burst':0, 'max_burst':0,
'skill':'Throwing', 'ammo_dr':0, 'ammo_ac':0, 'ammo_dmg':1,'min_st':3, 'ap':5,
'dmg_type':'explode','hands':1, 'perks':{}}
}
#armor statistics presets
self.armor_stats={'Thermal':{ 'ac': 10, 'normal_dr': 30, 'normal_dt': 4, 'laser_dr': 20, 'laser_dt': 0, 'fire_dr': 75, 'fire_dt': 4, 'plasma_dr': 10, 'plasma_dt': 0, 'explode_dr': 35, 'explode_dt': 4, 'electric_dr': 40, 'electric_dt': 1, 'crit_c': -10 ,'crit_pow': -5 },
'Tesla':{ 'ac': 15, 'normal_dr': 25, 'normal_dt': 3, 'laser_dr': 85, 'laser_dt': 10, 'fire_dr': 10, 'fire_dt': 0, 'plasma_dr': 75, 'plasma_dt': 10, 'explode_dr': 20, 'explode_dt': 1, 'electric_dr': 80, 'electric_dt': 12, 'crit_c': -5 ,'crit_pow': -10 },
'CA':{ 'ac': 20, 'normal_dr': 40, 'normal_dt': 5, 'laser_dr': 60, 'laser_dt': 6, 'fire_dr': 25, 'fire_dt': 3, 'plasma_dr': 50, 'plasma_dt': 4, 'explode_dr': 40, 'explode_dt': 5, 'electric_dr': 45, 'electric_dt': 2, 'crit_c': -10 ,'crit_pow': -10 },
'CA mk2':{ 'ac': 25, 'normal_dr': 40, 'normal_dt': 6, 'laser_dr': 65, 'laser_dt': 7, 'fire_dr': 30, 'fire_dt': 4, 'plasma_dr': 50, 'plasma_dt': 5, 'explode_dr': 40, 'explode_dt': 6, 'electric_dr': 50, 'electric_dt': 3, 'crit_c': -10 ,'crit_pow': -10 },
'PA':{ 'ac': 30, 'normal_dr': 40, 'normal_dt': 12, 'laser_dr': 80, 'laser_dt': 18, 'fire_dr': 60, 'fire_dt': 12, 'plasma_dr': 40, 'plasma_dt': 10, 'explode_dr': 50, 'explode_dt': 20, 'electric_dr': 40, 'electric_dt': 12, 'crit_c': -15 ,'crit_pow': -15 },
'APA':{ 'ac': 30, 'normal_dr': 55, 'normal_dt': 15, 'laser_dr': 90, 'laser_dt': 19, 'fire_dr': 70, 'fire_dt': 16, 'plasma_dr': 60, 'plasma_dt': 15, 'explode_dr': 65, 'explode_dt': 20, 'electric_dr': 60, 'electric_dt': 15, 'crit_c': -15 ,'crit_pow': -15 }
}
#damage modifier for critical hits
#the values are pairs, such that the first value is the chance (eg 20=20%) and the second the modifier (eg. 3.0 =x3)
#the order is relevant, the first pair is for the lowest critical hit power roll, the last for the highest
self.critical_hit_dmg={
'EYE': [[20, 3.0],[25, 3.0], [25,3.0],[20,3.5], [10, 3.5],[0, 3.5]],
'HEAD': [[20, 3.0],[25, 3.0], [25,3.0],[20,3.0], [10, 3.0],[0, 3.0]],
'TORSO': [[20, 3.0],[25, 3.0], [25,3.0],[20,3.5], [10, 3.5],[0, 3.5]],
'GROIN': [[20, 2.5],[25, 2.5], [25,3.0],[20,3.0], [10, 3.0],[0, 3.0]],
'HANDS' :[[20, 2.5],[25, 2.5], [25,2.5],[20,3.0], [10, 3.0],[0, 3.0]],
'LEGS': [[20, 2.5],[25, 2.5], [25,2.5],[20,3.0], [10, 3.0],[0, 3.0]],
'UNAIMED':[[20, 1.5],[25, 1.5], [25,1.5],[20,2.0], [10, 2.0],[0, 2.0]],
'BURST': [[20, 1.5],[25, 1.5], [25,1.5],[20,2.0], [10, 2.0],[0, 2.0]]
}
        #effects of critical hits
#the values are pairs, such that the first value is the chance (eg 20=20%),
#the second is a dictionary mess...
# the key is either None for guaranteed effects
# or a tuple with a s.p.e.c.i.a.l. name and roll penalty value
# eg. ('e',-2) = "Roll EN-2"
        # the s.p.e.c.i.a.l. value of None is used for the uncanny nameless 'Roll'
        # the value in the dict is a tuple of effect names (note the "," in one-value entries!)
#just like self.critical_hit_dmg, the order is relevant, the first entry is for the lowest critical hit power roll, the last for the highest
# TODO(low):change this data structure for something sane!
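        # Example read from the EYE table below: [20, {('e',-2): ('Knockdown','Blinded'), None: ('Blinded',)}]
        # means this band has a 20% chance; 'Blinded' is guaranteed, while the extra
        # 'Knockdown'+'Blinded' pair hinges on the target's Endurance-2 roll.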
self.critical_hit_effect={
'EYE': [[20, None],
[25, {(None, 5):('Knockdown',)}],
[25, {('l',0):('Knockdown','Blinded')}],
[20, {('e',-2):('Knockdown','Blinded'), None:('Blinded',)}],
[10, {('l',0):('Knockout',), None:('Knockout','Blinded')}],
[0, {('e',0):('Death',), None:('Knockout','Blinded')}]
],
'HEAD': [[20, {('e',0):('Knockdown',)}],
[25, {(None, 5):('Knockdown',)}],
[25, {('e',-2):('Knockdown',)}],
[20, {('e',-2):('Knockout',), None:('Knockdown',)}],
[10, {None:('Knockout',)}],
[0, {('e',0):('Death',), None:('Knockout',)}]
],
'TORSO': [[20, {None:('Knockdown',)}],
[25, {None:('Knockdown',)}],
[25, {None:('Knockdown',)}],
[20, {None:('Knockdown',)}],
[10, {None:('Knockdown',)}],
[0, {None:('Knockdown',)}]
],
'GROIN': [[20, {('e',0):('Knockdown',)}],
[25, {('e',3):('Knockout',), None:('Knockdown',)}],
[25, {('e',0):('Knockout',), None:('Knockdown',)}],
[20, {('e',-2):('Knockout',), None:('Knockdown',)}],
[10, {None:('Knockout',)}],
[0, {('e',3):('Death',), None:('Knockout',)}],
],
'HANDS': [[20, {('s',3):('Weapon Drop',)}],
[25, {('s',2):('Weapon Drop',)}],
[25, {('s',1):('Weapon Drop',)}],
[20, {('s',0):('Cripple Hand',), None:('Weapon Drop',)}],
[10, {None:('Weapon Drop','Cripple Hand')}],
[0, {None:('Weapon Drop','Cripple Hand')}]
],
'LEGS': [[20, {('e',5):('Cripple Leg',), None:('Knockdown',)}],
[25, {('e',2):('Cripple Leg',), None:('Knockdown',)}],
[25, {('e',0):('Cripple Leg',), None:('Knockdown',)}],
[20, {('e',0):('Cripple Leg',), None:('Knockdown',)}],
[10, {('e',0):('Knockout',), None:('Knockdown','Cripple Leg')}],
[0, {None:('Knockout','Cripple Leg')}]
],
'UNAIMED':[[20, None],
[25, None],
[25, None],
[20, {None:('Knockdown',)}],
[10, {None:('Knockdown',)}],
[0, {None:('Knockout',)}]
],
'BURST': [[20, None],[25, None], [25,None],[20,None], [10, None],[0, None]] #???
}
self._calc_derived()
def _on_load(self):
self.bonuses={}
self.current_gun=None
self.aim_mode='UNAIMED'
self.target_stats={ 'ac':0, 'normal_dr':0, 'normal_dt':0, 'laser_dr':0,
'laser_dt': 0, 'fire_dr':0, 'fire_dt':0,
'plasma_dr':0, 'plasma_dt': 0, 'explode_dr': 0,
'explode_dt': 0, 'electric_dr':0, 'electric_dt':0,
'crit_c':0 ,'crit_pow':0, 'l':5, 's':5, 'e':5, None:0}
for name, element in game.gui.elements.items():
if (
name.startswith('trait_frame_button_') or
name.startswith('perk_frame_button_') or
name.startswith('skill_frame_button_') or
name.startswith('bonus_frame_button_') or
name.startswith('weapon_frame_button_') or
name.startswith('target_frame_button_') or
name.startswith('hit_frame_button_')
):
game.gui.highlight(on=False, name=name)
for name in self.traits:
button_name='trait_frame_button_'+name.lower().replace(' ', '')
game.gui.highlight(on=True, name=button_name)
for name in self.perks:
button_name='perk_frame_button_'+name.lower().replace(' ', '')
game.gui.highlight(on=True, name=button_name)
for name in self.tags:
button_name='skill_frame_button_'+name.lower().replace(' ', '')
game.gui.highlight(on=True, name=button_name)
self.update_ui()
def _cals_skills(self):
'''
Calculates the starting values for all skills
'''
if self.level==1:
self.skills['Small Guns']=5 + (4 *self.special['a'])
self.skills['Big Guns']=2 *self.special['a']
self.skills['Energy Guns']=10 + (1 *self.special['a'])
self.skills['Close Combat']= 30 + 2 * (self.special['a'] + self.special['s'])
self.skills['Throwing']=40 + self.special['a']
self.skills['First Aid']=30 + ((self.special['p']+self.special['i'])//2)
self.skills['Doctor']=15 + ((self.special['p']+self.special['i'])//2)
self.skills['Lockpick']= 10 + (self.special['p'] +self.special['a'])
self.skills['Repair']= 20 + (self.special['i'])
self.skills['Science']= 25 + (2 * self.special['i'])
self.skills['Outdoorsman']=5 + (1 * (self.special['i'] + self.special['e'])//2)
self.skills['Scavenging']=0
self.skills['Sneak']=5 + 3 *self.special['a']
self.skills['Steal']= 3 *self.special['a']
self.skills['Traps']= 20+ (self.special['p'] + self.special['a'] )//2
self.skills['Speech']= 25 + (2 * self.special['c'])
self.skills['Gambling']= 5*self.special['l']
self.skills['Barter']=20 + (2 * self.special['c'])
for name in self.tags:
self.skills[name]+=20
def _calc_derived(self):
'''
Calculate all 'derived' stats.
'''
        #these are not affected by bonuses
self._cals_skills()
self.derived['hp_per_level']=self.special['e']/2
self.derived['max_hp']=math.ceil(55+self.special['s']+(2*self.special['e'])+min(self.level-1, 23)*self.derived['hp_per_level'])+(30 if 'Lifegiver 1' in self.perks else 0)+(30 if 'Lifegiver 2' in self.perks else 0)+(30 if 'Lifegiver 3' in self.perks else 0)
self.derived['sp_per_level']=5+ self.special['i']*2 + (5 if 'Skilled' in self.traits else 0) + (2 if 'Educated' in self.perks else 0)
        #these are affected by bonuses
with self._special_bonus():
self.derived['max_perks']= 6 if 'Skilled' in self.traits else 8
self.derived['pp']= 10*self.special['c']+self.skills['Speech']//3 +(50 if 'Magnetic Personality' in self.perks else 0) +(50 if 'Good Natured' in self.traits else 0)
self.derived['sight']=20+self.special['p']*3 +(6 if 'Sharpshooter' in self.perks else 0)
self.derived['sight_a']=20+self.special['p']*3 +(6 if 'Sharpshooter' in self.perks else 0)
self.derived['sight_b']=17+self.special['p']*2 +(6 if 'Sharpshooter' in self.perks else 0)
self.derived['sight_c']=9+self.special['p'] +(6 if 'Sharpshooter' in self.perks else 0)
self.derived['sight_d']=6+self.special['p'] +(6 if 'Sharpshooter' in self.perks else 0)
self.derived['sequence']=2+self.special['p']*2
self.derived['ac']=3*self.special['a']*(2 if 'Livewire' in self.perks else 1)
self.derived['ap']=5+self.special['a']//2+(1 if 'Action Boy 1' in self.perks else 0)+(1 if 'Action Boy 2' in self.perks else 0)+(-2 if 'Bruiser' in self.traits else 0)
self.derived['carry_weight']=int((20+((25+(self.special['s']*25))/2.2)) *(1.33 if 'Pack Rat' in self.perks else 1.0) +(22 if 'Strong Back' in self.perks else 0))
self.derived['melee']=max(self.special['s']-5, 1)*(2 if 'Bruiser' in self.traits else 1)+(5 if 'Heavy Handed' in self.traits else 0)+(10 if 'Close Combat Master' in self.perks else 0)
self.derived['poision_res']=5*self.special['e']+(20 if 'Rad Resistance' in self.perks else 0)+(30 if 'Snakeater' in self.perks else 0)
self.derived['radiation_res']=2*self.special['e']+(30 if 'Rad Resistance' in self.perks else 0)+(20 if 'Snakeater' in self.perks else 0)
self.derived['healing_rate']= (7+self.special['e']//2)*(2 if 'Fast Metabolism' in self.traits else 1)+(5 if 'Faster Healing' in self.perks else 0)
self.derived['crit_chance']=self.special['l']+(5 if 'More Critical' in self.perks else 0)+(10 if 'Even More Criticals' in self.perks else 0) +(10 if 'Finesse' in self.traits else 0)
self.derived['crit_chance_cc']=self.derived['crit_chance']+(15 if 'Close Combat Master' in self.perks else 0)
self.derived['crit_power']= (20 if 'Better Critical' in self.perks else 0)+ (-20 if 'Heavy Handed' in self.traits else 0)
self.derived['crit_power_cc'] = self.derived['crit_power'] +(5 if 'Better Critical' in self.perks else 0)
self.derived['crit_res'] = (10 if 'Man of Steel' in self.perks else 0)
self.derived['crit_res_head']= self.derived['crit_res'] + (10 if 'Bonehead' in self.traits else 0)
self.derived['normal_dt']= (1 if 'Toughness' in self.perks else 0)+(3 if 'Even Tougher' in self.perks else 0)
self.derived['normal_dr']= (5 if 'Toughness' in self.perks else 0)+(10 if 'Even Tougher' in self.perks else 0)-(10 if 'Kamikaze' in self.traits else 0)
self.derived['laser_dt']= (1 if 'Toughness' in self.perks else 0)
self.derived['laser_dr']= (5 if 'Toughness' in self.perks else 0)-(10 if 'Kamikaze' in self.traits else 0)
self.derived['fire_dt']= (1 if 'Toughness' in self.perks else 0)
            self.derived['fire_dr']= (5 if 'Toughness' in self.perks else 0)-(10 if 'Kamikaze' in self.traits else 0)
self.derived['plasma_dt']= (1 if 'Toughness' in self.perks else 0)
self.derived['plasma_dr']= (5 if 'Toughness' in self.perks else 0)-(10 if 'Kamikaze' in self.traits else 0)
self.derived['explode_dt']= (1 if 'Toughness' in self.perks else 0)
self.derived['explode_dr']= (5 if 'Toughness' in self.perks else 0)-(10 if 'Kamikaze' in self.traits else 0)
self.derived['electrical_dt']= (1 if 'Toughness' in self.perks else 0)
self.derived['electrical_dr']= (5 if 'Toughness' in self.perks else 0)-(10 if 'Kamikaze' in self.traits else 0)
self.derived['bonus_dmg']=(10 if 'Kamikaze' in self.traits else 0)
self.derived['bonus_fire_dmg']=(25 if 'Pyromaniac' in self.perks else 0)
self.derived['target_dr']=(30 if 'Finesse' in self.traits else 0)
self.derived['bonus_xp']=(10 if 'Loner' in self.traits else 0)+(10 if 'Swift Learner' in self.perks else 0)
self.derived['drug_heal']=(66 if 'Chem Reliant' in self.traits else 100)
self.derived['drug_duration']=int(100*(2 if 'Chem Reliant' in self.traits else 1)*(0.5 if 'Fast Metabolism' in self.traits else 1))
self.derived['fa_healed']= int(0.7*self.skills['First Aid'])+(30 if 'Field Medic' in self.perks else 0)+(22 if 'Medic' in self.perks else 0)
self.derived['fa_cooldown']=min(180, 180-(self.skills['First Aid']//25 -2)*15)
self.derived['doc_cooldown']=min(180, 180-(self.skills['Doctor']//25 -2)*15)
self.derived['lvl']=self.level
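            # Weapon range figures shown in the stats frame: the distance (in hexes) at which
            # the hit chance is still 95% against AC 50. The 'longrange' variants double the
            # Perception bonus (Long Range weapon perk) and the '_aim' variants add the
            # 60-point eye-aim penalty from self.aim_bonus.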
self.derived['sg_range']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Small Guns'])/4+2*(self.special['p']-2))
self.derived['bg_range']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Big Guns'])/4+2*(self.special['p']-2))
self.derived['ew_range']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Energy Guns'])/4+2*(self.special['p']-2))
self.derived['sg_longrange']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Small Guns'])/4+4*(self.special['p']-2))
self.derived['bg_longrange']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Big Guns'])/4+4*(self.special['p']-2))
self.derived['ew_longrange']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Energy Guns'])/4+4*(self.special['p']-2))
self.derived['sg_range_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Small Guns'])/4+2*(self.special['p']-2))
self.derived['bg_range_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Big Guns'])/4+2*(self.special['p']-2))
self.derived['ew_range_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Energy Guns'])/4+2*(self.special['p']-2))
self.derived['sg_longrange_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Small Guns'])/4+4*(self.special['p']-2))
self.derived['bg_longrange_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Big Guns'])/4+4*(self.special['p']-2))
self.derived['ew_longrange_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Energy Guns'])/4+4*(self.special['p']-2))
self.derived['tw_range']=int(-(95+50-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Throwing'])/4+2*(self.special['p']-2))
self.derived['tw_range_aim']=int(-(95+50+60-(8 if 'Sharpshooter' in self.perks else 0)-self.skills['Throwing'])/4+2*(self.special['p']-2))
self.derived['min_dmg']=0
self.derived['max_dmg']=0
self.derived['accuracy']=0
self.derived['range']=0
#apply bonus from drugs, armors and weapons
for bonus in self.bonuses.values():
for name, value in bonus.items():
if name in self.derived:
self.derived[name]+=value
def bonus(self, source, values):
'''Adds a temporary bonus to any statistic.
source is a unique name to identify the source of the bonus (eg. drug name)
values is a dict of name:value for the stats (can be special, or derived)
'''
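        # Hypothetical example (name and values purely illustrative): self.bonus('Buffout', {'s': 2, 'e': 3});
        # calling it again with the same source name toggles the bonus back off.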
button_name='bonus_frame_button_'+source.lower().replace(' ', '')
if source in self.bonuses:
del self.bonuses[source]
game.gui.highlight(on=False, name=button_name)
else:
self.bonuses[source]=values
game.gui.highlight(on=True, name=button_name)
self.update_ui()
@contextmanager
def _special_bonus(self, *args):
'''Context manager for applying temporary bonuses
'''
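        # Applies the S.P.E.C.I.A.L. bonuses stored in self.bonuses, clamping every stat to the
        # 1-10 range for the duration of the with-block, then restores the original values.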
missing_special={}
if self.bonuses:
for source, bonus in self.bonuses.items():
for bonus_name, value in bonus.items():
if bonus_name in 'special':
self.special[bonus_name]+=value
if self.special[bonus_name]>10:
self.bonuses[source][bonus_name]-=self.special[bonus_name]-10
self.special[bonus_name]=10
for special_name, value in self.special.items():
if value<1 and special_name !='none':
self.special[special_name]=1
missing_special[special_name]=value
try:
yield
finally:
if self.bonuses:
for special_name, value in missing_special.items():
self.special[special_name]=value
for bonus in self.bonuses.values():
for bonus_name, value in bonus.items():
if bonus_name in 'special':
self.special[bonus_name]-=value
def get_txt(self):
'''Returns a string with the summary of the stats
'''
preview='Level: '+str(self.level)+'\n'
preview+='S: {s}\nP: {p}\nE: {e}\nC: {c}\nI: {i}\nA: {a}\nL: {l}\n'.format(**self.special)
preview+='Traits:\n'
for trait in self.traits:
preview+=' -'+trait+'\n'
preview+='Perks:\n'
for perk, level in self.perks.items():
preview+=' -'+perk+' ('+str(level)+')\n'
preview+='Skills:\n'
for skill, level in self.skills.items():
if skill in self.tags:
preview+=' +'
else:
preview+=' -'
preview+=str(skill+':').ljust(13)+str(level)+'\n'
return preview
def dump(self):
'''Saves the character to a text file
'''
name_of_file=game.gui['filename_input'].get()
if name_of_file =='':
name_of_file=datetime.datetime.now().strftime('%Y_%m_%d_%H%M%S_')
name_of_file+=''.join([random.choice(string.ascii_letters+string.digits) for ch in range(8)])
name_of_file+='.txt'
while os.path.exists(name_of_file):
name_of_file=datetime.datetime.now().strftime('%Y_%m_%d_%H%M%S_')
name_of_file+=''.join([random.choice(string.ascii_letters+string.digits) for ch in range(8)])
name_of_file+='.txt'
game.gui['filename_input'].enterText(name_of_file)
with open(name_of_file, 'w') as the_file:
the_file.write('FOnline:Reloaded Season 3 Character:\n')
the_file.write('S: {s}\nP: {p}\nE: {e}\nC: {c}\nI: {i}\nA: {a}\nL: {l}\n'.format(**self.special))
the_file.write('\nLevel: '+str(self.level)+'\n')
the_file.write('\nTraits:\n')
for trait in self.traits:
the_file.write(' -'+trait+'\n')
the_file.write('\nPerks:\n')
for perk, level in self.perks.items():
the_file.write(' -'+perk+' ('+str(level)+')\n')
the_file.write('\nSkills:\n')
for skill, level in self.skills.items():
if skill in self.tags:
the_file.write(' +')
else:
the_file.write(' -')
the_file.write(str(skill+':').ljust(13)+str(level)+'\n')
the_file.write('\nStats:\n')
the_file.write('Armor Class: {ac:>3}\nAction Points: {ap:>3}\nCarry Weight: {carry_weight:>3}\nMelee Damage: {melee:>3}\nPoison Res.: {poision_res:>3}\nRadiation Res.: {radiation_res:>3}\nHealing Rate: {healing_rate:>3}\nCrit. Chance: {crit_chance:>3}\nCC. Crit. Chance:{crit_chance_cc:>3}\nHit Points: {max_hp:>3}\nHP/Level: {hp_per_level:>3}\nSP/Level: {sp_per_level:>3}\nParty Points: {pp:>3}\nSight: {sight_a}\nFA healed: {fa_healed:>3}\nDoc cooldown: {doc_cooldown:>3}\n'.format(**self.derived))
#game.gui['save']['text']='SAVED !'
game.gui['feedback'].set_text('Exported to: '+name_of_file)
game.gui['feedback_node'].show()
game.toggle_database()
def level_up(self):
'''Level up, adds skills, hp, sp, perks etc
'''
if self.special['none']!=0:
game.gui['feedback'].set_text('You need to use all S.P.E.C.I.A.L. points!')
return
if len(self.tags)!=3:
game.gui['feedback'].set_text('You need to TAG 3 Skills!')
return
if self.level==1:
for char in 'special':
game.gui['special_'+char+'_minus'].hide()
game.gui['special_'+char+'_plus'].hide()
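        # Snapshot the current state so level_down() can restore it later.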
self.last_level_skills= {k:v for k,v in self.skills.items()} #deepcopy
self.memory.append({'special':{k:v for k,v in self.special.items()},
'skills':{k:v for k,v in self.skills.items()},
'last_level_skills':{k:v for k,v in self.last_level_skills.items()},
'sp':int(self.derived['sp']),
'free_perks':int(self.derived['free_perks'])})
self.level+=1
self.derived['lvl']=self.level
self.derived['sp']+=self.derived['sp_per_level']
if self.level <25:
perks_level=3
if 'Skilled' in self.traits:
perks_level+=1
if self.level%perks_level ==0:
self.derived['free_perks']+=1
self.update_ui()
def level_down(self):
'''Same as level_up only in reverse.
'''
if self.level==1:
return
elif self.level==2:
for char in 'special':
game.gui['special_'+char+'_minus'].show()
game.gui['special_'+char+'_plus'].show()
#undo bonuses
for source, value in self.bonuses.items():
button_name='bonus_frame_button_'+source.lower().replace(' ', '')
game.gui.highlight(on=False, name=button_name)
self.bonuses={}
if game.gui['trait_frame'].is_hidden():
game.gui.show_hide(['trait_frame', 'perk_frame','skill_frame'],
['bonus_frame','weapon_frame', 'target_frame','hit_frame'])
game.gui['items']['text']='INVENTORY'
self.level-=1
last_level=self.memory.pop()
self.special=last_level['special']
self.skills=last_level['skills']
self.derived['sp']=last_level['sp']
self.derived['free_perks']=last_level['free_perks']
self.last_level_skills =last_level['last_level_skills']
for name, level in {k:v for k,v in self.perks.items()}.items():
if level > self.level:
del self.perks[name]
button_name='perk_frame_button_'+name.lower().replace(' ', '')
game.gui.highlight(on=False, name=button_name)
self.update_ui()
def set_special(self, name, value):
        '''Function for setting S.P.E.C.I.A.L. values at level 1
'''
if self.level == 1:
if self.special['none'] < value:
return
if 0 < self.special[name]+value <=10:
self.special['none']-=value
self.special[name]+=value
self.update_ui()
def set_trait(self, name):
''' Function for picking traits at level 1
'''
if self.level == 1:
button_name='trait_frame_button_'+name.lower().replace(' ', '')
if name in self.traits: #remove
if name in self.trait_special:
for special, value in self.trait_special[name].items():
if self.special[special]-value >10 or self.special[special]-value <1:
return
for special, value in self.trait_special[name].items():
self.special[special]-=value
del self.traits[name]
game.gui.highlight(on=False, name=button_name)
elif len(self.traits)<2: #add
if name in self.trait_special:
for special, value in self.trait_special[name].items():
if self.special[special]+value >10 or self.special[special]+value <1:
return
for special, value in self.trait_special[name].items():
self.special[special]+=value
self.traits[name]=0
game.gui.highlight(on=True, name=button_name)
self.update_ui()
def _check_perk_req(self, name):
'''Returns True if the character fulfils all the requirements for a given perk, else returns False
name - name of the perk to check
'''
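        # The Boneyard Guard support perks are mutually exclusive and are refused once any
        # gun skill (Small/Big/Energy Guns) reaches 65.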
if name.startswith('Boneyard Guard'):
if self.skills['Small Guns'] >=65 or self.skills['Big Guns'] >=65 or self.skills['Energy Guns'] >=65:
return False
for perk_name in self.perks:
if perk_name.startswith('Boneyard Guard'):
return False
#check if double
if name in self.perks:
return False
#check level
if self.level< self.perk_req[name]['level']:
return False
#check free perk points
if self.perk_req[name]['perk_point'] > self.derived['free_perks']:
return False
#check skill
if self.perk_req[name]['skill']:
skill_pass=False
for skill, value in self.perk_req[name]['skill'].items():
if self.skills[skill]>= value:
skill_pass=True
if not skill_pass:
return False
#check special
for special, value in self.perk_req[name]['min_special'].items():
if self.special[special]<value:
return False
for special, value in self.perk_req[name]['max_special'].items():
if self.special[special]>value:
return False
#check perks
for perk in self.perk_req[name]['perks']:
if perk not in self.perks:
return False
return True
def set_perk(self, name):
'''Function for picking perks, checks requirements before actually giving the perk
'''
if self._check_perk_req(name):
self.perks[name]=self.level
self.derived['free_perks']-= self.perk_req[name]['perk_point']
if name in self.perk_skill_bonus:
for skill, value in self.perk_skill_bonus[name].items():
self.upgrade_skill(skill, value)
if name in self.perk_special_bonus:
for special, value in self.perk_special_bonus[name].items():
self.special[special]+=value
if self.special[special]>10:
self.special[special]=10
button_name='perk_frame_button_'+name.lower().replace(' ', '')
game.gui.highlight(on=True, name=button_name)
self.update_ui()
else:
req_txt ='"'+name+'" perk requires: level:'+str(self.perk_req[name]['level'])+', '
for skill, value in self.perk_req[name]['skill'].items():
req_txt+=skill+': '+str(value)+', '
for special, value in self.perk_req[name]['min_special'].items():
req_txt+=special.upper()+'.>='+str(value)+', '
for special, value in self.perk_req[name]['max_special'].items():
req_txt+=special.upper()+'.<'+str(value)+', '
for perk in self.perk_req[name]['perks']:
req_txt+=perk+', '
game.gui['feedback'].set_text(req_txt.strip(' ').strip(','))
def tag_skill(self, name):
'''Function for tagging skills at level 1,
        if not on level 1, dumps all available points into the name skill
name - name of the skill
'''
if self.level==1:
button_name='skill_frame_button_'+name.lower().replace(' ', '')
if name in self.tags:
del self.tags[name]
game.gui.highlight(on=False, name=button_name)
elif len(self.tags)<3:
self.tags[name]=0
game.gui.highlight(on=True, name=button_name)
self.update_ui()
else:
self.set_skill(name, self.derived['sp'])
def _get_skill_cost(self, name, sign=1):
'''Returns the cost of raising a skill by 1 point or lowering it by 1
name - name of the skill
sign - if sign ==-1 returns skill points gained for lowering a skill
'''
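        # Cost tiers implied by the checks below: 1 SP below 100, 2 from 100, 3 from 125,
        # 4 from 150, 5 from 175 and 6 from 200 (thresholds shift by one point when
        # computing the refund for lowering a skill).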
v=0 if sign>0 else 1
cost=1
if self.skills[name] >=100+v:
cost+=1
if self.skills[name] >=125+v:
cost+=1
if self.skills[name] >=150+v:
cost+=1
if self.skills[name] >=175+v:
cost+=1
if self.skills[name] >=200+v:
cost+=1
return cost
def upgrade_skill(self, name, value):
''' Raises the name skill by value skill points without using SP!
name - name of the skill
value - number of skill points used
'''
cost=self._get_skill_cost(name)
while value>=cost:
self.skills[name]+=1
if name in self.tags:
self.skills[name]+=1
value-=cost
cost=self._get_skill_cost(name)
if (self.skills[name] >= self.skill_limits[name]):
self.skills[name] = self.skill_limits[name]
self.update_ui()
def set_skill(self, name, value=1):
''' Raises the name skill by value skill points USING SP!
name - name of the skill
value - number of skill points used
'''
if self.level!=1:
i=0
sign=(1 if value>0 else -1)
cost=self._get_skill_cost(name, sign)
#print (cost, '<=', self.derived['sp']*sign, ':', cost <= self.derived['sp']*sign)
while cost*sign <= self.derived['sp'] and i <abs(value):
if self.skills[name]+sign<self.last_level_skills[name]:
return
if (self.skills[name] >= self.skill_limits[name]) and sign==1:
self.skills[name] = self.skill_limits[name]
self.update_ui()
return
self.derived['sp']-=cost*sign
self.skills[name]+=sign
if name in self.tags:
self.skills[name]+=sign
if (self.skills[name] >= self.skill_limits[name]):
self.skills[name] = self.skill_limits[name]
cost=self._get_skill_cost(name, sign)
i+=1
self.update_ui()
def gun(self, name):
'''Sets the current used gun to name
name - the name of the gun (per self.guns)
'''
self.current_gun=name
button_name='weapon_frame_button_'+name.lower().replace(' ', '')
for name, element in game.gui.elements.items():
if name.startswith('weapon_frame_button'):
game.gui.highlight(on=False, name=name)
game.gui.highlight(on=True, name=button_name)
self.fire_mode(None, False)
self.update_ui()
def target_stat(self, name, value):
'''Changes the target_stats name by value
name - name of the stats
value - value added to self.target_stats[name]
'''
self.target_stats[name]+=value
game.gui['target_txt'].set_text(target_template.format(**self.target_stats))
self.update_ui()
def target_preset(self, name):
'''Sets the target_stats to a pre-defined values
name - name of the preset (armor)
'''
for stat_name, value in self.armor_stats[name].items():
self.target_stats[stat_name]=value
button_name='target_frame_button_'+name.lower().replace(' ', '')
for name, element in game.gui.elements.items():
if name.startswith('target_frame_button'):
game.gui.highlight(on=False, name=name)
game.gui.highlight(on=True, name=button_name)
game.gui['target_txt'].set_text(target_template.format(**self.target_stats))
self.update_ui()
def fire_mode(self, name=None, update_ui=True):
'''Changes the current aim mode
name - name of the aim mode
        update_ui - whether or not to update the gui (when used inside self.update_ui())
'''
if name is None:
name=self.aim_mode
if self.current_gun:
if name=='BURST' and self.guns[self.current_gun]['min_burst']==0:
name='UNAIMED'
if self.guns[self.current_gun]['dmg_type']=='explode':
name='UNAIMED'
if self.current_gun in ('Avenger', 'Gatling Laser'):
name='BURST'
self.aim_mode=name
button_name='hit_frame_button_'+name.lower().replace(' ', '')
for name, element in game.gui.elements.items():
if name.startswith('hit_frame_button'):
game.gui.highlight(on=False, name=name)
game.gui.highlight(on=True, name=button_name)
if update_ui:
self.update_ui()
def roll_special(self, special, mod):
'''Returns the chance a S.P.E.C.I.A.L. roll for the target will FAIL
'''
return 1.0-float(min(10.0,max(1.0,self.target_stats[special]+mod)))/10.0
def update_ui(self):
'''Updates all the gui with changed values
'''
self._calc_derived()
if self.current_gun:
with self._special_bonus():
sharpshooter=8 if 'Sharpshooter' in self.perks else 0
skill=self.skills[self.guns[self.current_gun]['skill']]
pe_bonus=2*(self.special['p']-2)
if 'Long Range' in self.guns[self.current_gun]['perks']:
pe_bonus*=2
ac=self.target_stats['ac']
aim=self.aim_bonus[self.aim_mode]
missing_st=max(0,(self.guns[self.current_gun]['min_st']-self.special['s'])*20)
max_hex=int(-(95+ac+aim+missing_st-sharpshooter-skill)/4+pe_bonus)
base_dmg=float(self.guns[self.current_gun]['min_dmg']+
+self.derived['min_dmg']*self.guns[self.current_gun]['min_dmg']+
+self.guns[self.current_gun]['max_dmg']+
+self.derived['max_dmg']*self.guns[self.current_gun]['max_dmg']
)/2.0
dmg_type=self.guns[self.current_gun]['dmg_type']
#melee and bonus 'per-bullet' damage
if self.guns[self.current_gun]['skill'] in ('Small Guns','Big Guns','Energy Guns'):
if 'Bonus Ranged Dmg.' in self.perks:
base_dmg+=3
if 'More Ranged Dmg.' in self.perks:
base_dmg+=4
if self.guns[self.current_gun]['skill'] =='Close Combat':
base_dmg+=self.derived['melee']
#critical hit damage
crit_power=int(self.derived['crit_power'])
crit_chance=int(self.derived['crit_chance'])
if self.guns[self.current_gun]['skill'] == 'Close Combat':
crit_chance=self.derived['crit_chance_cc']
crit_power=self.derived['crit_power_cc']
#aim
crit_chance+=aim*(60+4*self.special['l'])//100
#hidden bonus
crit_chance+=4
#target armor
if 'Sharpshooter' in self.perks and self.aim_mode not in ('UNAIMED', 'BURST'):
if 'Finesse' in self.traits:
crit_chance+=3*self.target_stats['crit_c']//8
else:
crit_chance+=self.target_stats['crit_c']//2
else:
if 'Finesse' in self.traits:
crit_chance+=3*self.target_stats['crit_c']//4
else:
crit_chance+=self.target_stats['crit_c']
crit_power+=self.target_stats['crit_pow']
#% to float
crit_chance=float(crit_chance)/100.0
crit_hit_dmg=copy.deepcopy(self.critical_hit_dmg[self.aim_mode])
if crit_power !=0:
crit_hit_dmg[0][0]-=crit_power
if crit_hit_dmg[0][0]<0:
crit_hit_dmg[1][0]+=crit_hit_dmg[0][0]
crit_hit_dmg[0][0]=0
crit_hit_dmg[-1][0]+=crit_power
if crit_hit_dmg[-1][0]<0:
crit_hit_dmg[-2][0]+=crit_hit_dmg[-1][0]
crit_hit_dmg[-1][0]=0
if crit_hit_dmg[-2][0]<0:
crit_hit_dmg[-3][0]+=crit_hit_dmg[-2][0]
crit_hit_dmg[-2][0]=0
if crit_hit_dmg[-3][0]<0:
crit_hit_dmg[-4][0]+=crit_hit_dmg[-3][0]
crit_hit_dmg[-3][0]=0
crit_dmg=0
for chance, multi in crit_hit_dmg:
crit_dmg+=base_dmg*multi*chance/100.0
dmg=(crit_dmg*crit_chance)+((1.0-crit_chance)*base_dmg)
#critical hit effects
crit_hit_effect=copy.deepcopy(self.critical_hit_effect[self.aim_mode])
if crit_power !=0:
crit_hit_effect[0][0]-=crit_power
if crit_hit_effect[0][0]<0:
crit_hit_effect[1][0]+=crit_hit_effect[0][0]
crit_hit_effect[0][0]=0
crit_hit_effect[-1][0]+=crit_power
if crit_hit_effect[-1][0]<0:
crit_hit_effect[-2][0]+=crit_hit_effect[-1][0]
crit_hit_effect[-1][0]=0
if crit_hit_effect[-2][0]<0:
crit_hit_effect[-3][0]+=crit_hit_effect[-2][0]
crit_hit_effect[-2][0]=0
if crit_hit_effect[-3][0]<0:
crit_hit_effect[-4][0]+=crit_hit_effect[-3][0]
crit_hit_effect[-3][0]=0
crit_effect={}
for chance, result in crit_hit_effect:
if result is not None:
for roll, effects in result.items():
if roll is None:
for effect in effects:
if effect in crit_effect:
crit_effect[effect]+=chance*crit_chance
else:
crit_effect[effect]=chance*crit_chance
else:
for effect in effects:
if effect in crit_effect:
crit_effect[effect]+=chance*crit_chance*self.roll_special(*roll)
else:
crit_effect[effect]=chance*crit_chance*self.roll_special(*roll)
if 'Knockout' in self.guns[self.current_gun]['perks']:
if 'Knockout' in crit_effect:
crit_effect['Knockout']+=100*crit_chance
else:
crit_effect['Knockout']=100*crit_chance
#heavy handed
if self.guns[self.current_gun]['skill'] == 'Close Combat' and 'Heavy Handed' in self.traits:
if self.current_gun == 'Mega Power Fist':
if 'Knockdown' in crit_effect:
crit_effect['Knockdown']+=min(10.0,max(1.0,self.special['s']))*10.0
if crit_effect['Knockdown'] >100.0:
crit_effect['Knockdown']=100.0
else:
crit_effect['Knockdown']=min(10.0,max(1.0,self.special['s']))*10.0
#armor DT
if 'Penetrate' in self.guns[self.current_gun]['perks']:
dmg-=self.target_stats[dmg_type+'_dt']//3
else:
dmg-=self.target_stats[dmg_type+'_dt']
#armor DR
dr=max(0, min(95, self.target_stats[dmg_type+'_dr']+self.guns[self.current_gun]['ammo_dr']+self.derived['target_dr']))/100.0
#factor in critical hit armor bypass
if self.aim_mode not in ('UNAIMED', 'BURST'):
bypass = (50.0*min(100, max(0, 51+crit_power - self.target_stats['l'])))/10000.0
dr=((1.0-crit_chance)*dr)+(crit_chance*dr*(1.0-bypass))
dmg*=1.0-float(dr)
#kamikaze or other final dmg bonus
dmg+=dmg*float(self.derived['bonus_dmg'])/100.0
if dmg_type == 'fire':
dmg+=dmg*float(self.derived['bonus_fire_dmg'])/100.0
hit_text=''
if self.aim_mode == 'BURST':
min_base_dmg=base_dmg*self.guns[self.current_gun]['min_burst']
max_base_dmg=base_dmg*self.guns[self.current_gun]['max_burst']
min_dmg=dmg*self.guns[self.current_gun]['min_burst']
max_dmg=dmg*self.guns[self.current_gun]['max_burst']
hit_text="95% range: {max_hex}\nBase Damage:\n {min_base_dmg:3.1f}-{max_base_dmg:3.1f}\nDamage:\n {min_dmg:3.1f}-{max_dmg:3.1f}\n\n".format(max_hex=max_hex, min_base_dmg=min_base_dmg, max_base_dmg=max_base_dmg, min_dmg=min_dmg, max_dmg=max_dmg)
else:
hit_text="95% range: {max_hex}\nBase Damage: {base_dmg:3.1f}\nDamage: {dmg:3.1f}\n\n".format(max_hex=max_hex, base_dmg=base_dmg, dmg=dmg)
for effect, chance in crit_effect.items():
if chance >0.0:
hit_text+='{chance:>4.1f}% {effect}\n'.format(chance=chance, effect=effect)
if 'Knockback' in self.guns[self.current_gun]['perks']:
hit_text+=' Knockback\n'
if dmg_type == 'explode':
hit_text+=' Explode Knockback\n'
game.gui['hiteffect'].set_text(hit_text)
if game.gui['save']['text']=='SAVED !':
game.gui['save']['text']='SAVE AS .TXT'
with self._special_bonus():
special_formated='{s}\n\n{p}\n\n{e}\n\n{c}\n\n{i}\n\n{a}\n\n{l}\n\n{none}'.format(**self.special)
stats_formated=stat_template.format(**self.derived)
game.gui['specialtxt'].set_text(special_formated)
game.gui['statstxt'].set_text(stats_formated)
game.gui['level'].set_text('LEVEL:'+str(self.level))
for name, value in self.skills.items():
button_name='skill_frame_button_'+name.lower().replace(' ', '')
game.gui[button_name]['text']='{name:<13}{value:>4}%'.format(name=name, value=value)
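# --- Editor's sketch, not part of the original file -------------------------
# Standalone restatement of the tiered skill-point cost rule implemented by
# _get_skill_cost() above: each 25-point threshold past 100% adds one SP to
# the cost of the next point. The helper name below is invented for the
# illustration; only the thresholds are taken from the method.
def _skill_cost_sketch(current_value):
    cost = 1
    for threshold in (100, 125, 150, 175, 200):
        if current_value >= threshold:
            cost += 1
    return cost
# e.g. _skill_cost_sketch(99) == 1, _skill_cost_sketch(100) == 2,
#      _skill_cost_sketch(130) == 3, _skill_cost_sketch(200) == 6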
| [
"os.path.exists",
"random.choice",
"datetime.datetime.now",
"copy.deepcopy"
] | [((48802, 48830), 'os.path.exists', 'os.path.exists', (['name_of_file'], {}), '(name_of_file)\n', (48816, 48830), False, 'import os\n'), ((67149, 67200), 'copy.deepcopy', 'copy.deepcopy', (['self.critical_hit_dmg[self.aim_mode]'], {}), '(self.critical_hit_dmg[self.aim_mode])\n', (67162, 67200), False, 'import copy\n'), ((68242, 68296), 'copy.deepcopy', 'copy.deepcopy', (['self.critical_hit_effect[self.aim_mode]'], {}), '(self.critical_hit_effect[self.aim_mode])\n', (68255, 68296), False, 'import copy\n'), ((48592, 48615), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (48613, 48615), False, 'import datetime\n'), ((48680, 48731), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (48693, 48731), False, 'import random\n'), ((48861, 48884), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (48882, 48884), False, 'import datetime\n'), ((48953, 49004), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (48966, 49004), False, 'import random\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
def go(*args): # event handler; *args receives the variable arguments
    print(comboxlist.get()) # print the currently selected value
win=tkinter.Tk() # build the main window
comvalue=tkinter.StringVar() # StringVar that holds the combobox text
comboxlist=ttk.Combobox(win,textvariable=comvalue) # initialise the combobox
comboxlist["values"]=("1","2","3","4")
comboxlist.current(0) # select the first entry by default
comboxlist.bind("<<ComboboxSelected>>",go) # bind the selection event: go() runs when an item is chosen
comboxlist.pack()
win.mainloop() # enter the event loop
| [
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.ttk.Combobox"
] | [((161, 173), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (171, 173), False, 'import tkinter\n'), ((189, 208), 'tkinter.StringVar', 'tkinter.StringVar', ([], {}), '()\n', (206, 208), False, 'import tkinter\n'), ((234, 274), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['win'], {'textvariable': 'comvalue'}), '(win, textvariable=comvalue)\n', (246, 274), False, 'from tkinter import ttk\n')] |
# Generated by Django 2.2.5 on 2019-09-20 23:14
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('adjudication', '0005_auto_20190918_0711'),
]
operations = [
migrations.AlterField(
model_name='panelist',
name='owners',
field=models.ManyToManyField(blank=True, related_name='panelists', to=settings.AUTH_USER_MODEL),
),
]
| [
"django.db.models.ManyToManyField"
] | [((376, 470), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""panelists"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='panelists', to=settings.\n AUTH_USER_MODEL)\n", (398, 470), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
#
# Read configfile and return it.
'''
Wolfspyre Configurator. v 0.01
Who doesn't like Config Files.
'''
import yaml
from pprint import pprint
from PyBTSteward.dict_utils import smerge_dicts, merge_dict
import logging
logger = logging.getLogger(__name__)
def wpl_cfg(base_cfg='config.yml',custom_config='local_config.yml'):
"""Read in our config file and return a parsed configuration object"""
_config_from_file = {}
with open(base_cfg) as f:
_config_from_file = yaml.load(f)
with open(custom_config) as c:
_custom_cfg = yaml.load(c)
_merged_config = merge_dict(_config_from_file, _custom_cfg)
_config = _merged_config.copy()
_fattened_eddy_devices = {}
_default_eddy_attrs = _merged_config['Beacons']['eddystone']['default'].copy()
_eddy_devices = _merged_config['Beacons']['eddystone']['devices']
for _eddy, _eddy_dict in _eddy_devices.items():
_defaults = _default_eddy_attrs.copy()
if _config_from_file['Logging']['list_devices_in_cfg']:
logger.debug('Merging %s with defaults', _eddy_dict['name'])
_fattened_eddy_devices[_eddy] = smerge_dicts(_defaults, _eddy_dict)
_config['Beacons']['eddystone']['devices'] = _fattened_eddy_devices
if _config['Logging']['print_on_load'] == True:
pprint(_config)
else:
logger.info('Configuration Reloaded')
return _config
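# --- Editor's usage sketch, not part of the original module -----------------
# Shows the call pattern the docstring describes: a base config merged with a
# local override file. The file names are just the function's defaults; any
# existing pair of YAML files would do.
if __name__ == '__main__':
    cfg = wpl_cfg(base_cfg='config.yml', custom_config='local_config.yml')
    pprint(cfg.get('Logging', {}))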
| [
"logging.getLogger",
"PyBTSteward.dict_utils.smerge_dicts",
"PyBTSteward.dict_utils.merge_dict",
"yaml.load",
"pprint.pprint"
] | [((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((509, 521), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (518, 521), False, 'import yaml\n'), ((581, 593), 'yaml.load', 'yaml.load', (['c'], {}), '(c)\n', (590, 593), False, 'import yaml\n'), ((619, 661), 'PyBTSteward.dict_utils.merge_dict', 'merge_dict', (['_config_from_file', '_custom_cfg'], {}), '(_config_from_file, _custom_cfg)\n', (629, 661), False, 'from PyBTSteward.dict_utils import smerge_dicts, merge_dict\n'), ((1196, 1231), 'PyBTSteward.dict_utils.smerge_dicts', 'smerge_dicts', (['_defaults', '_eddy_dict'], {}), '(_defaults, _eddy_dict)\n', (1208, 1231), False, 'from PyBTSteward.dict_utils import smerge_dicts, merge_dict\n'), ((1376, 1391), 'pprint.pprint', 'pprint', (['_config'], {}), '(_config)\n', (1382, 1391), False, 'from pprint import pprint\n')] |
from dataclasses import dataclass
from uff.origin import Origin
from uff.rotation import Rotation
@dataclass
class PlaneWaveOrigin(Origin):
rotation: Rotation = Rotation()
| [
"uff.rotation.Rotation"
] | [((168, 178), 'uff.rotation.Rotation', 'Rotation', ([], {}), '()\n', (176, 178), False, 'from uff.rotation import Rotation\n')] |
import os
import shutil
import sys
import glob
import time
import datetime
import logging
from media_grouper.image import Image
from media_grouper.video import Video
from media_grouper.detect import Detector
from typing import List
class MediaGrouper:
def __init__(self,
src: str,
dst: str,
prefixes: List,
start_date: str,
end_date: str,
group_by: str) -> None:
"""
:param src: path to source folder with media data
:param dst: output folder
:param prefixes: list with prefixes like timestamp, quality, faces.
:param start_date: begin date
:param end_date: end date
:param group_by: day/month/year
"""
self.src_folder = src
self.dst_folder = os.path.abspath(dst)
self.prefixes = prefixes
self.date_range = self.get_datetime_range(start_date, end_date)
self.group_by = group_by
def create_destination_folder(self, force_rewrite=False) -> None:
"""
Creates the destination folder.
:param force_rewrite: enable/disable y/n dialog
"""
if os.path.exists(self.dst_folder):
if not force_rewrite:
if input(("The folder '{}' is not empty. \n"
"Press 'y' to delete it: ").format(self.dst_folder)) != "y":
sys.exit()
logging.info("Removed folder: '{}'".format(self.dst_folder))
shutil.rmtree(self.dst_folder)
os.makedirs(self.dst_folder)
logging.info("Created folder: '{}'".format(self.dst_folder))
def get_new_dst_media_name(self, src_media, _date):
"""
:param src_media: path to media file
:param _date: media creation timestamp
:return new media names with prefixes
"""
prefix = ""
if "timestamp" in self.prefixes:
prefix += "{}_".format(int(time.mktime(_date.timetuple())))
faces = None
if "faces" in self.prefixes:
faces = src_media.find_faces()
prefix += "{} person(s)_".format(len(faces))
if "quality" in self.prefixes:
if faces is None:
faces = src_media.find_faces()
quality = src_media.get_quality(faces)
prefix += "quality: {}_".format(quality)
return "{}{}".format(prefix, os.path.basename(src_media.path))
def get_subfolder_name(self, media_date:datetime) -> str:
"""
        Creates the subfolder name; depends on self.group_by.
        :param media_date: timestamp
        :return: subfolder name at the chosen granularity [day/month/year]
"""
subfolder_name = None
if self.group_by == "day":
subfolder_name = media_date.strftime("%Y-%m-%d")
elif self.group_by == "month":
subfolder_name = media_date.strftime("%Y-%b")
else:
subfolder_name = media_date.strftime("%Y")
return subfolder_name
@staticmethod
def get_datetime_range(start_date, end_date):
"""
TBA
"""
if start_date or end_date:
if not start_date:
start_date = datetime.datetime.min
if not end_date:
end_date = datetime.datetime.now()
else:
return None
return (start_date, end_date)
def process_media_data(self) -> None:
"""
Walks throught the source folder and finds all objects
"""
detector = Detector()
logging.info("Working...")
# recursively walks through all files in the source folder
for filename in glob.iglob(self.src_folder + '**/**', recursive=True):
# filter files
if os.path.isdir(filename):
continue
_, file_extension = os.path.splitext(filename)
if file_extension[1:] in Image.SUPPORTED_FORMATS:
media = Image(filename, detector)
faces = media.find_faces()
media.extract_faces(faces)
elif file_extension[1:] in Video.SUPPORTED_FORMATS:
media = Video(filename, None)
media.get_exif_data()
else:
                logging.warning("Unsupported file format: %s", filename)
continue
# read creation date from exif data
media_date = media.get_creation_date()
            # date filtering is optional; when a range is given, skip files outside it
            if self.date_range is not None:
                if not (self.date_range[0] <= media_date <= self.date_range[1]):
continue
# add prefix (if exists)
dst_name = self.get_new_dst_media_name(media, media_date)
# check if order_by exists and create subfolders
if self.group_by:
sub_folder = self.get_subfolder_name(media_date)
sub_folder_path = os.path.join(self.dst_folder, sub_folder)
if not os.path.exists(sub_folder_path):
os.makedirs(sub_folder_path)
new_path = os.path.join(sub_folder_path, dst_name)
else:
new_path = os.path.join(self.dst_folder, dst_name)
logging.info("Copied {} -> {}".format(filename, new_path))
shutil.copy(media.path, new_path)
logging.info("Done!")
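# --- Editor's usage sketch, not part of the original module -----------------
# Illustrates the intended call sequence described in __init__'s docstring;
# the folder paths and option values below are invented for the example.
if __name__ == "__main__":
    grouper = MediaGrouper(
        src="./photos",          # hypothetical source folder
        dst="./sorted",          # hypothetical output folder
        prefixes=["timestamp"],  # any subset of: timestamp, faces, quality
        start_date=None,
        end_date=None,
        group_by="month",        # one of: day / month / year
    )
    grouper.create_destination_folder(force_rewrite=True)
    grouper.process_media_data()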
| [
"os.path.exists",
"media_grouper.video.Video",
"sys.exit",
"os.makedirs",
"glob.iglob",
"os.path.splitext",
"os.path.join",
"media_grouper.detect.Detector",
"logging.warning",
"datetime.datetime.now",
"os.path.isdir",
"os.path.basename",
"shutil.copy",
"shutil.rmtree",
"os.path.abspath",... | [((831, 851), 'os.path.abspath', 'os.path.abspath', (['dst'], {}), '(dst)\n', (846, 851), False, 'import os\n'), ((1192, 1223), 'os.path.exists', 'os.path.exists', (['self.dst_folder'], {}), '(self.dst_folder)\n', (1206, 1223), False, 'import os\n'), ((1562, 1590), 'os.makedirs', 'os.makedirs', (['self.dst_folder'], {}), '(self.dst_folder)\n', (1573, 1590), False, 'import os\n'), ((3555, 3565), 'media_grouper.detect.Detector', 'Detector', ([], {}), '()\n', (3563, 3565), False, 'from media_grouper.detect import Detector\n'), ((3574, 3600), 'logging.info', 'logging.info', (['"""Working..."""'], {}), "('Working...')\n", (3586, 3600), False, 'import logging\n'), ((3692, 3745), 'glob.iglob', 'glob.iglob', (["(self.src_folder + '**/**')"], {'recursive': '(True)'}), "(self.src_folder + '**/**', recursive=True)\n", (3702, 3745), False, 'import glob\n'), ((5384, 5405), 'logging.info', 'logging.info', (['"""Done!"""'], {}), "('Done!')\n", (5396, 5405), False, 'import logging\n'), ((1523, 1553), 'shutil.rmtree', 'shutil.rmtree', (['self.dst_folder'], {}), '(self.dst_folder)\n', (1536, 1553), False, 'import shutil\n'), ((2428, 2460), 'os.path.basename', 'os.path.basename', (['src_media.path'], {}), '(src_media.path)\n', (2444, 2460), False, 'import os\n'), ((3789, 3812), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (3802, 3812), False, 'import os\n'), ((3871, 3897), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3887, 3897), False, 'import os\n'), ((5342, 5375), 'shutil.copy', 'shutil.copy', (['media.path', 'new_path'], {}), '(media.path, new_path)\n', (5353, 5375), False, 'import shutil\n'), ((3306, 3329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3327, 3329), False, 'import datetime\n'), ((3984, 4009), 'media_grouper.image.Image', 'Image', (['filename', 'detector'], {}), '(filename, detector)\n', (3989, 4009), False, 'from media_grouper.image import Image\n'), ((4960, 5001), 'os.path.join', 'os.path.join', (['self.dst_folder', 'sub_folder'], {}), '(self.dst_folder, sub_folder)\n', (4972, 5001), False, 'import os\n'), ((5134, 5173), 'os.path.join', 'os.path.join', (['sub_folder_path', 'dst_name'], {}), '(sub_folder_path, dst_name)\n', (5146, 5173), False, 'import os\n'), ((5219, 5258), 'os.path.join', 'os.path.join', (['self.dst_folder', 'dst_name'], {}), '(self.dst_folder, dst_name)\n', (5231, 5258), False, 'import os\n'), ((1427, 1437), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1435, 1437), False, 'import sys\n'), ((4184, 4205), 'media_grouper.video.Video', 'Video', (['filename', 'None'], {}), '(filename, None)\n', (4189, 4205), False, 'from media_grouper.video import Video\n'), ((4278, 4332), 'logging.warning', 'logging.warning', (['"""Unsupported file format: """', 'filename'], {}), "('Unsupported file format: ', filename)\n", (4293, 4332), False, 'import logging\n'), ((5025, 5056), 'os.path.exists', 'os.path.exists', (['sub_folder_path'], {}), '(sub_folder_path)\n', (5039, 5056), False, 'import os\n'), ((5078, 5106), 'os.makedirs', 'os.makedirs', (['sub_folder_path'], {}), '(sub_folder_path)\n', (5089, 5106), False, 'import os\n')] |
import collections
from varappx.common.genotypes import decode_int
from varappx.constants.filters import ALL_VARIANT_FILTER_NAMES
from varappx.main.filters.sort import Sort
from varappx.models.gemini import Variants, GeneDetailed
# For export to frontend
_variant_genotype_expose = {0: [0,0], 1: [0,1], 2: [None,None], 3: [1,1]}
# Actually gt_types=2 means that it is unknown,
# cf. https://github.com/arq5x/gemini/blob/master/gemini/gemini_constants.py and google groups.
VARIANT_FIELDS = [f for f in Variants.__table__.columns.keys()] + ['source']
# A simple, lighter model of Variant - an object with same fields but without special methods
VariantTuple = collections.namedtuple('VariantTuple', VARIANT_FIELDS)
VariantTriplet = collections.namedtuple('VariantTriplet', ['variant_id','gene_symbol','source']) # for compound het
VariantMono = collections.namedtuple('VariantMono', 'variant_id') # for other gen filters
VariantTupleStats = collections.namedtuple('VariantTupleStats', ALL_VARIANT_FILTER_NAMES) # for stats
# Proxy model for variants
# Making all the changes to the data that are necessary to filter correctly
class Variant(Variants):
source = ''
class Meta:
proxy = True
class VariantsCollection:
"""A list of variants - such as the result of evaluating a QuerySet,
    the result of a query (filtering) of the database.
"""
def __init__(self, variants, cache_key=None, db=None):
"""Construct a VariantsCollection based on either a QuerySet
(which we evaluate with `list()`) or a list of Variant objects.
:param db: the name of the db these variants come from.
"""
self.list = list(variants)
self.cache_key = cache_key
self.db = db
def __getitem__(self, item):
return self.list[item]
def __len__(self):
return len(self.list)
#return self.variants.count() if self._n is None else self._n
def __next__(self):
return next(self.list)
def __add__(self, other):
return VariantsCollection(self.list + other.list, db=self.db)
@property
def ids(self):
return [v.variant_id for v in self.list]
def pop(self, i):
self.list.pop(i)
def remove(self, elt):
self.list.remove(elt)
def append(self, sample):
self.list.append(sample)
def extend(self, other):
self.list.extend(other.list)
def sub(self, a, b=None):
"""Return a new collection with only the first N variants."""
if b is None:
return VariantsCollection(self.list[:a], db=self.db)
else:
return VariantsCollection(self.list[a:b], db=self.db)
def get_field_values(self, field_name):
""" Return a list of all values for the given field_name."""
return [getattr(v, field_name) for v in self.list]
def order_by(self, key, reverse=False):
"""Return a new ordered collection of the same elements.
:param key: either a string with the attribute or a list of keys. The special
'location' parameter can be passed, to sort them by chrom + start (chromosome as a string)
:param reverse: if True, sort in the reverse order.
"""
keyl = Sort(key, reverse).key_condition
return VariantsCollection(sorted(self.list, key=keyl, reverse=reverse), db=self.db)
def sort_inplace(self, key, reverse=False):
"""Order the collection in-place"""
keyl = Sort(key, reverse).key_condition
self.list.sort(key=keyl, reverse=reverse)
def __str__(self):
return "<Collection of {} variants>".format(len(self.list))
def expand(self):
return '\n'.join([str(v) for v in self.list])
def expose(self):
return [v.expose() for v in self.list]
def expose_variant(v):
"""The JSON to return to the frontend"""
return {
"variant_id": v.variant_id,
"chrom": v.chrom,
"start": v.start + 1,
"end": v.end,
"ref": v.ref,
"alt": v.alt,
"quality": v.qual,
"genotypes_index": [_variant_genotype_expose[i] for i in decode_int(v.gts)] if v.gts else [],
"pass_filter": v.filter or 'PASS',
"dbsnp": v.rs_ids.split(',') if v.rs_ids is not None else [],
"is_exonic": v.is_exonic,
"is_coding": v.is_coding,
"aaf_1kg_all": v.aaf_1kg_all,
"aaf_esp_all": v.aaf_esp_all,
"aaf_exac_all": v.aaf_exac_all,
"aaf_max_all": v.max_aaf_all,
"gene_symbol": v.gene,
"ensembl_transcript_id": v.transcript,
"impact": v.impact,
"impact_severity": v.impact_severity,
"aa_change": v.aa_change,
"polyphen_pred": v.polyphen_pred,
"polyphen_score": v.polyphen_score,
"sift_pred": v.sift_pred,
"sift_score": v.sift_score,
"cadd_raw": v.cadd_raw,
"cadd_scaled": v.cadd_scaled,
"clinvar_sig": v.clinvar_sig,
"clinvar_disease_acc": v.clinvar_disease_acc.split("|") if v.clinvar_disease_acc is not None else [],
"gerp_bp_score": v.gerp_bp_score,
"gerp_element_pval": v.gerp_element_pval,
"source": v.source,
"qual_depth": v.qual_depth,
"fisher_strand_bias": v.fisher_strand_bias,
"rms_map_qual": v.rms_map_qual,
"hgvsp": v.vep_hgvsp,
"hgvsc": v.vep_hgvsc,
"read_depth": v.read_depth,
"allele_count": v.allele_count,
"allele_freq": v.allele_freq,
"base_qual_rank_sum": v.base_qual_rank_sum,
"map_qual_rank_sum": v.map_qual_rank_sum,
"read_pos_rank_sum": v.read_pos_rank_sum,
"strand_bias_odds_ratio": v.strand_bias_odds_ratio,
"type": v.type,
"allele_depths":v.allele_depths,
"allele_freq_raws":v.allele_freq_raws,
"allele_depths_raws": v.allele_depths_raws,
}
def add_genotypes_selection(v_exposed, samples_selection):
v_exposed["genotypes_index"] = samples_selection.select_x_active(v_exposed["genotypes_index"])
return v_exposed
def expose_variant_full(v, samples_selection):
exp = expose_variant(v)
exp = add_genotypes_selection(exp, samples_selection)
return exp
def annotate_variants(variants, db):
from varappx.handle_init import db as DB
transcripts = [v['ensembl_transcript_id'] for v in variants]
DB.create_all(bind=db)
gds = GeneDetailed.query.filter(GeneDetailed.transcript.in_(transcripts)).all()
gd=[]
for _gd in gds:
gd.append([_gd.transcript,_gd.ensembl_gene_id,_gd.entrez_id])
annot = {}
for t,ensg,entrez in gd:
annot[t] = (ensg, entrez)
for v in variants:
enst = v['ensembl_transcript_id']
ann = annot.get(enst)
if ann:
v['ensembl_gene_id'] = ann[0]
v['entrez_gene_id'] = ann[1]
return variants
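# --- Editor's sketch, not part of the original module -----------------------
# Illustrates what the _variant_genotype_expose table above does with gemini's
# gt_types codes (0 hom ref, 1 het, 2 unknown, 3 hom alt); the helper and the
# sample code list are invented for the example.
def _expose_gt_codes_sketch(gt_type_codes):
    return [_variant_genotype_expose[c] for c in gt_type_codes]
# _expose_gt_codes_sketch([0, 1, 3]) -> [[0, 0], [0, 1], [1, 1]]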
| [
"collections.namedtuple",
"varappx.models.gemini.GeneDetailed.transcript.in_",
"varappx.handle_init.db.create_all",
"varappx.common.genotypes.decode_int",
"varappx.models.gemini.Variants.__table__.columns.keys",
"varappx.main.filters.sort.Sort"
] | [((662, 716), 'collections.namedtuple', 'collections.namedtuple', (['"""VariantTuple"""', 'VARIANT_FIELDS'], {}), "('VariantTuple', VARIANT_FIELDS)\n", (684, 716), False, 'import collections\n'), ((734, 819), 'collections.namedtuple', 'collections.namedtuple', (['"""VariantTriplet"""', "['variant_id', 'gene_symbol', 'source']"], {}), "('VariantTriplet', ['variant_id', 'gene_symbol',\n 'source'])\n", (756, 819), False, 'import collections\n'), ((848, 899), 'collections.namedtuple', 'collections.namedtuple', (['"""VariantMono"""', '"""variant_id"""'], {}), "('VariantMono', 'variant_id')\n", (870, 899), False, 'import collections\n'), ((945, 1014), 'collections.namedtuple', 'collections.namedtuple', (['"""VariantTupleStats"""', 'ALL_VARIANT_FILTER_NAMES'], {}), "('VariantTupleStats', ALL_VARIANT_FILTER_NAMES)\n", (967, 1014), False, 'import collections\n'), ((6338, 6360), 'varappx.handle_init.db.create_all', 'DB.create_all', ([], {'bind': 'db'}), '(bind=db)\n', (6351, 6360), True, 'from varappx.handle_init import db as DB\n'), ((504, 537), 'varappx.models.gemini.Variants.__table__.columns.keys', 'Variants.__table__.columns.keys', ([], {}), '()\n', (535, 537), False, 'from varappx.models.gemini import Variants, GeneDetailed\n'), ((3233, 3251), 'varappx.main.filters.sort.Sort', 'Sort', (['key', 'reverse'], {}), '(key, reverse)\n', (3237, 3251), False, 'from varappx.main.filters.sort import Sort\n'), ((3466, 3484), 'varappx.main.filters.sort.Sort', 'Sort', (['key', 'reverse'], {}), '(key, reverse)\n', (3470, 3484), False, 'from varappx.main.filters.sort import Sort\n'), ((6397, 6437), 'varappx.models.gemini.GeneDetailed.transcript.in_', 'GeneDetailed.transcript.in_', (['transcripts'], {}), '(transcripts)\n', (6424, 6437), False, 'from varappx.models.gemini import Variants, GeneDetailed\n'), ((4121, 4138), 'varappx.common.genotypes.decode_int', 'decode_int', (['v.gts'], {}), '(v.gts)\n', (4131, 4138), False, 'from varappx.common.genotypes import decode_int\n')] |
from stack_and_queue import __version__
import pytest
from stack_and_queue.stack_and_queue import Stack , Queue
def test_version():
assert __version__ == '0.1.0'
def test_push_onto_a_stack():
node = Stack()
node.push(1)
excepted =1
actual = node.top.data
assert excepted == actual
def test_push_multiple_values_onto_a_stack():
node = Stack()
node.push(1)
node.push(2)
excepted =2
actual = node.top.data
assert excepted == actual
def test_pop_off_the_stack():
node = Stack()
node.push(1)
node.push(2)
node.pop()
excepted =1
actual = node.top.data
assert excepted == actual
def test_empty_a_stack_after_multiple_pops():
node = Stack()
node.push(1)
node.push(2)
node.pop()
node.pop()
excepted =True
actual = node.is_empty()
assert excepted == actual
def test_peek_the_next_item_on_the_stack():
node = Stack()
node.push(1)
node.push(2)
excepted =2
actual = node.peek()
assert excepted == actual
def test_instantiate_an_empty_stack():
node = Stack()
assert node.is_empty()
def test_Calling_pop_or_peek_on_empty_stack_raises_exception():
node = Stack()
try:
node.pop()
except Exception as e:
assert str(e) == "empty stack"
def test_enqueue_into_a_queue():
node = Queue()
node.enqueue(1)
excepted =1 , 1
actual = node.rear.data , node.front.data
assert excepted == actual
def test_enqueue_multiple_values_into_a_queue():
node = Queue()
node.enqueue(1)
node.enqueue(2)
excepted =2 , 1
actual = node.rear.data , node.front.data
assert excepted == actual
def test_dequeue_out_of_a_queue_the_expected_value():
node = Queue()
node.enqueue(1)
node.enqueue(2)
excepted =1
actual = node.dequeue()
assert excepted == actual
def test_peek_into_a_queue_seeing_the_expected_value():
node = Queue()
node.enqueue(2)
excepted =2
actual = node.peek()
assert excepted == actual
def test_empty_a_queue_after_multiple_dequeues():
node = Queue()
node.enqueue(1)
node.enqueue(2)
node.dequeue()
node.dequeue()
excepted = True
actual = node.is_empty()
assert excepted == actual
def test_instantiate_an_empty_queue():
node = Queue()
assert node.is_empty()
def test_Calling_dequeue_or_peek_on_empty_queue_raises_exception():
node = Queue()
try:
node.peek()
except Exception as e:
assert str(e) == 'empty queue'
| [
"stack_and_queue.stack_and_queue.Queue",
"stack_and_queue.stack_and_queue.Stack"
] | [((211, 218), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (216, 218), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((367, 374), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (372, 374), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((524, 531), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (529, 531), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((712, 719), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (717, 719), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((918, 925), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (923, 925), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((1082, 1089), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (1087, 1089), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((1193, 1200), 'stack_and_queue.stack_and_queue.Stack', 'Stack', ([], {}), '()\n', (1198, 1200), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((1340, 1347), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (1345, 1347), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((1525, 1532), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (1530, 1532), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((1735, 1742), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (1740, 1742), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((1925, 1932), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (1930, 1932), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((2086, 2093), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (2091, 2093), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((2303, 2310), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (2308, 2310), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n'), ((2418, 2425), 'stack_and_queue.stack_and_queue.Queue', 'Queue', ([], {}), '()\n', (2423, 2425), False, 'from stack_and_queue.stack_and_queue import Stack, Queue\n')] |
# Time: O(n * l^2), it also takes O(l) to make the substring w[:i]+w[i+1:]
# Space: O(n * l)
# 1048 weekly contest 137 5/18/2019
# Given a list of words, each word consists of English lowercase letters.
#
# Let's say word1 is a predecessor of word2 if and only if we can add exactly one letter anywhere in word1
# to make it equal to word2. For example, "abc" is a predecessor of "abac".
#
# A word chain is a sequence of words [word_1, word_2, ..., word_k] with k >= 1, where word_1 is a
# predecessor of word_2, word_2 is a predecessor of word_3, and so on.
#
# Return the longest possible length of a word chain with words chosen from the given list of words.
# 1 <= words.length <= 1000
# 1 <= words[i].length <= 16
from typing import List
import collections
class Solution(object):
def longestStrChain(self, words): # USE THIS 200ms
"""
:type words: List[str]
:rtype: int
"""
words.sort(key=len)
dp = collections.defaultdict(int)
for w in words:
for i in range(len(w)):
dp[w] = max(dp[w], dp[w[:i]+w[i+1:]]+1)
return max(dp.values())
# O(n^2 * l) 1920ms
def longestStrChain_ming(self, words: List[str]) -> int:
def isSub(s1, s2):
if len(s1) + 1 != len(s2): return False
i = 0
for c in s2:
if c == s1[i]:
i += 1
if i == len(s1): return True
return False
dp = [1] * len(words)
words.sort(key=len)
for i in range(1, len(words)): # traverse all words
for j in reversed(range(i)): # traverse all previous words, reverse to reduce the call to isSub()
if dp[j] + 1 > dp[i] and isSub(words[j], words[i]):
dp[i] = dp[j] + 1
return max(dp)
print(Solution().longestStrChain(["a","b","ba","bca","bda","bdca"])) # 4
print(Solution().longestStrChain([
"ksqvsyq","ks","kss","czvh","zczpzvdhx","zczpzvh","zczpzvhx","zcpzvh","zczvh","gr",
"grukmj","ksqvsq","gruj","kssq","ksqsq","grukkmj","grukj","zczpzfvdhx","gru"])) # 7 | [
"collections.defaultdict"
] | [((965, 993), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (988, 993), False, 'import collections\n')] |
#!/usr/bin/python
# Script to commit and push to a remote repository using gitPython package
# Usage: python commitAndPushToRemote.py
# Author: <NAME>
# Date: 18 July 2019
# Version: v0.0.2 (23 August 2019)
# NOTE: Repository should already exist and have a remote named 'origin' configured
from git import Repo # Importing gitPython to operate over repositories
from pathlib import Path # Import Path composing functions to maintain directory path integrity
import sys # Import system functionality
def main(argv):
# Path to local repository FOLDER
source_git_dir = Path(r"C:\Users\USERNAME\test_repository")
# Commit message
message = 'Commited through gitPython'
try:
# Create Repo object from specified path
repo = Repo(source_git_dir)
# Add all changed files to staging area
repo.git.add(update=True)
# Commit changes with choosen message
repo.index.commit(message)
# Pull ref that points to the remote called 'origin'
origin = repo.remote(name='origin')
# Push to remote (origin)
origin.push()
except Exception as e:
print("Something went wrong while pushing the code. Error: " + str(e))
    else:
print("Code push from gitPython succeeded")
if __name__ == "__main__":
main(sys.argv[1:])
| [
"git.Repo",
"pathlib.Path"
] | [((580, 624), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\USERNAME\\\\test_repository"""'], {}), "('C:\\\\Users\\\\USERNAME\\\\test_repository')\n", (584, 624), False, 'from pathlib import Path\n'), ((761, 781), 'git.Repo', 'Repo', (['source_git_dir'], {}), '(source_git_dir)\n', (765, 781), False, 'from git import Repo\n')] |
import os
class Configuration:
DEBUG = True
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE = os.path.join(PROJECT_ROOT, 'tmp', 'flask_test.db')
SQLALCHEMY_DATABASE_URI = f'sqlite:///{DATABASE}'
SQLALCHEMY_TRACK_MODIFICATIONS = True
| [
"os.path.realpath",
"os.path.join"
] | [((128, 178), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""tmp"""', '"""flask_test.db"""'], {}), "(PROJECT_ROOT, 'tmp', 'flask_test.db')\n", (140, 178), False, 'import os\n'), ((85, 111), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os\n')] |
# Temporary version of simplejson in a single file to get around older packaging
# restrictions in previous releases of Access Grid 2.
# ---------------------------------------------------------------------------------------
# simplejson/scanner.py
"""
Iterator based sre token scanner
"""
import sre_parse, sre_compile, sre_constants
from sre_constants import BRANCH, SUBPATTERN
from re import VERBOSE, MULTILINE, DOTALL
import re
__all__ = ['Scanner', 'pattern']
FLAGS = (VERBOSE | MULTILINE | DOTALL)
class Scanner(object):
def __init__(self, lexicon, flags=FLAGS):
self.actions = [None]
# combine phrases into a compound pattern
s = sre_parse.Pattern()
s.flags = flags
p = []
for idx, token in enumerate(lexicon):
phrase = token.pattern
try:
subpattern = sre_parse.SubPattern(s,
[(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
except sre_constants.error:
raise
p.append(subpattern)
self.actions.append(token)
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def iterscan(self, string, idx=0, context=None):
"""
Yield match, end_idx for each match
"""
match = self.scanner.scanner(string, idx).match
actions = self.actions
lastend = idx
end = len(string)
while True:
m = match()
if m is None:
break
matchbegin, matchend = m.span()
if lastend == matchend:
break
action = actions[m.lastindex]
if action is not None:
rval, next_pos = action(m, context)
if next_pos is not None and next_pos != matchend:
# "fast forward" the scanner
matchend = next_pos
match = self.scanner.scanner(string, matchend).match
yield rval, matchend
lastend = matchend
def pattern(pattern, flags=FLAGS):
def decorator(fn):
fn.pattern = pattern
fn.regex = re.compile(pattern, flags)
return fn
return decorator
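# --- Editor's usage sketch, not part of the original file -------------------
# Minimal illustration of how the decoder below drives this scanner: each
# token callback decorated via `pattern` returns a (value, next_position)
# pair, and Scanner.iterscan yields those values with their end offsets.
# The two toy tokens here are invented for the example.
def _word_token(match, context):
    return ('word', match.group(0)), None
pattern(r'[a-z]+')(_word_token)
def _number_token(match, context):
    return ('number', int(match.group(0))), None
pattern(r'\d+')(_number_token)
# list(Scanner([_word_token, _number_token]).iterscan('abc123'))
#   -> [(('word', 'abc'), 3), (('number', 123), 6)]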
# ---------------------------------------------------------------------------------------
# simplejson/encoder.py
"""
Implementation of JSONEncoder
"""
import re
#try:
# from simplejson import _speedups
#except ImportError:
# _speedups = None
_speedups = None
ESCAPE = re.compile(r'[\x00-\x19\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])')
ESCAPE_DCT = {
# escape all forward slashes to prevent </script> attack
'/': '\\/',
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
def floatstr(o, allow_nan=True):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return str(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
def encode_basestring(s):
"""
Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def encode_basestring_ascii(s):
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
try:
encode_basestring_ascii = _speedups.encode_basestring_ascii
_need_utf8 = True
except AttributeError:
_need_utf8 = False
class JSONEncoder(object):
"""
Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
__all__ = ['__init__', 'default', 'encode', 'iterencode']
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8'):
"""
Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
self.current_indent_level = 0
if separators is not None:
self.item_separator, self.key_separator = separators
self.encoding = encoding
def _newline_indent(self):
return '\n' + (' ' * (self.indent * self.current_indent_level))
def _iterencode_list(self, lst, markers=None):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
yield '['
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
yield separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(self, dct, markers=None):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
key_separator = self.key_separator
if self.indent is not None:
self.current_indent_level += 1
newline_indent = self._newline_indent()
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
allow_nan = self.allow_nan
if self.sort_keys:
keys = dct.keys()
keys.sort()
items = [(k, dct[k]) for k in keys]
else:
items = dct.iteritems()
_encoding = self.encoding
_do_decode = (_encoding is not None
and not (_need_utf8 and _encoding == 'utf-8'))
for key, value in items:
if isinstance(key, str):
if _do_decode:
key = key.decode(_encoding)
elif isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = floatstr(key, allow_nan)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif self.skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield encoder(key)
yield key_separator
for chunk in self._iterencode(value, markers):
yield chunk
if newline_indent is not None:
self.current_indent_level -= 1
yield self._newline_indent()
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(self, o, markers=None):
if isinstance(o, basestring):
if self.ensure_ascii:
encoder = encode_basestring_ascii
else:
encoder = encode_basestring
_encoding = self.encoding
if (_encoding is not None and isinstance(o, str)
and not (_need_utf8 and _encoding == 'utf-8')):
o = o.decode(_encoding)
yield encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield floatstr(o, self.allow_nan)
elif isinstance(o, (list, tuple)):
for chunk in self._iterencode_list(o, markers):
yield chunk
elif isinstance(o, dict):
for chunk in self._iterencode_dict(o, markers):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
for chunk in self._iterencode_default(o, markers):
yield chunk
if markers is not None:
del markers[markerid]
def _iterencode_default(self, o, markers=None):
newobj = self.default(o)
return self._iterencode(newobj, markers)
def default(self, o):
"""
Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""
Return a JSON string representation of a Python data structure.
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks...
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8' and _need_utf8)):
o = o.decode(_encoding)
return encode_basestring_ascii(o)
# This doesn't pass the iterator directly to ''.join() because it
# sucks at reporting exceptions. It's going to do this internally
# anyway because it uses PySequence_Fast or similar.
chunks = list(self.iterencode(o))
return ''.join(chunks)
def iterencode(self, o):
"""
Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self._iterencode(o, markers)
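# --- Editor's usage sketch, not part of the original file -------------------
# Demonstrates the extension hook described in JSONEncoder.default(): a
# subclass that serialises Python sets as sorted JSON arrays. The SetEncoder
# name is invented for the example.
class SetEncoder(JSONEncoder):
    def default(self, o):
        if isinstance(o, set):
            return sorted(o)
        return JSONEncoder.default(self, o)
# SetEncoder().encode({'tags': set(['b', 'a'])}) -> '{"tags": ["a", "b"]}'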
# ---------------------------------------------------------------------------------------
# simplejson/decoder.py
"""
Implementation of JSONDecoder
"""
import re
# from simplejson.scanner import Scanner, pattern
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
import struct
import sys
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
'true': True,
'false': False,
'null': None,
}
def JSONConstant(match, context, c=_CONSTANTS):
return c[match.group(0)], None
pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
def JSONNumber(match, context):
match = JSONNumber.regex.match(match.string, *match.span())
integer, frac, exp = match.groups()
if frac or exp:
res = float(integer + (frac or '') + (exp or ''))
else:
res = int(integer)
return res, None
pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber)
STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
try:
m = unichr(int(esc, 16))
if len(esc) != 4 or not esc.isalnum():
raise ValueError
except ValueError:
raise ValueError(errmsg("Invalid \\uXXXX escape", s, end))
end += 5
_append(m)
return u''.join(chunks), end
def JSONString(match, context):
encoding = getattr(context, 'encoding', None)
return scanstring(match.string, match.end(), encoding)
pattern(r'"')(JSONString)
WHITESPACE = re.compile(r'\s*', FLAGS)
def JSONObject(match, context, _w=WHITESPACE.match):
pairs = {}
s = match.string
end = _w(s, match.end()).end()
nextchar = s[end:end + 1]
# trivial empty object
if nextchar == '}':
return pairs, end + 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
encoding = getattr(context, 'encoding', None)
iterscan = JSONScanner.iterscan
while True:
key, end = scanstring(s, end, encoding)
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end = _w(s, end + 1).end()
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == '}':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
object_hook = getattr(context, 'object_hook', None)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
pattern(r'{')(JSONObject)
def JSONArray(match, context, _w=WHITESPACE.match):
values = []
s = match.string
end = _w(s, match.end()).end()
# look-ahead for trivial empty array
nextchar = s[end:end + 1]
if nextchar == ']':
return values, end + 1
iterscan = JSONScanner.iterscan
while True:
try:
value, end = iterscan(s, idx=end, context=context).next()
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
values.append(value)
end = _w(s, end).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
if nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
end = _w(s, end).end()
return values, end
pattern(r'\[')(JSONArray)
ANYTHING = [
JSONObject,
JSONArray,
JSONString,
JSONConstant,
JSONNumber,
]
JSONScanner = Scanner(ANYTHING)
class JSONDecoder(object):
"""
Simple JSON <http://json.org> decoder
Performs the following translations in decoding:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
_scanner = Scanner(ANYTHING)
__all__ = ['__init__', 'decode', 'raw_decode']
def __init__(self, encoding=None, object_hook=None):
"""
``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
"""
self.encoding = encoding
self.object_hook = object_hook
def decode(self, s, _w=WHITESPACE.match):
"""
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, **kw):
"""
Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
kw.setdefault('context', self)
try:
obj, end = self._scanner.iterscan(s, **kw).next()
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
__all__ = ['JSONDecoder']
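# Illustrative usage sketch (added for clarity, not part of the original
# decoder module). With the Python 2 era API defined above, a decoder instance
# can be used directly; the results shown follow the docstring's conventions
# and are not verified output.
#
#     decoder = JSONDecoder()
#     decoder.decode('{"foo": ["bar", null, 1.0]}')    # -> {u'foo': [u'bar', None, 1.0]}
#     decoder.raw_decode('{"a": 1} trailing junk')     # -> ({u'a': 1}, 8), index where the document ended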
# ---------------------------------------------------------------------------------------
# simplejson/__init__.py
r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.7.1'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
# from decoder import JSONDecoder
# from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', **kw):
"""
Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if cls is None and encoding is None and object_hook is None and not kw:
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
return cls(encoding=encoding, **kw).decode(s)
def read(s):
"""
json-py API compatibility hook. Use loads(s) instead.
"""
import warnings
warnings.warn("simplejson.loads(s) should be used instead of read(s)",
DeprecationWarning)
return loads(s)
def write(obj):
"""
json-py API compatibility hook. Use dumps(s) instead.
"""
import warnings
warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
DeprecationWarning)
return dumps(obj)
simplejson_dumps = dumps
simplejson_loads = loads
# ---------------------------------------------------------------------------------------
# simplejson/jsonfilter
import cgi
class JSONFilter(object):
def __init__(self, app, mime_type='text/x-json'):
self.app = app
self.mime_type = mime_type
def __call__(self, environ, start_response):
# Read JSON POST input to jsonfilter.json if matching mime type
response = {'status': '200 OK', 'headers': []}
def json_start_response(status, headers):
response['status'] = status
response['headers'].extend(headers)
environ['jsonfilter.mime_type'] = self.mime_type
if environ.get('REQUEST_METHOD', '') == 'POST':
if environ.get('CONTENT_TYPE', '') == self.mime_type:
args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _]
data = environ['wsgi.input'].read(*map(int, args))
                environ['jsonfilter.json'] = simplejson_loads(data)  # use the module-level alias defined above
res = simplejson_dumps(self.app(environ, json_start_response))
jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp')
if jsonp:
content_type = 'text/javascript'
res = ''.join(jsonp + ['(', res, ')'])
elif 'Opera' in environ.get('HTTP_USER_AGENT', ''):
# Opera has bunk XMLHttpRequest support for most mime types
content_type = 'text/plain'
else:
content_type = self.mime_type
headers = [
('Content-type', content_type),
            ('Content-length', str(len(res))),  # WSGI header values must be str
]
headers.extend(response['headers'])
start_response(response['status'], headers)
return [res]
def factory(app, global_conf, **kw):
return JSONFilter(app, **kw)
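# Illustrative usage sketch (added for clarity, not part of the original
# jsonfilter module). `my_app` and the server wiring are hypothetical;
# JSONFilter serializes whatever the wrapped app returns and exposes any JSON
# POST body as environ['jsonfilter.json'].
#
#     def my_app(environ, start_response):
#         return {"ok": True, "posted": environ.get('jsonfilter.json')}
#
#     wrapped_app = factory(my_app, global_conf={}, mime_type='text/x-json')
#     # serve `wrapped_app` with any WSGI server, e.g. wsgiref.simple_server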
# ---------------------------------------------------------------------------------------
| [
"sre_parse.SubPattern",
"re.compile",
"struct.unpack",
"sre_compile.compile",
"sre_parse.parse",
"warnings.warn",
"sre_parse.Pattern"
] | [((2538, 2585), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x19\\\\\\\\"\\\\b\\\\f\\\\n\\\\r\\\\t]"""'], {}), '(\'[\\\\x00-\\\\x19\\\\\\\\"\\\\b\\\\f\\\\n\\\\r\\\\t]\')\n', (2548, 2585), False, 'import re\n'), ((2593, 2626), 're.compile', 're.compile', (['"""([\\\\\\\\"/]|[^\\\\ -~])"""'], {}), '(\'([\\\\\\\\"/]|[^\\\\ -~])\')\n', (2603, 2626), False, 'import re\n'), ((17138, 17173), 're.compile', 're.compile', (['"""(.*?)(["\\\\\\\\])"""', 'FLAGS'], {}), '(\'(.*?)(["\\\\\\\\])\', FLAGS)\n', (17148, 17173), False, 'import re\n'), ((18903, 18928), 're.compile', 're.compile', (['"""\\\\s*"""', 'FLAGS'], {}), "('\\\\s*', FLAGS)\n", (18913, 18928), False, 'import re\n'), ((15871, 15898), 'struct.unpack', 'struct.unpack', (['"""dd"""', '_BYTES'], {}), "('dd', _BYTES)\n", (15884, 15898), False, 'import struct\n'), ((34896, 34990), 'warnings.warn', 'warnings.warn', (['"""simplejson.loads(s) should be used instead of read(s)"""', 'DeprecationWarning'], {}), "('simplejson.loads(s) should be used instead of read(s)',\n DeprecationWarning)\n", (34909, 34990), False, 'import warnings\n'), ((35130, 35225), 'warnings.warn', 'warnings.warn', (['"""simplejson.dumps(s) should be used instead of write(s)"""', 'DeprecationWarning'], {}), "('simplejson.dumps(s) should be used instead of write(s)',\n DeprecationWarning)\n", (35143, 35225), False, 'import warnings\n'), ((670, 689), 'sre_parse.Pattern', 'sre_parse.Pattern', ([], {}), '()\n', (687, 689), False, 'import sre_parse, sre_compile, sre_constants\n'), ((1106, 1152), 'sre_parse.SubPattern', 'sre_parse.SubPattern', (['s', '[(BRANCH, (None, p))]'], {}), '(s, [(BRANCH, (None, p))])\n', (1126, 1152), False, 'import sre_parse, sre_compile, sre_constants\n'), ((1176, 1198), 'sre_compile.compile', 'sre_compile.compile', (['p'], {}), '(p)\n', (1195, 1198), False, 'import sre_parse, sre_compile, sre_constants\n'), ((2195, 2221), 're.compile', 're.compile', (['pattern', 'flags'], {}), '(pattern, flags)\n', (2205, 2221), False, 'import re\n'), ((924, 954), 'sre_parse.parse', 'sre_parse.parse', (['phrase', 'flags'], {}), '(phrase, flags)\n', (939, 954), False, 'import sre_parse, sre_compile, sre_constants\n')] |
#!/usr/bin/python3
# wucheng August 8, 2019
# sockmetrics.py --- collect sock communication info among hosts
# version 0.9
# two methods to output results:
# 1. to screen
# 2. to TDengine, a kind of structured time series db
# please run ./sockmetrics -h to get help
# This program bases on https://github.com/iovisor/bcc
# Licensed Apache-2.0
#from __future__ import print_function
from bcc import BPF
import argparse
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
from time import sleep, strftime
from subprocess import call
from collections import namedtuple, defaultdict
import os
import json
import taos
import datetime
# arguments
def range_check(string):
value = int(string)
if value < 1 or value > 600:
msg = "value must be 1 ~ 600, got %d" % (value,)
raise argparse.ArgumentTypeError(msg)
return value
def positive_check(string):
value = int(string)
if value < 1:
msg = "value must be stricly positive, got %d" % (value,)
raise argparse.ArgumentTypeError(msg)
return value
examples = """examples:
    ./sockmetrics --conf_file='/etc/sockmetrics.conf' # write all sock trace data into the tsdb configured in conf_file
./sockmetrics # trace Sock send/recv by host on screen
./sockmetrics -p 181 # only trace PID 181 on screen
"""
parser = argparse.ArgumentParser(
description="Summarize Sock send/recv throughput by host",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("--conf_file",
help="this argument points the configure file, and is exclusive and discards other arguments ")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("interval", nargs="?", default=1, type=range_check,
help="output interval, in seconds (default 1), range 1 ~ 600")
parser.add_argument("count", nargs="?", default=-1, type=positive_check,
help="number of the records with the top recerived bytes to output per interval")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
def get_arguments_from_conf_file(conf_file):
with open(conf_file, 'r') as jsonfile:
_d = json.load(jsonfile)
if _d.get("dbhost") and _d.get("database") \
and _d.get("user") and _d.get("password"):
return _d
else:
raise Exception("conf_file %s is invalid."%conf_file)
conf_dict = None
if args.conf_file and os.stat(args.conf_file):
conf_dict = get_arguments_from_conf_file(args.conf_file)
args.pid = None
_interval = conf_dict.get('interval')
if _interval is not None and _interval > 0 and _interval <= 600:
args.interval = _interval
else:
args.interval = 1
_count = conf_dict.get('count')
if _count is not None and _count > 0:
args.count = _count
else:
args.count = -1
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
#include <linux/net.h>
#include <uapi/linux/ip.h>
#include <linux/ip.h>
struct ipv4_key_t {
u32 pid;
u32 saddr;
u32 daddr;
u16 lport;
u16 dport;
u16 socktype;
};
BPF_HASH(ipv4_send_bytes, struct ipv4_key_t);
BPF_HASH(ipv4_recv_bytes, struct ipv4_key_t);
struct ipv6_key_t {
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u16 lport;
u16 dport;
u16 socktype;
};
BPF_HASH(ipv6_send_bytes, struct ipv6_key_t);
BPF_HASH(ipv6_recv_bytes, struct ipv6_key_t);
//static int kprobe__packet_snd(struct pt_regs *ctx, struct socket *sock,
// struct msghdr *msg, size_t len)
int kprobe__inet_sendmsg(struct pt_regs *ctx, struct socket *sock, struct msghdr *msg, size_t size)
{
//bpf_trace_printk("entry inet_sendmsg now. \\n");
u32 pid = bpf_get_current_pid_tgid();
FILTER
struct sock *sk = sock->sk;
u16 dport = 0, family = sk->__sk_common.skc_family;
//bpf_trace_printk("inet_sendmsg got family %d, and protocol is %d\\n", family, sk->__sk_common.skc_prot);
if (family == AF_INET && sk->__sk_common.skc_rcv_saddr != sk->__sk_common.skc_daddr) {
struct ipv4_key_t ipv4_key = {.pid = pid};
ipv4_key.saddr = sk->__sk_common.skc_rcv_saddr;
ipv4_key.daddr = sk->__sk_common.skc_daddr;
ipv4_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv4_key.dport = ntohs(dport);
ipv4_key.socktype = sock->type;
ipv4_send_bytes.increment(ipv4_key, size);
}
if (family == AF_INET6) {
struct ipv6_key_t ipv6_key = {.pid = pid};
__builtin_memcpy(&ipv6_key.saddr,
sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32, sizeof(ipv6_key.saddr));
__builtin_memcpy(&ipv6_key.daddr,
sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32, sizeof(ipv6_key.daddr));
if (ipv6_key.saddr == ipv6_key.daddr)
return 0;
ipv6_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv6_key.dport = ntohs(dport);
ipv6_key.socktype = sock->type;
ipv6_send_bytes.increment(ipv6_key, size);
}
// else drop
return 0;
}
int kprobe__inet_recvmsg(struct pt_regs *ctx, struct socket *sock, struct msghdr *msg, size_t size)
{
u32 pid = bpf_get_current_pid_tgid();
FILTER
struct sock *sk = sock->sk;
u16 dport = 0, family = sk->__sk_common.skc_family;
u64 *val, zero = 0;
// bpf_trace_printk("inet_recvmsg got family %d\\n", family);
if (size <= 0)
return 0;
if (family == AF_INET && sk->__sk_common.skc_rcv_saddr != sk->__sk_common.skc_daddr) {
struct ipv4_key_t ipv4_key = {.pid = pid};
ipv4_key.saddr = sk->__sk_common.skc_rcv_saddr;
ipv4_key.daddr = sk->__sk_common.skc_daddr;
ipv4_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv4_key.dport = ntohs(dport);
ipv4_key.socktype = sock->type;
ipv4_recv_bytes.increment(ipv4_key,size);
}
if (family == AF_INET6) {
struct ipv6_key_t ipv6_key = {.pid = pid};
__builtin_memcpy(&ipv6_key.saddr,
sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32, sizeof(ipv6_key.saddr));
__builtin_memcpy(&ipv6_key.daddr,
sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32, sizeof(ipv6_key.daddr));
if (ipv6_key.saddr == ipv6_key.daddr)
return 0;
ipv6_key.lport = sk->__sk_common.skc_num;
dport = sk->__sk_common.skc_dport;
ipv6_key.dport = ntohs(dport);
ipv6_key.socktype = sock->type;
ipv6_recv_bytes.increment(ipv6_key, size);
}
// else drop
return 0;
}
"""
# code substitutions
if args.pid:
bpf_text = bpf_text.replace('FILTER',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('FILTER', '')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
SockSessionKey = namedtuple('SockSession', ['pid', 'laddr', 'lport', 'daddr', 'dport', 'socktype'])
def pid_to_comm(pid):
try:
comm = open("/proc/%d/comm" % pid, "r").read().rstrip()
return comm
except IOError:
return str(pid)
def _to_socktype(type): #https://elixir.bootlin.com/linux/latest/source/include/linux/net.h#L60
return "TCP" if type==1 else "UDP" if type==2 else "Other"
def get_ipv4_session_key(k):
return SockSessionKey(pid=k.pid,
laddr=inet_ntop(AF_INET, pack("I", k.saddr)),
lport=k.lport,
daddr=inet_ntop(AF_INET, pack("I", k.daddr)),
dport=k.dport,
socktype=k.socktype)
def get_ipv6_session_key(k):
return SockSessionKey(pid=k.pid,
laddr=inet_ntop(AF_INET6, k.saddr),
lport=k.lport,
daddr=inet_ntop(AF_INET6, k.daddr),
dport=k.dport,
socktype=k.socktype)
def getdbconnection(cnfdict):
_conn = taos.connect(host=cnfdict["dbhost"], user=cnfdict["user"],
password=cnfdict["password"], database=cnfdict["database"])
print(_conn._host)
return _conn
def sqlexecute(cursor, sqltext):
#continue when network error, etc.
try:
cursor.execute(sqltext)
return True
except:
return False
def createtables(cursor):
_ipv4_ready = sqlexecute(cursor,
"""create table if not exists ipv4_metrics (
epoch timestamp,
pid int,
comm binary(40),
type int,
laddr binary(15),
lport int,
raddr binary(15),
rport int,
rx_byte bigint,
tx_byte bigint,
interval_sum smallint)""")
_ipv6_ready = sqlexecute(cursor,
"""create table if not exists ipv6_metrics (
epoch timestamp,
pid int,
comm binary(40),
type int,
laddr6 binary(39),
lport int,
raddr6 binary(39),
rport int,
rx_byte bigint,
tx_byte bigint,
interval_sum smallint)""")
if _ipv4_ready and _ipv6_ready:
return True
else:
return False
# initialize BPF
b = BPF(text=bpf_text)
"""
if b.get_kprobe_functions(b"netif_receive_skb"):
b.attach_kprobe(event="netif_receive_skb", fn_name="trace_netif_receive_skb")
else:
print("ERROR: netif_receive_skb() kernel function not found or traceable. "
"Older kernel versions not supported.")
exit()
"""
ipv4_send_bytes = b["ipv4_send_bytes"]
ipv4_recv_bytes = b["ipv4_recv_bytes"]
ipv6_send_bytes = b["ipv6_send_bytes"]
ipv6_recv_bytes = b["ipv6_recv_bytes"]
# tsdb if conf_file is set
if conf_dict:
output_db = True
else:
    output_db = False
if output_db:
conn = getdbconnection(conf_dict)
csr = conn.cursor()
i_reconnect = 0
if not createtables(csr):
raise(Exception("Something wrong when create tables."))
print('Tracing... Output every %s secs. Hit Ctrl-C to end' % args.interval)
print('Collected data will be written to tsdb...')
# output
exiting = False
while not exiting:
try:
sleep(args.interval)
except KeyboardInterrupt:
exiting = True
if output_db:
csr.close()
conn.close()
# reset dbconnect. continue when network error, etc.
if output_db and i_reconnect >= 100:
i_reconnect = 0
try:
csr.close()
conn.close()
conn = getdbconnection(conf_dict)
csr = conn.cursor()
except:
pass
# IPv4: build dict of all seen keys
ipv4_throughput = defaultdict(lambda: [0, 0])
for k, v in ipv4_send_bytes.items():
key = get_ipv4_session_key(k)
ipv4_throughput[key][0] = v.value
ipv4_send_bytes.clear()
for k, v in ipv4_recv_bytes.items():
key = get_ipv4_session_key(k)
ipv4_throughput[key][1] = v.value
ipv4_recv_bytes.clear()
if not output_db and ipv4_throughput:
print("%-6s %-12s %-6s %-21s %-21s %9s %9s" % ("PID", "COMM", "TYPE",
"LADDR", "RADDR", "RX_Byte", "TX_Byte"))
# output
i = 0
for k, (send_bytes, recv_bytes) in (ipv4_throughput.items()
if args.count==-1 else sorted(ipv4_throughput.items(),
key=lambda kv: sum(kv[1]),
reverse=True)):
if args.count > 0 and i >= args.count:
break
if output_db:
sqlexecute(csr, """insert into
ipv4_metrics (epoch, pid, comm, type, laddr, lport, raddr, rport, rx_byte, tx_byte, interval_sum)
values ('%s', %d, '%s', %d, '%s', %d, '%s', %d, %d, %d, %d)"""
% (datetime.datetime.now(), k.pid, pid_to_comm(k.pid), k.socktype,
k.laddr, k.lport, k.daddr, k.dport, recv_bytes, send_bytes, args.interval) )
else:
print("%-6d %-12.12s %-6.6s %-21s %-21s %9d %9d" % (k.pid,
pid_to_comm(k.pid), _to_socktype(k.socktype),
k.laddr + ":" + str(k.lport),
k.daddr + ":" + str(k.dport),
recv_bytes, send_bytes))
i += 1
# IPv6: build dict of all seen keys
ipv6_throughput = defaultdict(lambda: [0, 0])
for k, v in ipv6_send_bytes.items():
key = get_ipv6_session_key(k)
ipv6_throughput[key][0] = v.value
ipv6_send_bytes.clear()
for k, v in ipv6_recv_bytes.items():
key = get_ipv6_session_key(k)
ipv6_throughput[key][1] = v.value
ipv6_recv_bytes.clear()
    if not output_db and ipv6_throughput:
# more than 80 chars, sadly.
print("\n%-6s %-12s %-6s %-32s %-32s %9s %9s" % ("PID", "COMM", "TYPE",
"LADDR6", "RADDR6", "RX_Byte", "TX_Byte"))
# output
i = 0
for k, (send_bytes, recv_bytes) in (ipv6_throughput.items()
if args.count==-1 else sorted(ipv6_throughput.items(),
key=lambda kv: sum(kv[1]),
reverse=True)):
if args.count > 0 and i >= args.count:
break
if output_db:
sqlexecute(csr, """insert into
ipv6_metrics (epoch, pid, comm, type, laddr6, lport, raddr6, rport, rx_byte, tx_byte, interval_sum)
values ('%s', %d, '%s', %d, '%s', %d, '%s', %d, %d, %d, %d)"""
% (datetime.datetime.now(), k.pid, pid_to_comm(k.pid), k.socktype,
k.laddr, k.lport, k.daddr, k.dport, recv_bytes, send_bytes, args.interval) )
else:
print("%-6d %-12.12s %-6.6s %-32s %-32s %9d %9d" % (k.pid,
pid_to_comm(k.pid), _to_socktype(k.socktype),
k.laddr + ":" + str(k.lport),
k.daddr + ":" + str(k.dport),
recv_bytes, send_bytes))
i += 1
if output_db:
i_reconnect += 1
| [
"collections.namedtuple",
"argparse.ArgumentParser",
"socket.inet_ntop",
"argparse.ArgumentTypeError",
"taos.connect",
"time.sleep",
"struct.pack",
"datetime.datetime.now",
"collections.defaultdict",
"json.load",
"os.stat",
"bcc.BPF"
] | [((1354, 1517), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Summarize Sock send/recv throughput by host"""', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'epilog': 'examples'}), "(description=\n 'Summarize Sock send/recv throughput by host', formatter_class=argparse\n .RawDescriptionHelpFormatter, epilog=examples)\n", (1377, 1517), False, 'import argparse\n'), ((7067, 7153), 'collections.namedtuple', 'namedtuple', (['"""SockSession"""', "['pid', 'laddr', 'lport', 'daddr', 'dport', 'socktype']"], {}), "('SockSession', ['pid', 'laddr', 'lport', 'daddr', 'dport',\n 'socktype'])\n", (7077, 7153), False, 'from collections import namedtuple, defaultdict\n'), ((9443, 9461), 'bcc.BPF', 'BPF', ([], {'text': 'bpf_text'}), '(text=bpf_text)\n', (9446, 9461), False, 'from bcc import BPF\n'), ((2516, 2539), 'os.stat', 'os.stat', (['args.conf_file'], {}), '(args.conf_file)\n', (2523, 2539), False, 'import os\n'), ((8163, 8286), 'taos.connect', 'taos.connect', ([], {'host': "cnfdict['dbhost']", 'user': "cnfdict['user']", 'password': "cnfdict['password']", 'database': "cnfdict['database']"}), "(host=cnfdict['dbhost'], user=cnfdict['user'], password=cnfdict\n ['password'], database=cnfdict['database'])\n", (8175, 8286), False, 'import taos\n'), ((10880, 10908), 'collections.defaultdict', 'defaultdict', (['(lambda : [0, 0])'], {}), '(lambda : [0, 0])\n', (10891, 10908), False, 'from collections import namedtuple, defaultdict\n'), ((12537, 12565), 'collections.defaultdict', 'defaultdict', (['(lambda : [0, 0])'], {}), '(lambda : [0, 0])\n', (12548, 12565), False, 'from collections import namedtuple, defaultdict\n'), ((821, 852), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (847, 852), False, 'import argparse\n'), ((1021, 1052), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['msg'], {}), '(msg)\n', (1047, 1052), False, 'import argparse\n'), ((2230, 2249), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (2239, 2249), False, 'import json\n'), ((10371, 10391), 'time.sleep', 'sleep', (['args.interval'], {}), '(args.interval)\n', (10376, 10391), False, 'from time import sleep, strftime\n'), ((7903, 7931), 'socket.inet_ntop', 'inet_ntop', (['AF_INET6', 'k.saddr'], {}), '(AF_INET6, k.saddr)\n', (7912, 7931), False, 'from socket import inet_ntop, AF_INET, AF_INET6\n'), ((8004, 8032), 'socket.inet_ntop', 'inet_ntop', (['AF_INET6', 'k.daddr'], {}), '(AF_INET6, k.daddr)\n', (8013, 8032), False, 'from socket import inet_ntop, AF_INET, AF_INET6\n'), ((7587, 7605), 'struct.pack', 'pack', (['"""I"""', 'k.saddr'], {}), "('I', k.saddr)\n", (7591, 7605), False, 'from struct import pack\n'), ((7698, 7716), 'struct.pack', 'pack', (['"""I"""', 'k.daddr'], {}), "('I', k.daddr)\n", (7702, 7716), False, 'from struct import pack\n'), ((12018, 12041), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12039, 12041), False, 'import datetime\n'), ((13700, 13723), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13721, 13723), False, 'import datetime\n')] |
#!/usr/bin/env python3
import argparse
import csv
from logging import error, warning
import requests
import urllib3
import act
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def parseargs():
""" Parse arguments """
parser = argparse.ArgumentParser(
description='Get Threat Actors (MISP Galaxy)')
parser.add_argument(
'--userid',
dest='user_id',
required=True,
help="User ID")
parser.add_argument(
'--act-baseurl',
dest='act_baseurl',
required=True,
help='ACT API URI')
parser.add_argument(
"--logfile",
dest="log_file",
help="Log to file (default = stdout)")
parser.add_argument(
"--loglevel",
dest="log_level",
default="info",
help="Loglevel (default = info)")
return parser.parse_args()
def get_misp_threat_actors():
url = "https://raw.githubusercontent.com/MISP/misp-galaxy/master/clusters/threat-actor.json"
r = requests.get(url, verify=False)
return r.json()
def countrylist():
url = "http://download.geonames.org/export/dump/countryInfo.txt"
r = requests.get(url, verify=False)
countries = {
"iso": {},
"iso3": {},
"fips": {}
}
for row in csv.reader(
[line for line in r.text.splitlines() if line[0] != '#'],
delimiter='\t'):
countries["iso"][row[0]] = row[4]
countries["iso3"][row[1]] = row[4]
countries["fips"][row[3]] = row[4]
return countries
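# Illustrative note (added for clarity): countryInfo.txt rows are tab separated
# and the columns used above are row[0]=ISO, row[1]=ISO3, row[3]=FIPS and
# row[4]=country name, so a row beginning
#     NO<TAB>NOR<TAB>578<TAB>NO<TAB>Norway<TAB>...
# would map all three codes to "Norway". The layout is inferred from the
# parsing code above rather than re-checked against geonames.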
def add_to_act(client, ta_list):
countries = countrylist()
for ta in ta_list["values"]:
name = ta["value"]
if "meta" not in ta:
warning("Missing meta information in MISP on Threat Actor {}".format(name))
continue
aliases = ta["meta"].get("synonyms", [])
country = ta["meta"].get("country", None)
location = None
if country and country in countries["iso"]:
location = countries["iso"][country]
elif country and country in countries["iso3"]:
location = countries["iso3"][country]
error(
"country code is not valid ISO code, but found match in iso3: %s\n" %
country)
elif country and country in countries["fips"]:
location = countries["fips"][country]
error(
"country code is not valid ISO code, but found match in fips3: %s\n" %
country)
else:
location = None
if location:
client.fact("sourceGeography")\
.destination("location", location)\
.source("threatActor", name)\
.add()
elif country:
warning(
"country code not found in ISO, ISO3 or FIPS: %s\n" %
country)
        # Loop over all aliases and link them to the threat actor
for alias in aliases:
if alias == name:
continue # Do not alias to ourself
client.fact("threatActorAlias")\
.bidirectional("threatActor", alias, "threatActor", name)\
.add()
if __name__ == '__main__':
args = parseargs()
client = act.Act(
args.act_baseurl,
args.user_id,
args.log_level,
args.log_file,
"misp-threat-actors")
# Get all reports from SCIO
ta = get_misp_threat_actors()
# Add IOCs from reports to the ACT platform
add_to_act(client, ta)
| [
"argparse.ArgumentParser",
"logging.warning",
"requests.get",
"urllib3.disable_warnings",
"logging.error",
"act.Act"
] | [((131, 198), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (155, 198), False, 'import urllib3\n'), ((259, 329), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get Threat Actors (MISP Galaxy)"""'}), "(description='Get Threat Actors (MISP Galaxy)')\n", (282, 329), False, 'import argparse\n'), ((1010, 1041), 'requests.get', 'requests.get', (['url'], {'verify': '(False)'}), '(url, verify=False)\n', (1022, 1041), False, 'import requests\n'), ((1160, 1191), 'requests.get', 'requests.get', (['url'], {'verify': '(False)'}), '(url, verify=False)\n', (1172, 1191), False, 'import requests\n'), ((3262, 3358), 'act.Act', 'act.Act', (['args.act_baseurl', 'args.user_id', 'args.log_level', 'args.log_file', '"""misp-threat-actors"""'], {}), "(args.act_baseurl, args.user_id, args.log_level, args.log_file,\n 'misp-threat-actors')\n", (3269, 3358), False, 'import act\n'), ((2161, 2249), 'logging.error', 'error', (["('country code is not valid ISO code, but found match in iso3: %s\\n' % country)"], {}), "('country code is not valid ISO code, but found match in iso3: %s\\n' %\n country)\n", (2166, 2249), False, 'from logging import error, warning\n'), ((2779, 2849), 'logging.warning', 'warning', (["('country code not found in ISO, ISO3 or FIPS: %s\\n' % country)"], {}), "('country code not found in ISO, ISO3 or FIPS: %s\\n' % country)\n", (2786, 2849), False, 'from logging import error, warning\n'), ((2396, 2485), 'logging.error', 'error', (["('country code is not valid ISO code, but found match in fips3: %s\\n' % country\n )"], {}), "('country code is not valid ISO code, but found match in fips3: %s\\n' %\n country)\n", (2401, 2485), False, 'from logging import error, warning\n')] |
"""
`icclim.models.frequency` wraps the concept of pandas frequency in order to resample
time series. `slice_mode` parameter of `icclim.index` is always converted to a
`Frequency`.
"""
import datetime
from enum import Enum
from typing import Any, Callable, List, Optional, Tuple, Union
import cftime
import numpy as np
import pandas as pd
import xarray as xr
from xarray.core.dataarray import DataArray
from icclim.icclim_exceptions import InvalidIcclimArgumentError
SliceMode = Union[Any, str, List[Union[str, Tuple, int]]]
def seasons_resampler(
month_list: List[int],
) -> Callable[[DataArray], Tuple[DataArray, DataArray]]:
"""
Seasonal resampling method generator.
    Returns a callable of DataArray which will resample the data to
    a season composed of the given months.
    It also attaches the corresponding time_bounds.
Parameters
----------
month_list : List[int]
        List of months identified by `{1..12}`.
Returns
-------
function: Callable[[DataArray], DataArray]
function resampling the input da to the wanted season.
"""
def resampler(da: DataArray) -> Tuple[DataArray, DataArray]:
da_years = np.unique(da.time.dt.year)
seasons_acc: List[DataArray] = []
time_bounds = []
new_time_axis = []
start_month = month_list[0]
end_month = month_list[-1]
filtered_da = month_filter(da, month_list)
        # TODO: maybe raise a warning if the month_list is not made of consecutive months
# (case of user error)
for year in da_years:
if start_month > end_month:
int_year = year - 1
else:
int_year = year
first_time = filtered_da.time.values[0]
if isinstance(first_time, cftime.datetime):
start = cftime.datetime(
year, start_month, 1, calendar=first_time.calendar
)
end = cftime.datetime(
year, end_month + 1, 1, calendar=first_time.calendar
)
else:
start = pd.to_datetime(f"{int_year}-{start_month}")
end = pd.to_datetime(f"{year}-{end_month + 1}")
end = end - datetime.timedelta(days=1)
season = filtered_da.sel(time=slice(start, end)).sum("time")
new_time_axis.append(start + (end - start) / 2)
time_bounds.append([start, end])
seasons_acc.append(season)
seasons = xr.concat(seasons_acc, "time")
seasons.coords["time"] = ("time", new_time_axis)
time_bounds_da = DataArray(
data=time_bounds,
dims=["time", "bounds"],
coords=[("time", seasons.time.values), ("bounds", [0, 1])],
)
return seasons, time_bounds_da
return resampler
def month_filter(da: DataArray, month_list: List[int]) -> DataArray:
return da.sel(time=da.time.dt.month.isin(month_list))
def _add_time_bounds(freq: str) -> Callable[[DataArray], Tuple[DataArray, DataArray]]:
def add_bounds(da: DataArray) -> Tuple[DataArray, DataArray]:
# da should already be resampled to freq
if isinstance(da.indexes.get("time"), xr.CFTimeIndex):
offset = xr.coding.cftime_offsets.to_offset(freq)
start = np.array(
[
cftime.datetime(
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
calendar=date.calendar,
)
for date in da.indexes.get("time")
]
)
end = start + offset
end = end - datetime.timedelta(days=1)
else:
offset = pd.tseries.frequencies.to_offset(freq)
start = pd.to_datetime(da.time.dt.floor("D"))
end = start + offset
end = end - pd.Timedelta(days=1)
da["time"] = start + (end - start) / 2
time_bounds_da = DataArray(
data=list(zip(start, end)),
dims=["time", "bounds"],
coords=[("time", da.time.values), ("bounds", [0, 1])],
)
return da, time_bounds_da
return add_bounds
class Frequency(Enum):
"""
The sampling frequency of the resulting dataset.
"""
MONTH = ("MS", ["month", "MS"], "monthly time series", _add_time_bounds("MS"))
""" Resample to monthly values"""
AMJJAS = (
"MS",
["AMJJAS"],
"summer half-year time series",
seasons_resampler([*range(4, 9)]),
)
""" Resample to summer half-year, from April to September included."""
ONDJFM = (
"MS",
["ONDJFM"],
"winter half-year time series",
seasons_resampler([10, 11, 12, 1, 2, 3]),
)
""" Resample to winter half-year, from October to March included."""
DJF = ("MS", ["DJF"], "winter time series", seasons_resampler([12, 1, 2]))
""" Resample to winter season, from December to February included."""
MAM = ("MS", ["MAM"], "spring time series", seasons_resampler([*range(3, 6)]))
""" Resample to spring season, from March to May included."""
JJA = ("MS", ["JJA"], "summer time series", seasons_resampler([*range(6, 9)]))
""" Resample to summer season, from June to Agust included."""
SON = ("MS", ["SON"], "autumn time series", seasons_resampler([*range(9, 12)]))
""" Resample to fall season, from September to November included."""
CUSTOM = ("MS", [], None, None)
""" Resample to custom values. Do not use as is, use `slice_mode` with month or season
keywords instead.
"""
YEAR = ("YS", ["year", "YS"], "annual time series", _add_time_bounds("YS"))
""" Resample to yearly values."""
def __init__(
self,
panda_time: str,
accepted_values: List[str],
description: Optional[str] = None,
post_processing: Optional[
Callable[[DataArray], Tuple[DataArray, DataArray]]
] = None,
):
self.panda_freq: str = panda_time
self.accepted_values: List[str] = accepted_values
self.description = description
self.post_processing = post_processing
@staticmethod
def lookup(slice_mode: SliceMode) -> Any:
if isinstance(slice_mode, Frequency):
return slice_mode
if isinstance(slice_mode, str):
return _get_frequency_from_string(slice_mode)
if isinstance(slice_mode, list):
return _get_frequency_from_list(slice_mode)
raise InvalidIcclimArgumentError(
f"Unknown frequency {slice_mode}."
f"Use a Frequency from {[f for f in Frequency]}"
)
def _get_frequency_from_string(slice_mode: str) -> Frequency:
for freq in Frequency:
if freq.name == slice_mode.upper() or slice_mode.upper() in map(
str.upper, freq.accepted_values
):
return freq
raise InvalidIcclimArgumentError(f"Unknown frequency {slice_mode}.")
def _get_frequency_from_list(slice_mode_list: List) -> Frequency:
if len(slice_mode_list) < 2:
raise InvalidIcclimArgumentError(
f"The given slice list {slice_mode_list}"
f" has a length of {len(slice_mode_list)}."
f" The maximum length here is 2."
)
sampling_freq = slice_mode_list[0]
months = slice_mode_list[1]
custom_freq = Frequency.CUSTOM
if sampling_freq == "month":
custom_freq.post_processing = lambda da: month_filter(da, months)
custom_freq.description = f"monthly time series (months: {months})"
elif sampling_freq == "season":
        if isinstance(months, tuple):
rearranged_months = months[1] + months[0]
custom_freq.post_processing = seasons_resampler(rearranged_months)
custom_freq.description = (
f"seasonal time series (season: {rearranged_months})"
)
else:
custom_freq.post_processing = seasons_resampler(months)
custom_freq.description = f"seasonal time series (season: {months})"
else:
raise InvalidIcclimArgumentError(
f"Unknown frequency {slice_mode_list}. "
"The sampling frequency must be one of {'season', 'month'}"
)
return custom_freq
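# Illustrative usage sketch (added for clarity, not part of the icclim source):
# the `slice_mode` values accepted by `Frequency.lookup`, based on the parsing
# implemented above.
#
#     Frequency.lookup("DJF")                   # -> Frequency.DJF
#     Frequency.lookup(["month", [1, 2]])       # -> Frequency.CUSTOM, keeping only Jan/Feb
#     Frequency.lookup(["season", [6, 7, 8]])   # -> Frequency.CUSTOM, a JJA-like season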
| [
"xarray.coding.cftime_offsets.to_offset",
"xarray.core.dataarray.DataArray",
"numpy.unique",
"pandas.Timedelta",
"xarray.concat",
"pandas.tseries.frequencies.to_offset",
"cftime.datetime",
"icclim.icclim_exceptions.InvalidIcclimArgumentError",
"datetime.timedelta",
"pandas.to_datetime"
] | [((7101, 7163), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode}."""'], {}), "(f'Unknown frequency {slice_mode}.')\n", (7127, 7163), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((1198, 1224), 'numpy.unique', 'np.unique', (['da.time.dt.year'], {}), '(da.time.dt.year)\n', (1207, 1224), True, 'import numpy as np\n'), ((2527, 2557), 'xarray.concat', 'xr.concat', (['seasons_acc', '"""time"""'], {}), "(seasons_acc, 'time')\n", (2536, 2557), True, 'import xarray as xr\n'), ((2640, 2756), 'xarray.core.dataarray.DataArray', 'DataArray', ([], {'data': 'time_bounds', 'dims': "['time', 'bounds']", 'coords': "[('time', seasons.time.values), ('bounds', [0, 1])]"}), "(data=time_bounds, dims=['time', 'bounds'], coords=[('time',\n seasons.time.values), ('bounds', [0, 1])])\n", (2649, 2756), False, 'from xarray.core.dataarray import DataArray\n'), ((6702, 6819), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode}.Use a Frequency from {[f for f in Frequency]}"""'], {}), "(\n f'Unknown frequency {slice_mode}.Use a Frequency from {[f for f in Frequency]}'\n )\n", (6728, 6819), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((3278, 3318), 'xarray.coding.cftime_offsets.to_offset', 'xr.coding.cftime_offsets.to_offset', (['freq'], {}), '(freq)\n', (3312, 3318), True, 'import xarray as xr\n'), ((3894, 3932), 'pandas.tseries.frequencies.to_offset', 'pd.tseries.frequencies.to_offset', (['freq'], {}), '(freq)\n', (3926, 3932), True, 'import pandas as pd\n'), ((8270, 8407), 'icclim.icclim_exceptions.InvalidIcclimArgumentError', 'InvalidIcclimArgumentError', (['f"""Unknown frequency {slice_mode_list}. The sampling frequency must be one of {{\'season\', \'month\'}}"""'], {}), '(\n f"Unknown frequency {slice_mode_list}. 
The sampling frequency must be one of {{\'season\', \'month\'}}"\n )\n', (8296, 8407), False, 'from icclim.icclim_exceptions import InvalidIcclimArgumentError\n'), ((1855, 1922), 'cftime.datetime', 'cftime.datetime', (['year', 'start_month', '(1)'], {'calendar': 'first_time.calendar'}), '(year, start_month, 1, calendar=first_time.calendar)\n', (1870, 1922), False, 'import cftime\n'), ((1983, 2052), 'cftime.datetime', 'cftime.datetime', (['year', '(end_month + 1)', '(1)'], {'calendar': 'first_time.calendar'}), '(year, end_month + 1, 1, calendar=first_time.calendar)\n', (1998, 2052), False, 'import cftime\n'), ((2133, 2176), 'pandas.to_datetime', 'pd.to_datetime', (['f"""{int_year}-{start_month}"""'], {}), "(f'{int_year}-{start_month}')\n", (2147, 2176), True, 'import pandas as pd\n'), ((2199, 2240), 'pandas.to_datetime', 'pd.to_datetime', (['f"""{year}-{end_month + 1}"""'], {}), "(f'{year}-{end_month + 1}')\n", (2213, 2240), True, 'import pandas as pd\n'), ((2265, 2291), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2283, 2291), False, 'import datetime\n'), ((3832, 3858), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3850, 3858), False, 'import datetime\n'), ((4048, 4068), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4060, 4068), True, 'import pandas as pd\n'), ((3387, 3500), 'cftime.datetime', 'cftime.datetime', (['date.year', 'date.month', 'date.day', 'date.hour', 'date.minute', 'date.second'], {'calendar': 'date.calendar'}), '(date.year, date.month, date.day, date.hour, date.minute,\n date.second, calendar=date.calendar)\n', (3402, 3500), False, 'import cftime\n')] |
# -*- coding: utf-8 -*-
# this file is generated by the gen_kdata_schema function, don't change it
from sqlalchemy.ext.declarative import declarative_base
from zvt.api.data_type import Region, Provider, EntityType
from zvt.domain.quotes import StockKdataCommon
from zvt.contract.register import register_schema
KdataBase = declarative_base()
class Stock1monKdata(KdataBase, StockKdataCommon):
__tablename__ = 'stock_1mon_kdata'
class Stock1monHfqKdata(KdataBase, StockKdataCommon):
__tablename__ = 'stock_1mon_hfq_kdata'
class Stock1monBfqKdata(KdataBase, StockKdataCommon):
__tablename__ = 'stock_1mon_bfq_kdata'
register_schema(regions=[Region.CHN, Region.US],
providers={Region.CHN: [Provider.JoinQuant, Provider.BaoStock],
Region.US: [Provider.Yahoo]},
db_name='stock_1mon_kdata',
schema_base=KdataBase,
entity_type=EntityType.Stock)
register_schema(regions=[Region.CHN, Region.US],
providers={Region.CHN: [Provider.JoinQuant, Provider.BaoStock],
Region.US: [Provider.Yahoo]},
db_name='stock_1mon_hfq_kdata',
schema_base=KdataBase,
entity_type=EntityType.Stock)
register_schema(regions=[Region.CHN, Region.US],
providers={Region.CHN: [Provider.JoinQuant, Provider.BaoStock],
Region.US: [Provider.Yahoo]},
db_name='stock_1mon_bfq_kdata',
schema_base=KdataBase,
entity_type=EntityType.Stock)
# the __all__ is generated
__all__ = ['Stock1monKdata', 'Stock1monHfqKdata', 'Stock1monBfqKdata']
| [
"zvt.contract.register.register_schema",
"sqlalchemy.ext.declarative.declarative_base"
] | [((322, 340), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (338, 340), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((633, 870), 'zvt.contract.register.register_schema', 'register_schema', ([], {'regions': '[Region.CHN, Region.US]', 'providers': '{Region.CHN: [Provider.JoinQuant, Provider.BaoStock], Region.US: [Provider.\n Yahoo]}', 'db_name': '"""stock_1mon_kdata"""', 'schema_base': 'KdataBase', 'entity_type': 'EntityType.Stock'}), "(regions=[Region.CHN, Region.US], providers={Region.CHN: [\n Provider.JoinQuant, Provider.BaoStock], Region.US: [Provider.Yahoo]},\n db_name='stock_1mon_kdata', schema_base=KdataBase, entity_type=\n EntityType.Stock)\n", (648, 870), False, 'from zvt.contract.register import register_schema\n'), ((949, 1190), 'zvt.contract.register.register_schema', 'register_schema', ([], {'regions': '[Region.CHN, Region.US]', 'providers': '{Region.CHN: [Provider.JoinQuant, Provider.BaoStock], Region.US: [Provider.\n Yahoo]}', 'db_name': '"""stock_1mon_hfq_kdata"""', 'schema_base': 'KdataBase', 'entity_type': 'EntityType.Stock'}), "(regions=[Region.CHN, Region.US], providers={Region.CHN: [\n Provider.JoinQuant, Provider.BaoStock], Region.US: [Provider.Yahoo]},\n db_name='stock_1mon_hfq_kdata', schema_base=KdataBase, entity_type=\n EntityType.Stock)\n", (964, 1190), False, 'from zvt.contract.register import register_schema\n'), ((1269, 1510), 'zvt.contract.register.register_schema', 'register_schema', ([], {'regions': '[Region.CHN, Region.US]', 'providers': '{Region.CHN: [Provider.JoinQuant, Provider.BaoStock], Region.US: [Provider.\n Yahoo]}', 'db_name': '"""stock_1mon_bfq_kdata"""', 'schema_base': 'KdataBase', 'entity_type': 'EntityType.Stock'}), "(regions=[Region.CHN, Region.US], providers={Region.CHN: [\n Provider.JoinQuant, Provider.BaoStock], Region.US: [Provider.Yahoo]},\n db_name='stock_1mon_bfq_kdata', schema_base=KdataBase, entity_type=\n EntityType.Stock)\n", (1284, 1510), False, 'from zvt.contract.register import register_schema\n')] |
"""
Test grades.py
"""
# Standard library imports
from unittest.mock import patch
# Third-party library imports
import pytest
class TestDataIsRetrievedCorrectly:
@staticmethod
def test_count_finished_modules(local_grades):
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 100, "level": 4},
"Module 2": {"module_score": 0, "level": 4},
"Module 3": {"module_score": 80, "level": 4},
"Module 4": {"module_score": 75.5, "level": 4},
"Module 6": {"module_score": -1, "level": 4},
"Module 5": {"level": 4},
"Module 7": {},
},
clear=True,
):
            # All are valid except `Module 5` and `Module 7`, which don't have a score
# `0` means FAILED, `-1` means we got recognition of prior learning
assert local_grades.get_num_of_finished_modules() == 5
@staticmethod
def test_get_list_of_finished_modules(local_grades):
expected_list = [
{"Module 1": {"module_score": 100, "level": 4}},
{"Module 2": {"module_score": -1, "level": 4}},
{"Module 3": {"module_score": 80, "level": 4}},
{"Module 4": {"module_score": 75.5, "level": 5}},
{"Module 5": {"module_score": 0, "level": 4}},
]
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 100, "level": 4},
"Module 2": {"module_score": -1, "level": 4},
"Module 3": {"module_score": 80, "level": 4},
"Module 4": {"module_score": 75.5, "level": 5},
"Module 5": {"module_score": 0, "level": 4},
"Module 6": {"level": 4},
"Module 7": {},
},
clear=True,
):
# `0` means the module was FAILED. `-1` means the module was
# not taken but has been recognized through prior learning, so
# it is also considered done.
assert local_grades.get_list_of_finished_modules() == expected_list
@staticmethod
def test_get_list_of_modules_in_progress(local_grades):
local_grades.data["Algorithms and Data Structures I"] = {
"final_score": 65,
"final_weight": 70,
"midterm_score": 79,
"midterm_weight": 30,
"module_score": None,
"level": 4,
}
local_grades.data["Agile Software Projects"] = {
"final_score": None,
"final_weight": 70,
"midterm_score": 60,
"midterm_weight": 30,
"module_score": None,
"level": 5,
}
local_grades.data["Algorithms and Data Structures II"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 75,
"midterm_weight": 50,
"module_score": None,
"level": 5,
}
expected_list = [
{
"Algorithms and Data Structures I": {
"final_score": 65,
"final_weight": 70,
"midterm_score": 79,
"midterm_weight": 30,
"level": 4,
}
},
{
"Agile Software Projects": {
"midterm_score": 60,
"midterm_weight": 30,
"level": 5,
}
},
{
"Algorithms and Data Structures II": {
"midterm_score": 75,
"midterm_weight": 50,
"level": 5,
}
},
]
assert local_grades.get_list_of_modules_in_progress() == expected_list
@staticmethod
def test_get_scores_of_modules_in_progress(local_grades):
local_grades.data["Algorithms and Data Structures I"] = {
"final_score": 65,
"final_weight": 70,
"midterm_score": 79,
"midterm_weight": 30,
"module_score": None,
"level": 4,
}
local_grades.data["Agile Software Projects"] = {
"final_score": None,
"final_weight": 70,
"midterm_score": 60,
"midterm_weight": 30,
"module_score": None,
"level": 5,
}
local_grades.data["Algorithms and Data Structures II"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 75,
"midterm_weight": 50,
"module_score": None,
"level": 5,
}
results = local_grades.get_scores_of_modules_in_progress()
assert results == [69.2, 60, 75]
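    # The expected scores above are consistent with weighting each available
    # component by its declared weight and normalizing over the weights that
    # are present (inferred from the test data, not from the implementation):
    #     (65 * 70 + 79 * 30) / (70 + 30) = 69.2
    #     (60 * 30) / 30 = 60,    (75 * 50) / 50 = 75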
@staticmethod
def test_get_module_scores_of_finished_modules(local_grades):
expected_list = [100, 80, 75.5, 0]
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 100, "level": 4},
"Module 3": {"module_score": 80, "level": 4},
"Module 4": {"module_score": 75.5, "level": 4},
"Module 6": {"module_score": 0, "level": 4},
"Module 2": {"module_score": -1},
"Module 5": {"level": 4},
"Module 7": {},
},
clear=True,
):
assert (
local_grades.get_module_scores_of_finished_modules()
== expected_list
)
class TestDataIsCalculatedWell:
@staticmethod
def test_unweighted_average(local_grades):
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 100, "level": 4},
"Module 2": {"module_score": -1, "level": 5},
"Module 3": {"module_score": 80, "level": 6},
"Module 4": {"module_score": 79.7, "level": 4},
"Module 5": {"level": 4},
"Module 6": {"module_score": 0},
"Module 7": {},
},
clear=True,
):
assert local_grades.unweighted_average == 86.57
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 97.23, "level": 4},
"Module 2": {"module_score": 93.58, "level": 4},
"Module 3": {"module_score": 91.11, "level": 4},
"Module 4": {},
"Module 5": {"level": 4},
},
clear=True,
):
assert local_grades.unweighted_average == 93.97
with patch.dict(
local_grades.data,
{},
clear=True,
):
assert local_grades.unweighted_average == 0
@staticmethod
def test_weighted_average(
local_grades,
):
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 100, "level": 4},
"Module 2": {"module_score": -1, "level": 4},
"Module 3": {"module_score": 80},
"Module 4": {"module_score": 79.7, "level": 5},
"Module 5": {"level": 4},
"Module 6": {"module_score": 0},
"Module 7": {},
},
clear=True,
):
# skip module 3: `level` is expected
assert local_grades.weighted_average == 84.78
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 97.23, "level": 4},
"Module 2": {"module_score": 93.58, "level": 5},
"Module 3": {"module_score": 91.11, "level": 6},
"Module 4": {},
"Module 5": {"level": 4},
},
clear=True,
):
assert local_grades.weighted_average == 92.61
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 97.23, "level": 4},
"Module 2": {"module_score": 93.58, "level": 5},
"Final Project": {"module_score": 89, "level": 6},
"Module 4": {},
"Module 5": {"level": 4},
},
clear=True,
):
# weight of "Final Project" is twice that of another module level 6
# (97.23 + 93.58 * 3 + 89 * 10) / 14
assert local_grades.weighted_average == 90.57
with patch.dict(
local_grades.data,
{},
clear=True,
):
assert local_grades.weighted_average == 0
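    # The expected values above imply per-level weights of 1 (level 4),
    # 3 (level 5) and 5 (level 6), with the final project counting double a
    # level 6 module, i.e. 10. These weights are inferred from the test data,
    # not taken from the implementation. Worked example for the third case:
    #     (97.23 * 1 + 93.58 * 3 + 89 * 10) / (1 + 3 + 10) = 1267.97 / 14 ~ 90.57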
@staticmethod
def test_calculate_unweighted_average_in_progress(
local_grades,
):
# in progress: [69.2, 60, 75]
local_grades.data["Algorithms and Data Structures I"] = {
"final_score": 65,
"final_weight": 70,
"midterm_score": 79,
"midterm_weight": 30,
"module_score": None,
"level": 4,
}
local_grades.data["Agile Software Projects"] = {
"final_score": None,
"final_weight": 70,
"midterm_score": 60,
"midterm_weight": 30,
"module_score": None,
"level": 5,
}
local_grades.data["Algorithms and Data Structures II"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 75,
"midterm_weight": 50,
"module_score": None,
"level": 5,
}
# finished : [80, 82, 85]
local_grades.data["How Computers Work"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 60,
"midterm_weight": 50,
"module_score": 80,
"level": 4,
}
local_grades.data["Introduction to Programming I"] = {
"final_score": 80,
"final_weight": 50,
"midterm_score": 60,
"midterm_weight": 50,
"module_score": 82,
"level": 4,
}
local_grades.data["Computational Mathematics"] = {
"final_score": 80,
"final_weight": 50,
"midterm_score": None,
"midterm_weight": 50,
"module_score": 85,
"level": 4,
}
        # average of all that: 75.2
result = local_grades.unweighted_average_including_in_progress
assert result == 75.2
@staticmethod
def test_weighted_average_in_progress(
local_grades,
):
# in progress: [69.2, 60, 75], respectively [L4, L5, L5]
local_grades.data["Algorithms and Data Structures I"] = {
"final_score": 65,
"final_weight": 70,
"midterm_score": 79,
"midterm_weight": 30,
"module_score": None,
"level": 4,
}
local_grades.data["Agile Software Projects"] = {
"final_score": None,
"final_weight": 70,
"midterm_score": 60,
"midterm_weight": 30,
"module_score": None,
"level": 5,
}
local_grades.data["Algorithms and Data Structures II"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 75,
"midterm_weight": 50,
"module_score": None,
"level": 5,
}
# finished : [80, 82, 85], respectively [L4, L4, L4]
local_grades.data["How Computers Work"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 60,
"midterm_weight": 50,
"module_score": 80,
"level": 4,
}
local_grades.data["Introduction to Programming I"] = {
"final_score": 80,
"final_weight": 50,
"midterm_score": 60,
"midterm_weight": 50,
"module_score": 82,
"level": 4,
}
local_grades.data["Computational Mathematics"] = {
"final_score": 80,
"final_weight": 50,
"midterm_score": None,
"midterm_weight": 50,
"module_score": 85,
"level": 4,
}
# weighted average of all that: 72.12
result = local_grades.weighted_average_in_progress
assert result == 72.12
@staticmethod
def test_weighted_average_in_progress_only(
local_grades,
):
# in progress: [69.2, 60, 75], respectively [L4, L5, L5]
local_grades.data["Algorithms and Data Structures I"] = {
"final_score": 65,
"final_weight": 70,
"midterm_score": 79,
"midterm_weight": 30,
"module_score": None,
"level": 4,
}
local_grades.data["Agile Software Projects"] = {
"final_score": None,
"final_weight": 70,
"midterm_score": 60,
"midterm_weight": 30,
"module_score": None,
"level": 5,
}
local_grades.data["Algorithms and Data Structures II"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 75,
"midterm_weight": 50,
"module_score": None,
"level": 5,
}
# finished : [80], [L4]
local_grades.data["How Computers Work"] = {
"final_score": None,
"final_weight": 50,
"midterm_score": 60,
"midterm_weight": 50,
"module_score": 80,
"level": 4,
}
# weighted average of modules in progress: 67.74
result = local_grades.weighted_average_in_progress_only
assert result == 67.74
@staticmethod
def test_get_module_scores_of_finished_modules_for_system_us(local_grades):
expected_module_scores = {
"Module 1": "A",
"Module 2": "N/A",
"Module 3": "B",
"Module 4": "C",
"Module 5": "F",
"Module 6": "A",
"Module 7": "D",
"Module 8": "A-",
"Module 9": "B+",
"Module 10": "B-",
"Module 11": "C+",
"Module 12": "C-",
"Module 13": "D+",
"Module 14": "D-",
}
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 95.2, "level": 4},
"Module 2": {
"module_score": -1,
"level": 4,
}, # not counted: i.e. N/A
"Module 3": {"module_score": 85, "level": 4},
"Module 4": {"module_score": 74.2, "level": 4},
"Module 5": {"module_score": 59.2, "level": 4},
"Module 6": {"module_score": 100, "level": 4},
"Module 7": {"module_score": 64.5, "level": 4},
"Module 8": {"module_score": 91, "level": 4},
"Module 9": {"module_score": 88.7, "level": 5},
"Module 10": {"module_score": 81.4, "level": 5},
"Module 11": {"module_score": 79, "level": 5},
"Module 12": {"module_score": 70, "level": 5},
"Module 13": {"module_score": 67.1, "level": 5},
"Module 14": {"module_score": 61, "level": 5},
},
clear=True,
):
out = (
local_grades.get_module_scores_of_finished_modules_for_system(
system="US"
)
)
assert out == expected_module_scores
@staticmethod
def test_get_scores_of_modules_in_progress_for_system_us(local_grades):
expected_module_scores = {
"Module 1": "A",
"Module 3": "B",
"Module 4": "C",
}
with patch.dict(
local_grades.data,
{
"Module 1": {
"final_score": 95.2,
"final_weight": 70,
"level": 4,
},
"Module 2": {
"module_score": -1,
"level": 4,
}, # not counted: i.e. N/A
"Module 3": {
"midterm_score": 85.5,
"midterm_weight": 50,
"level": 4,
},
"Module 4": {
"final_score": 70,
"final_weight": 70,
"midterm_score": 80,
"midterm_weight": 30,
"level": 4,
},
"Module 5": {
"module_score": 59.2,
"level": 4,
}, # not counted: module_score is present
"Module 6": {
"module_score": 59.2,
"final_score": 59.2,
"final_weight": 70,
"midterm_score": 59.2,
"midterm_weight": 30,
"level": 4,
}, # not counted: module_score is present
},
clear=True,
):
out = local_grades.get_scores_of_modules_in_progress_for_system(
system="US"
)
assert out == expected_module_scores
@staticmethod
def test_get_scores_of_modules_in_progress_for_system_ects(
local_grades,
):
expected_module_scores = {
"Module 1": "A",
"Module 3": "A",
"Module 4": "C",
}
with patch.dict(
local_grades.data,
{
"Module 1": {
"final_score": 95.2,
"final_weight": 70,
"level": 4,
},
"Module 2": {
"module_score": -1,
"level": 4,
}, # not counted: i.e. N/A
"Module 3": {
"midterm_score": 85.5,
"midterm_weight": 50,
"level": 4,
},
"Module 4": {
"final_score": 55,
"final_weight": 70,
"midterm_score": 54,
"midterm_weight": 30,
"level": 4,
},
"Module 5": {
"module_score": 59.2,
"level": 4,
}, # not counted: module_score is present
"Module 6": {
"module_score": 59.2,
"final_score": 59.2,
"final_weight": 70,
"midterm_score": 59.2,
"midterm_weight": 30,
"level": 4,
}, # not counted: module_score is present
},
clear=True,
):
out = local_grades.get_scores_of_modules_in_progress_for_system(
system="ECTS"
)
assert out == expected_module_scores
@staticmethod
def test_get_module_scores_of_finished_modules_for_system_ects(
local_grades,
):
expected_module_scores = {
"Module 1": "A",
"Module 2": "N/A",
"Module 3": "B",
"Module 4": "C",
"Module 5": "E/F",
"Module 6": "A",
"Module 7": "D",
}
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": 95.2, "level": 4},
"Module 2": {"module_score": -1, "level": 4}, # not counted
"Module 3": {"module_score": 60, "level": 4},
"Module 4": {"module_score": 50.3, "level": 4},
"Module 5": {"module_score": 0, "level": 4},
"Module 6": {"module_score": 100, "level": 4},
"Module 7": {"module_score": 41, "level": 4},
},
clear=True,
):
out = (
local_grades.get_module_scores_of_finished_modules_for_system(
system="ECTS"
)
)
assert out == expected_module_scores
@staticmethod
def test_total_credits(local_grades):
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": -1, "level": 4}, # counts as done
"Module 2": {"module_score": 80, "level": 5},
"Module 3": {"module_score": 70, "level": 6},
"Module 4": {}, # don't count when there's no data
},
clear=True,
):
assert local_grades.total_credits == 45
with patch.dict(
local_grades.data,
{
"Final Project": {"module_score": 80, "level": 6}
}, # counts double
clear=True,
):
assert local_grades.total_credits == 30
with patch.dict(
local_grades.data,
{
"final project": {
"module_score": 80,
"level": 6,
}, # make sure capitalization does not matter
},
clear=True,
):
assert local_grades.total_credits == 30
with patch.dict(
local_grades.data,
{
"Module 1": {"module_score": -1, "level": 5}, # counts as done
"Final Project": {
"module_score": 80,
"level": 6,
}, # counts double
"Module 3": {"module_score": 90.5, "level": 5},
"Module 4": {
"level": 5
}, # don't count when there's no module_score
},
clear=True,
):
assert local_grades.total_credits == 60
with patch.dict(
local_grades.data,
{
# do not count failed attempts
"Module 2": {"module_score": 34, "level": 4},
"Module 3": {"module_score": 90.5, "level": 4},
"Module 4": {"level": 4},
},
clear=True,
):
assert local_grades.total_credits == 15
@staticmethod
@pytest.mark.parametrize(
"num_credits,exp_percentage",
[
(-100, -1),
(-2, -1),
(-1, -1),
(0, 0),
(15, 4.17),
(30, 8.33),
(60, 16.67),
(135, 37.5),
(240, 66.67),
(300, 83.33),
(360, 100),
(375, 100), # can't have more than 360 credits, cap it
],
)
def test_get_percentage_degree_done(
local_grades, num_credits, exp_percentage
):
assert (
local_grades.get_percentage_degree_done(num_credits)
== exp_percentage
)
| [
"pytest.mark.parametrize",
"unittest.mock.patch.dict"
] | [((22455, 22661), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_credits,exp_percentage"""', '[(-100, -1), (-2, -1), (-1, -1), (0, 0), (15, 4.17), (30, 8.33), (60, 16.67\n ), (135, 37.5), (240, 66.67), (300, 83.33), (360, 100), (375, 100)]'], {}), "('num_credits,exp_percentage', [(-100, -1), (-2, -1),\n (-1, -1), (0, 0), (15, 4.17), (30, 8.33), (60, 16.67), (135, 37.5), (\n 240, 66.67), (300, 83.33), (360, 100), (375, 100)])\n", (22478, 22661), False, 'import pytest\n'), ((247, 582), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 100, 'level': 4}, 'Module 2': {'module_score':\n 0, 'level': 4}, 'Module 3': {'module_score': 80, 'level': 4},\n 'Module 4': {'module_score': 75.5, 'level': 4}, 'Module 6': {\n 'module_score': -1, 'level': 4}, 'Module 5': {'level': 4}, 'Module 7': {}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 100, 'level': 4\n }, 'Module 2': {'module_score': 0, 'level': 4}, 'Module 3': {\n 'module_score': 80, 'level': 4}, 'Module 4': {'module_score': 75.5,\n 'level': 4}, 'Module 6': {'module_score': -1, 'level': 4}, 'Module 5':\n {'level': 4}, 'Module 7': {}}, clear=True)\n", (257, 582), False, 'from unittest.mock import patch\n'), ((1387, 1723), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 100, 'level': 4}, 'Module 2': {'module_score':\n -1, 'level': 4}, 'Module 3': {'module_score': 80, 'level': 4},\n 'Module 4': {'module_score': 75.5, 'level': 5}, 'Module 5': {\n 'module_score': 0, 'level': 4}, 'Module 6': {'level': 4}, 'Module 7': {}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 100, 'level': 4\n }, 'Module 2': {'module_score': -1, 'level': 4}, 'Module 3': {\n 'module_score': 80, 'level': 4}, 'Module 4': {'module_score': 75.5,\n 'level': 5}, 'Module 5': {'module_score': 0, 'level': 4}, 'Module 6': {\n 'level': 4}, 'Module 7': {}}, clear=True)\n", (1397, 1723), False, 'from unittest.mock import patch\n'), ((4970, 5293), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 100, 'level': 4}, 'Module 3': {'module_score':\n 80, 'level': 4}, 'Module 4': {'module_score': 75.5, 'level': 4},\n 'Module 6': {'module_score': 0, 'level': 4}, 'Module 2': {\n 'module_score': -1}, 'Module 5': {'level': 4}, 'Module 7': {}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 100, 'level': 4\n }, 'Module 3': {'module_score': 80, 'level': 4}, 'Module 4': {\n 'module_score': 75.5, 'level': 4}, 'Module 6': {'module_score': 0,\n 'level': 4}, 'Module 2': {'module_score': -1}, 'Module 5': {'level': 4},\n 'Module 7': {}}, clear=True)\n", (4980, 5293), False, 'from unittest.mock import patch\n'), ((5700, 6023), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 100, 'level': 4}, 'Module 2': {'module_score':\n -1, 'level': 5}, 'Module 3': {'module_score': 80, 'level': 6},\n 'Module 4': {'module_score': 79.7, 'level': 4}, 'Module 5': {'level': 4\n }, 'Module 6': {'module_score': 0}, 'Module 7': {}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 100, 'level': 4\n }, 'Module 2': {'module_score': -1, 'level': 5}, 'Module 3': {\n 'module_score': 80, 'level': 6}, 'Module 4': {'module_score': 79.7,\n 'level': 4}, 'Module 5': {'level': 4}, 'Module 6': {'module_score': 0},\n 'Module 7': {}}, clear=True)\n", (5710, 6023), False, 'from unittest.mock import patch\n'), ((6254, 6500), 
'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 97.23, 'level': 4}, 'Module 2': {\n 'module_score': 93.58, 'level': 4}, 'Module 3': {'module_score': 91.11,\n 'level': 4}, 'Module 4': {}, 'Module 5': {'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 97.23, 'level':\n 4}, 'Module 2': {'module_score': 93.58, 'level': 4}, 'Module 3': {\n 'module_score': 91.11, 'level': 4}, 'Module 4': {}, 'Module 5': {\n 'level': 4}}, clear=True)\n", (6264, 6500), False, 'from unittest.mock import patch\n'), ((6703, 6748), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', '{}'], {'clear': '(True)'}), '(local_grades.data, {}, clear=True)\n', (6713, 6748), False, 'from unittest.mock import patch\n'), ((6945, 7256), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 100, 'level': 4}, 'Module 2': {'module_score':\n -1, 'level': 4}, 'Module 3': {'module_score': 80}, 'Module 4': {\n 'module_score': 79.7, 'level': 5}, 'Module 5': {'level': 4}, 'Module 6':\n {'module_score': 0}, 'Module 7': {}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 100, 'level': 4\n }, 'Module 2': {'module_score': -1, 'level': 4}, 'Module 3': {\n 'module_score': 80}, 'Module 4': {'module_score': 79.7, 'level': 5},\n 'Module 5': {'level': 4}, 'Module 6': {'module_score': 0}, 'Module 7':\n {}}, clear=True)\n", (6955, 7256), False, 'from unittest.mock import patch\n'), ((7534, 7780), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 97.23, 'level': 4}, 'Module 2': {\n 'module_score': 93.58, 'level': 5}, 'Module 3': {'module_score': 91.11,\n 'level': 6}, 'Module 4': {}, 'Module 5': {'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 97.23, 'level':\n 4}, 'Module 2': {'module_score': 93.58, 'level': 5}, 'Module 3': {\n 'module_score': 91.11, 'level': 6}, 'Module 4': {}, 'Module 5': {\n 'level': 4}}, clear=True)\n", (7544, 7780), False, 'from unittest.mock import patch\n'), ((7981, 8229), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 97.23, 'level': 4}, 'Module 2': {\n 'module_score': 93.58, 'level': 5}, 'Final Project': {'module_score': \n 89, 'level': 6}, 'Module 4': {}, 'Module 5': {'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 97.23, 'level':\n 4}, 'Module 2': {'module_score': 93.58, 'level': 5}, 'Final Project': {\n 'module_score': 89, 'level': 6}, 'Module 4': {}, 'Module 5': {'level': \n 4}}, clear=True)\n", (7991, 8229), False, 'from unittest.mock import patch\n'), ((8559, 8604), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', '{}'], {'clear': '(True)'}), '(local_grades.data, {}, clear=True)\n', (8569, 8604), False, 'from unittest.mock import patch\n'), ((14446, 15191), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 95.2, 'level': 4}, 'Module 2': {\n 'module_score': -1, 'level': 4}, 'Module 3': {'module_score': 85,\n 'level': 4}, 'Module 4': {'module_score': 74.2, 'level': 4}, 'Module 5':\n {'module_score': 59.2, 'level': 4}, 'Module 6': {'module_score': 100,\n 'level': 4}, 'Module 7': {'module_score': 64.5, 'level': 4}, 'Module 8':\n {'module_score': 91, 'level': 4}, 'Module 9': {'module_score': 88.7,\n 'level': 5}, 'Module 10': {'module_score': 81.4, 'level': 5},\n 'Module 11': {'module_score': 79, 'level': 5}, 'Module 12': {\n 
'module_score': 70, 'level': 5}, 'Module 13': {'module_score': 67.1,\n 'level': 5}, 'Module 14': {'module_score': 61, 'level': 5}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 95.2, 'level': \n 4}, 'Module 2': {'module_score': -1, 'level': 4}, 'Module 3': {\n 'module_score': 85, 'level': 4}, 'Module 4': {'module_score': 74.2,\n 'level': 4}, 'Module 5': {'module_score': 59.2, 'level': 4}, 'Module 6':\n {'module_score': 100, 'level': 4}, 'Module 7': {'module_score': 64.5,\n 'level': 4}, 'Module 8': {'module_score': 91, 'level': 4}, 'Module 9':\n {'module_score': 88.7, 'level': 5}, 'Module 10': {'module_score': 81.4,\n 'level': 5}, 'Module 11': {'module_score': 79, 'level': 5}, 'Module 12':\n {'module_score': 70, 'level': 5}, 'Module 13': {'module_score': 67.1,\n 'level': 5}, 'Module 14': {'module_score': 61, 'level': 5}}, clear=True)\n", (14456, 15191), False, 'from unittest.mock import patch\n'), ((15977, 16523), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'final_score': 95.2, 'final_weight': 70, 'level': 4},\n 'Module 2': {'module_score': -1, 'level': 4}, 'Module 3': {\n 'midterm_score': 85.5, 'midterm_weight': 50, 'level': 4}, 'Module 4': {\n 'final_score': 70, 'final_weight': 70, 'midterm_score': 80,\n 'midterm_weight': 30, 'level': 4}, 'Module 5': {'module_score': 59.2,\n 'level': 4}, 'Module 6': {'module_score': 59.2, 'final_score': 59.2,\n 'final_weight': 70, 'midterm_score': 59.2, 'midterm_weight': 30,\n 'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'final_score': 95.2,\n 'final_weight': 70, 'level': 4}, 'Module 2': {'module_score': -1,\n 'level': 4}, 'Module 3': {'midterm_score': 85.5, 'midterm_weight': 50,\n 'level': 4}, 'Module 4': {'final_score': 70, 'final_weight': 70,\n 'midterm_score': 80, 'midterm_weight': 30, 'level': 4}, 'Module 5': {\n 'module_score': 59.2, 'level': 4}, 'Module 6': {'module_score': 59.2,\n 'final_score': 59.2, 'final_weight': 70, 'midterm_score': 59.2,\n 'midterm_weight': 30, 'level': 4}}, clear=True)\n", (15987, 16523), False, 'from unittest.mock import patch\n'), ((17718, 18264), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'final_score': 95.2, 'final_weight': 70, 'level': 4},\n 'Module 2': {'module_score': -1, 'level': 4}, 'Module 3': {\n 'midterm_score': 85.5, 'midterm_weight': 50, 'level': 4}, 'Module 4': {\n 'final_score': 55, 'final_weight': 70, 'midterm_score': 54,\n 'midterm_weight': 30, 'level': 4}, 'Module 5': {'module_score': 59.2,\n 'level': 4}, 'Module 6': {'module_score': 59.2, 'final_score': 59.2,\n 'final_weight': 70, 'midterm_score': 59.2, 'midterm_weight': 30,\n 'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'final_score': 95.2,\n 'final_weight': 70, 'level': 4}, 'Module 2': {'module_score': -1,\n 'level': 4}, 'Module 3': {'midterm_score': 85.5, 'midterm_weight': 50,\n 'level': 4}, 'Module 4': {'final_score': 55, 'final_weight': 70,\n 'midterm_score': 54, 'midterm_weight': 30, 'level': 4}, 'Module 5': {\n 'module_score': 59.2, 'level': 4}, 'Module 6': {'module_score': 59.2,\n 'final_score': 59.2, 'final_weight': 70, 'midterm_score': 59.2,\n 'midterm_weight': 30, 'level': 4}}, clear=True)\n", (17728, 18264), False, 'from unittest.mock import patch\n'), ((19585, 19977), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': 95.2, 'level': 4}, 'Module 2': {\n 'module_score': -1, 'level': 4}, 'Module 3': {'module_score': 60,\n 'level': 4}, 'Module 
4': {'module_score': 50.3, 'level': 4}, 'Module 5':\n {'module_score': 0, 'level': 4}, 'Module 6': {'module_score': 100,\n 'level': 4}, 'Module 7': {'module_score': 41, 'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': 95.2, 'level': \n 4}, 'Module 2': {'module_score': -1, 'level': 4}, 'Module 3': {\n 'module_score': 60, 'level': 4}, 'Module 4': {'module_score': 50.3,\n 'level': 4}, 'Module 5': {'module_score': 0, 'level': 4}, 'Module 6': {\n 'module_score': 100, 'level': 4}, 'Module 7': {'module_score': 41,\n 'level': 4}}, clear=True)\n", (19595, 19977), False, 'from unittest.mock import patch\n'), ((20433, 20639), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': -1, 'level': 4}, 'Module 2': {'module_score':\n 80, 'level': 5}, 'Module 3': {'module_score': 70, 'level': 6},\n 'Module 4': {}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': -1, 'level': 4},\n 'Module 2': {'module_score': 80, 'level': 5}, 'Module 3': {\n 'module_score': 70, 'level': 6}, 'Module 4': {}}, clear=True)\n", (20443, 20639), False, 'from unittest.mock import patch\n'), ((20877, 20975), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Final Project': {'module_score': 80, 'level': 6}}"], {'clear': '(True)'}), "(local_grades.data, {'Final Project': {'module_score': 80,\n 'level': 6}}, clear=True)\n", (20887, 20975), False, 'from unittest.mock import patch\n'), ((21132, 21230), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'final project': {'module_score': 80, 'level': 6}}"], {'clear': '(True)'}), "(local_grades.data, {'final project': {'module_score': 80,\n 'level': 6}}, clear=True)\n", (21142, 21230), False, 'from unittest.mock import patch\n'), ((21474, 21697), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 1': {'module_score': -1, 'level': 5}, 'Final Project': {\n 'module_score': 80, 'level': 6}, 'Module 3': {'module_score': 90.5,\n 'level': 5}, 'Module 4': {'level': 5}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 1': {'module_score': -1, 'level': 5},\n 'Final Project': {'module_score': 80, 'level': 6}, 'Module 3': {\n 'module_score': 90.5, 'level': 5}, 'Module 4': {'level': 5}}, clear=True)\n", (21484, 21697), False, 'from unittest.mock import patch\n'), ((22057, 22229), 'unittest.mock.patch.dict', 'patch.dict', (['local_grades.data', "{'Module 2': {'module_score': 34, 'level': 4}, 'Module 3': {'module_score':\n 90.5, 'level': 4}, 'Module 4': {'level': 4}}"], {'clear': '(True)'}), "(local_grades.data, {'Module 2': {'module_score': 34, 'level': 4},\n 'Module 3': {'module_score': 90.5, 'level': 4}, 'Module 4': {'level': 4\n }}, clear=True)\n", (22067, 22229), False, 'from unittest.mock import patch\n')] |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code was generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.fred.run import fred
def recipe_fred_series_to_bigquery(config, auth, fred_api_key, fred_series_id, fred_units, fred_frequency, fred_aggregation_method, project, dataset):
"""Download federal reserve series.
Args:
auth (authentication) - Credentials used for writing data.
fred_api_key (string) - 32 character alpha-numeric lowercase string.
fred_series_id (string) - Series ID to pull data from.
fred_units (choice) - A key that indicates a data value transformation.
fred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to.
fred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation.
project (string) - Existing BigQuery project.
dataset (string) - Existing BigQuery dataset.
"""
fred(config, {
'auth':auth,
'api_key':fred_api_key,
'frequency':fred_frequency,
'series':[
{
'series_id':fred_series_id,
'units':fred_units,
'aggregation_method':fred_aggregation_method
}
],
'out':{
'bigquery':{
'project':project,
'dataset':dataset
}
}
})
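# A minimal example call (hypothetical values; assumes `config` carries valid
# StarThinker credentials and that the BigQuery project and dataset already exist):
#
#   recipe_fred_series_to_bigquery(
#       config, 'service', '<32-character FRED API key>', 'UNRATE',
#       'lin', '', 'avg', 'my-project', 'my_dataset')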
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Download federal reserve series.
1. Specify the values for a <a href='https://fred.stlouisfed.org/docs/api/fred/series_observations.html' target='_blank'>Fred observations API call</a>.
2. A table will appear in the dataset.
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth", help="Credentials used for writing data.", default='service')
parser.add_argument("-fred_api_key", help="32 character alpha-numeric lowercase string.", default='')
parser.add_argument("-fred_series_id", help="Series ID to pull data from.", default='')
parser.add_argument("-fred_units", help="A key that indicates a data value transformation.", default='lin')
parser.add_argument("-fred_frequency", help="An optional parameter that indicates a lower frequency to aggregate values to.", default='')
parser.add_argument("-fred_aggregation_method", help="A key that indicates the aggregation method used for frequency aggregation.", default='avg')
parser.add_argument("-project", help="Existing BigQuery project.", default='')
parser.add_argument("-dataset", help="Existing BigQuery dataset.", default='')
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_fred_series_to_bigquery(config, args.auth, args.fred_api_key, args.fred_series_id, args.fred_units, args.fred_frequency, args.fred_aggregation_method, args.project, args.dataset)
| [
"starthinker.util.configuration.Configuration",
"textwrap.dedent",
"starthinker.task.fred.run.fred"
] | [((1909, 2178), 'starthinker.task.fred.run.fred', 'fred', (['config', "{'auth': auth, 'api_key': fred_api_key, 'frequency': fred_frequency,\n 'series': [{'series_id': fred_series_id, 'units': fred_units,\n 'aggregation_method': fred_aggregation_method}], 'out': {'bigquery': {\n 'project': project, 'dataset': dataset}}}"], {}), "(config, {'auth': auth, 'api_key': fred_api_key, 'frequency':\n fred_frequency, 'series': [{'series_id': fred_series_id, 'units':\n fred_units, 'aggregation_method': fred_aggregation_method}], 'out': {\n 'bigquery': {'project': project, 'dataset': dataset}}})\n", (1913, 2178), False, 'from starthinker.task.fred.run import fred\n'), ((4119, 4252), 'starthinker.util.configuration.Configuration', 'Configuration', ([], {'project': 'args.project', 'user': 'args.user', 'service': 'args.service', 'client': 'args.client', 'key': 'args.key', 'verbose': 'args.verbose'}), '(project=args.project, user=args.user, service=args.service,\n client=args.client, key=args.key, verbose=args.verbose)\n', (4132, 4252), False, 'from starthinker.util.configuration import Configuration\n'), ((2399, 2683), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Download federal reserve series.\n\n 1. Specify the values for a <a href=\'https://fred.stlouisfed.org/docs/api/fred/series_observations.html\' target=\'_blank\'>Fred observations API call</a>.\n 2. A table will appear in the dataset.\n """'], {}), '(\n """\n Download federal reserve series.\n\n 1. Specify the values for a <a href=\'https://fred.stlouisfed.org/docs/api/fred/series_observations.html\' target=\'_blank\'>Fred observations API call</a>.\n 2. A table will appear in the dataset.\n """\n )\n', (2414, 2683), False, 'import textwrap\n')] |
"""Day 10: Monitoring Station"""
from collections import defaultdict, deque
from functools import partial
from math import atan2, gcd, sqrt
from typing import DefaultDict, Deque, Iterable, Iterator, List, NamedTuple, Set, Tuple
import pytest
import aoc
DAY = 10
class Location(NamedTuple):
across: int
down: int
class Direction(NamedTuple):
across: int
down: int
class AsteroidObservation(NamedTuple):
asteroid: Location
asteroids_visible: int
class ParsedGrid(NamedTuple):
width: int
height: int
asteroids: Set[Location]
def parse_grid(grid: str) -> ParsedGrid:
grid_rows = grid.strip().splitlines()
height = len(grid_rows)
width = len(grid_rows[0])
asteroids = {
Location(across, down)
for down in range(height)
for across in range(width)
if grid_rows[down][across] == "#"
}
return ParsedGrid(width, height, asteroids)
def reduce_direction(unsimplified: Direction) -> Direction:
divisor = gcd(abs(unsimplified.across), abs(unsimplified.down))
return Direction(
int(unsimplified.across / divisor), int(unsimplified.down / divisor)
)
def relative_distance(source: Location, dest: Location) -> Direction:
return Direction(dest.across - source.across, dest.down - source.down)
def basic_direction(source: Location, dest: Location) -> Direction:
return reduce_direction(relative_distance(source, dest))
def find_best_spot_for_monitoring_station(grid: ParsedGrid) -> AsteroidObservation:
observations: List[AsteroidObservation] = []
for this in grid.asteroids:
directions = {
basic_direction(this, other) for other in grid.asteroids if other != this
}
observations.append(AsteroidObservation(this, len(directions)))
return max(observations, key=lambda ao: ao.asteroids_visible)
@pytest.mark.parametrize(
"grid_string,expected_observation",
[
# fmt: off
(
"""\
.#..#
.....
#####
....#
...##
""", AsteroidObservation(Location(3, 4), 8)
),
(
"""\
......#.#.
#..#.#....
..#######.
.#.#.###..
.#..#.....
..#....#.#
#..#....#.
.##.#..###
##...#..#.
.#....####
""", AsteroidObservation(Location(5, 8), 33)
),
(
"""\
#.#...#.#.
.###....#.
.#....#...
##.#.#.#.#
....#.#.#.
.##..###.#
..#...##..
..##....##
......#...
.####.###.
""", AsteroidObservation(Location(1, 2), 35)
),
(
"""\
.#..#..###
####.###.#
....###.#.
..###.##.#
##.##.#.#.
....###..#
..#.#..#.#
#..#.#.###
.##...##.#
.....#.#..
""", AsteroidObservation(Location(6, 3), 41)
),
(
"""\
.#..##.###...#######
##.############..##.
.#.######.########.#
.###.#######.####.#.
#####.##.#.##.###.##
..#####..#.#########
####################
#.####....###.#.#.##
##.#################
#####.##.###..####..
..######..##.#######
####.##.####...##..#
.#####..#.######.###
##...#.##########...
#.##########.#######
.####.#.###.###.#.##
....##.##.###..#####
.#.#.###########.###
#.#.#.#####.####.###
###.##.####.##.#..##
""", AsteroidObservation(Location(11, 13), 210)
)
# fmt: on
],
)
def test_find_best_spot_for_monitoring_station(
grid_string: str, expected_observation: int
) -> None:
parsed = parse_grid(grid_string)
assert find_best_spot_for_monitoring_station(parsed) == expected_observation
def distance(a: Location, b: Location) -> float:
return sqrt((b.across - a.across) ** 2 + (b.down - a.down) ** 2)
def direction_and_distance(
first: Location, second: Location
) -> Tuple[Direction, float]:
return (basic_direction(first, second), distance(first, second))
def clockwise_asteroid_queues(
centre: Location, asteroids: Iterable[Location]
) -> List[Deque[Location]]:
direction_and_distance_from_centre = partial(direction_and_distance, centre)
# Build dictionary of asteroids by location after pre-sorting the asteroids
# by direction and then distance order. Pre-sorting avoids having to sort
# the queues individually later.
direction_queues: DefaultDict[Direction, Deque[Location]] = defaultdict(deque)
for asteroid in sorted(asteroids, key=direction_and_distance_from_centre):
direction = basic_direction(centre, asteroid)
direction_queues[direction].append(asteroid)
# Sort the queues (not their elements) by the angle of their basic
# direction (the key) so that the asteroids can be iterated in a
# circular manner.
#
# We deliberately misprovide the arguments to atan2 to adjust the ray
# from which the angle is calculated, and the direction in which it is
# calculated.
#
# `-atan2(x, y)` would usually give an anti-clockwise order from
# the 6 position, but as our vertical axis is inverted it gives the
# equivalent of `-atan2(x, -y)`, which is a clockwise traversal
# from the 12 position.
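    # A quick worked check of that ordering (approximate values):
    #   up    ( 0, -1) -> -atan2( 0, -1) = -pi
    #   right ( 1,  0) -> -atan2( 1,  0) = -pi / 2
    #   down  ( 0,  1) -> -atan2( 0,  1) =  0
    #   left  (-1,  0) -> -atan2(-1,  0) =  pi / 2
    # so sorting in ascending order walks the directions clockwise from 12.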
sorted_by_angle = sorted(
direction_queues.items(), key=lambda item: -atan2(item[0].across, item[0].down)
)
return [queue for direction, queue in sorted_by_angle]
def destroy_asteroids_in_order(
laser: Location, asteroids: Iterable[Location]
) -> Iterator[Location]:
clockwise_order_from_above = clockwise_asteroid_queues(laser, asteroids)
while clockwise_order_from_above:
for queue in clockwise_order_from_above:
try:
# Closer asteroids are at the front of the queue,
# so popleft rather than just pop.
yield queue.popleft()
except IndexError:
# Queue is exhausted, so remove it.
clockwise_order_from_above.remove(queue)
def find_nth_asteroid_destroyed(
laser: Location, asteroids: Iterable[Location], n: int = 200
) -> Location:
destroyed_gen = destroy_asteroids_in_order(laser, asteroids)
for number, destroyed_asteroid in enumerate(destroyed_gen, start=1):
if number == n:
return destroyed_asteroid
raise ValueError(f"Too few asteroids given to find number {n} destroyed.")
def test_asteroid_destruction() -> None:
grid_string = """\
.#..##.###...#######
##.############..##.
.#.######.########.#
.###.#######.####.#.
#####.##.#.##.###.##
..#####..#.#########
####################
#.####....###.#.#.##
##.#################
#####.##.###..####..
..######..##.#######
####.##.####...##..#
.#####..#.######.###
##...#.##########...
#.##########.#######
.####.#.###.###.#.##
....##.##.###..#####
.#.#.###########.###
#.#.#.#####.####.###
###.##.####.##.#..##
"""
grid = parse_grid(grid_string)
laser = find_best_spot_for_monitoring_station(grid).asteroid
asteroids = grid.asteroids - {laser}
destruction_order = list(destroy_asteroids_in_order(laser, asteroids))
expected = (
(1, (11, 12)),
(2, (12, 1)),
(3, (12, 2)),
(10, (12, 8)),
(20, (16, 0)),
(50, (16, 9)),
(100, (10, 16)),
(199, (9, 6)),
(200, (8, 2)),
(201, (10, 9)),
(299, (11, 1)),
)
filtered = [destruction_order[n - 1] for n, _ in expected]
locations = [Location(*coords) for _, coords in expected]
assert locations == filtered
def main(grid: ParsedGrid) -> Tuple[int, int]:
best_spot = find_best_spot_for_monitoring_station(grid)
asteroid_200 = find_nth_asteroid_destroyed(
best_spot.asteroid, grid.asteroids - {best_spot.asteroid}
)
asteroid_200_coord = asteroid_200.across * 100 + asteroid_200.down
return best_spot.asteroids_visible, asteroid_200_coord
if __name__ == "__main__":
grid = parse_grid(aoc.load_puzzle_input(2019, DAY))
part_one_solution, part_two_solution = main(grid)
assert (
part_one_solution == 276
), "Part one solution doesn't match known-correct answer."
assert (
part_two_solution == 1321
), "Part two solution doesn't match known-correct answer."
print(
aoc.format_solution(
title=__doc__,
part_one=part_one_solution,
part_two=part_two_solution,
)
)
| [
"aoc.format_solution",
"math.sqrt",
"functools.partial",
"collections.defaultdict",
"aoc.load_puzzle_input",
"math.atan2"
] | [((3366, 3423), 'math.sqrt', 'sqrt', (['((b.across - a.across) ** 2 + (b.down - a.down) ** 2)'], {}), '((b.across - a.across) ** 2 + (b.down - a.down) ** 2)\n', (3370, 3423), False, 'from math import atan2, gcd, sqrt\n'), ((3745, 3784), 'functools.partial', 'partial', (['direction_and_distance', 'centre'], {}), '(direction_and_distance, centre)\n', (3752, 3784), False, 'from functools import partial\n'), ((4045, 4063), 'collections.defaultdict', 'defaultdict', (['deque'], {}), '(deque)\n', (4056, 4063), False, 'from collections import defaultdict, deque\n'), ((7545, 7577), 'aoc.load_puzzle_input', 'aoc.load_puzzle_input', (['(2019)', 'DAY'], {}), '(2019, DAY)\n', (7566, 7577), False, 'import aoc\n'), ((7871, 7966), 'aoc.format_solution', 'aoc.format_solution', ([], {'title': '__doc__', 'part_one': 'part_one_solution', 'part_two': 'part_two_solution'}), '(title=__doc__, part_one=part_one_solution, part_two=\n part_two_solution)\n', (7890, 7966), False, 'import aoc\n'), ((4912, 4947), 'math.atan2', 'atan2', (['item[0].across', 'item[0].down'], {}), '(item[0].across, item[0].down)\n', (4917, 4947), False, 'from math import atan2, gcd, sqrt\n')] |
from phue import Bridge
import time
from secrets import hueIP
bridge = Bridge(hueIP)
try:
# If the app is not registered and the button is not pressed, press the button and call connect() (this only needs to be run a single time)
bridge.connect()
except Exception:
print("Press the Hue Bridge button to connect")
# # Get the bridge state (This returns the full dictionary that you can explore)
# print(bridge.get_api())
# controlledLightString = input("Exact name of the light that should be controlled: ")
controlledLightString = "Air"
# You can also use light names instead of the id
controlledLight = bridge.get_light(controlledLightString)
# print(controlledLight)
# The type of light
controlledLightType = str(controlledLight['config']['archetype'])
print("The light you have selected is a '" + controlledLightType + "'")
def BPMtoSeconds(bpm):
return 60/bpm
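# Quick sanity check for the helper above (it is not called elsewhere in this
# script): at 120 BPM each beat lasts BPMtoSeconds(120) == 0.5 seconds.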
# While button is pressed
def StrobeLight():
# Make sure the light is on
bridge.set_light(controlledLightString, 'on', True)
# Turn the light on command
onCommand = {'transitiontime' : 0, 'bri' : 254}
offCommand = {'transitiontime' : 0, 'bri' : 0}
    # Flash light 20 times
for i in range(0, 20):
bridge.set_light(controlledLightString, onCommand)
time.sleep(0.0001)
bridge.set_light(controlledLightString, offCommand)
def BeatLightMatch():
# Make sure the light is on
bridge.set_light(controlledLightString, 'on', True)
# Turn the light on command
onCommand = {'transitiontime' : 1, 'bri' : 254}
offCommand = {'transitiontime' : 1, 'bri' : 0}
# Flash light 10 times
for i in range(0, 10):
bridge.set_light(controlledLightString, onCommand)
time.sleep(0.125)
bridge.set_light(controlledLightString, offCommand)
time.sleep(0.125)
def TurnLightOff():
bridge.set_light(controlledLightString, 'on', False)
StrobeLight()
TurnLightOff()
# BeatLightMatch()
# # Prints if light 1 is on or not
# b.get_light(1, 'on')
# # Set brightness of lamp 1 to max
# b.set_light(1, 'bri', 254)
# # Set brightness of lamp 2 to 50%
# b.set_light(2, 'bri', 127)
# # Turn lamp 2 on
# b.set_light(2,'on', True)
# # You can also control multiple lamps by sending a list as lamp_id
# b.set_light( [1,2], 'on', True)
# # Get the name of a lamp
# b.get_light(1, 'name')
# # You can also use light names instead of the id
# b.get_light('Kitchen')
# b.set_light('Kitchen', 'bri', 254)
# # Also works with lists
# b.set_light(['Bathroom', 'Garage'], 'on', False)
# # The set_light method can also take a dictionary as the second argument to do more fancy stuff
# # This will turn light 1 on with a transition time of 30 seconds
# command = {'transitiontime' : 300, 'on' : True, 'bri' : 254}
# b.set_light(1, command)
| [
"phue.Bridge",
"time.sleep"
] | [((72, 85), 'phue.Bridge', 'Bridge', (['hueIP'], {}), '(hueIP)\n', (78, 85), False, 'from phue import Bridge\n'), ((1254, 1272), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (1264, 1272), False, 'import time\n'), ((1669, 1686), 'time.sleep', 'time.sleep', (['(0.125)'], {}), '(0.125)\n', (1679, 1686), False, 'import time\n'), ((1743, 1760), 'time.sleep', 'time.sleep', (['(0.125)'], {}), '(0.125)\n', (1753, 1760), False, 'import time\n')] |
# import time
from flask import Flask,redirect
from .TeamAssigner import assignTeam
from .CandidateAssigner import assignCandidate
app = Flask(__name__)
@app.route("/executeAlgo")
def execute_algo():
assignTeam()
return redirect("http://localhost:3000/team_match_success")
# return {
# "msg": "success",
# }
@app.route("/executeCand")
def execute_cand():
assignCandidate()
return redirect("http://localhost:3000/candidate_match_success")
# return {
# "msg": "CandidateAssigner success",
# }
| [
"flask.redirect",
"flask.Flask"
] | [((137, 152), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (142, 152), False, 'from flask import Flask, redirect\n'), ((230, 282), 'flask.redirect', 'redirect', (['"""http://localhost:3000/team_match_success"""'], {}), "('http://localhost:3000/team_match_success')\n", (238, 282), False, 'from flask import Flask, redirect\n'), ((415, 472), 'flask.redirect', 'redirect', (['"""http://localhost:3000/candidate_match_success"""'], {}), "('http://localhost:3000/candidate_match_success')\n", (423, 472), False, 'from flask import Flask, redirect\n')] |
from typing import Optional
import pandas as pd
from episuite import data
class GoogleMobility:
"""This is a class implementing a client for the Google
Community Mobility Reports.
.. seealso::
`Google Community Mobility Report <https://www.google.com/covid19/mobility/>`_
Community Mobility Report website.
:param report_url: alternative report download link
"""
DEFAULT_REPORT_URL: str = \
"https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
def __init__(self, report_url: Optional[str] = None):
self.report_url = report_url or GoogleMobility.DEFAULT_REPORT_URL
def load_report(self, country_region_code: Optional[str] = None,
show_progress: bool = True, cache: bool = True) -> pd.DataFrame:
"""Load the report from Google and optionally cache it or fitler
by a country code. Given that the mobility report is a large file,
it is highly recommended to specify the country region code.
:param country_region_code: The country region code, i.e. "BR"
for Brazil.
:param show_progress: Show a progress bar for the download
        :param cache: Whether to cache the downloaded report; defaults to True
:returns: a dataframe with the results already filtered and parsed
"""
fpath = data.load_from_cache(self.report_url, "google_mobility.csv",
"Google Mobility Report",
show_progress=show_progress,
invalidate=not cache)
if country_region_code is None:
df = pd.read_csv(fpath, low_memory=False,
parse_dates=["date"])
else:
iter_csv = pd.read_csv(fpath, low_memory=False,
parse_dates=["date"],
iterator=True, chunksize=5000)
df = pd.concat([chunk[chunk['country_region_code'] == country_region_code]
for chunk in iter_csv])
return df
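# A minimal usage sketch (hypothetical script, not part of the episuite package;
# the column names follow Google's published CSV schema and may change over time):
#
#     mobility = GoogleMobility()
#     df_br = mobility.load_report(country_region_code="BR", show_progress=False)
#     print(df_br[["date", "sub_region_1",
#                  "workplaces_percent_change_from_baseline"]].head())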
| [
"episuite.data.load_from_cache",
"pandas.read_csv",
"pandas.concat"
] | [((1384, 1530), 'episuite.data.load_from_cache', 'data.load_from_cache', (['self.report_url', '"""google_mobility.csv"""', '"""Google Mobility Report"""'], {'show_progress': 'show_progress', 'invalidate': '(not cache)'}), "(self.report_url, 'google_mobility.csv',\n 'Google Mobility Report', show_progress=show_progress, invalidate=not cache\n )\n", (1404, 1530), False, 'from episuite import data\n'), ((1690, 1748), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {'low_memory': '(False)', 'parse_dates': "['date']"}), "(fpath, low_memory=False, parse_dates=['date'])\n", (1701, 1748), True, 'import pandas as pd\n'), ((1815, 1908), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {'low_memory': '(False)', 'parse_dates': "['date']", 'iterator': '(True)', 'chunksize': '(5000)'}), "(fpath, low_memory=False, parse_dates=['date'], iterator=True,\n chunksize=5000)\n", (1826, 1908), True, 'import pandas as pd\n'), ((1992, 2089), 'pandas.concat', 'pd.concat', (["[chunk[chunk['country_region_code'] == country_region_code] for chunk in\n iter_csv]"], {}), "([chunk[chunk['country_region_code'] == country_region_code] for\n chunk in iter_csv])\n", (2001, 2089), True, 'import pandas as pd\n')] |
from datetime import date, datetime
from decimal import Decimal
from enum import Enum
from typing import Union
from dacite import from_dict, Config
from konduto import KONDUTO_DOMAIN
from konduto.api.clients import KondutoHttpClient
from konduto.api.resources.konduto_order_status import KondutoOrderStatus
from konduto.api.resources.requests.konduto_order_request import KondutoOrderRequest
from konduto.api.resources.requests.konduto_order_status_request import KondutoOrderStatusRequest
from konduto.api.resources.response.konduto_error import KondutoError
from konduto.api.resources.response.konduto_order_response import KondutoOrderResponse, KondutoRecommendation
from konduto.infrastructure.either import Right, Left
from konduto.infrastructure.parsers import datetime_str_to_datetime, float_to_decimal, date_str_to_date, to_int
ENDPOINT = f'{KONDUTO_DOMAIN}v1/orders'.strip('/')
class KondutoOrderClient(KondutoHttpClient):
def create(self, payload: KondutoOrderRequest) -> Union[Right, Left]:
result = self.post(ENDPOINT, payload.json)
if result.is_right:
response_order = result.value['order']
recommendation = KondutoRecommendation.from_string(response_order['recommendation'])
order_response = KondutoOrderResponse(id=response_order['id'], score=response_order['score'],
recommendation=recommendation,
status=KondutoOrderStatus.from_string(response_order['status']))
return Right(order_response)
return result
def change_status(self, order_id: str, payload: KondutoOrderStatusRequest) -> Union[Right, Left]:
result = self.put(f'{ENDPOINT}/{order_id}', payload.json)
return result
def load(self, order_id: str) -> Union[Right, Left]:
result = self.get(f'{ENDPOINT}/{order_id}')
if result.is_right:
hooks = {date: date_str_to_date, Decimal: float_to_decimal, datetime: datetime_str_to_datetime,
int: to_int}
response = from_dict(data_class=KondutoOrderResponse, data=result.value['order'],
config=Config(cast=[Enum, int], type_hooks=hooks))
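            # (note: dacite applies each `type_hooks` converter to the raw input value
            #  when filling a field of the matching type, e.g. an ISO date string goes
            #  through date_str_to_date before landing in a `date`-typed attribute)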
return Right(response)
return result
| [
"konduto.api.resources.konduto_order_status.KondutoOrderStatus.from_string",
"konduto.api.resources.response.konduto_order_response.KondutoRecommendation.from_string",
"dacite.Config",
"konduto.infrastructure.either.Right"
] | [((1172, 1239), 'konduto.api.resources.response.konduto_order_response.KondutoRecommendation.from_string', 'KondutoRecommendation.from_string', (["response_order['recommendation']"], {}), "(response_order['recommendation'])\n", (1205, 1239), False, 'from konduto.api.resources.response.konduto_order_response import KondutoOrderResponse, KondutoRecommendation\n'), ((1561, 1582), 'konduto.infrastructure.either.Right', 'Right', (['order_response'], {}), '(order_response)\n', (1566, 1582), False, 'from konduto.infrastructure.either import Right, Left\n'), ((2275, 2290), 'konduto.infrastructure.either.Right', 'Right', (['response'], {}), '(response)\n', (2280, 2290), False, 'from konduto.infrastructure.either import Right, Left\n'), ((1484, 1540), 'konduto.api.resources.konduto_order_status.KondutoOrderStatus.from_string', 'KondutoOrderStatus.from_string', (["response_order['status']"], {}), "(response_order['status'])\n", (1514, 1540), False, 'from konduto.api.resources.konduto_order_status import KondutoOrderStatus\n'), ((2212, 2254), 'dacite.Config', 'Config', ([], {'cast': '[Enum, int]', 'type_hooks': 'hooks'}), '(cast=[Enum, int], type_hooks=hooks)\n', (2218, 2254), False, 'from dacite import from_dict, Config\n')] |
# coding=utf8
"""
@author: <NAME>
@date: 09/26/2019
@code description: A Python 3 script that computes cosine similarity using TF-IDF and word-embedding (GloVe) representations.
"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
glove_path = "../../data/glove.6B.50d.txt"
punctuation_list = ['.', ',', '!', '?', '@', '#', '$', '%', '^', '&', '*', '(', ')']
def tf_idf(tfidf_vectorizer, corpus):
"""
It is a function to use TF-IDF to vectorize corpus.
    :param tfidf_vectorizer: scikit-learn TfidfVectorizer used to fit and transform the corpus
    :param corpus: corpus to fit
    :return: dense TF-IDF matrix as a numpy array
"""
X = tfidf_vectorizer.fit_transform(corpus)
return X.toarray()
def calculate_cosine_similarity(vec, *args):
"""
It is a function to calculate cosine similarity.
    :param vec: vector
    :param args: one or more vectors to compare `vec` against
:return: cosine similarity result
"""
return cosine_similarity(vec, args)
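# Reference for the metric used throughout this file:
#     cosine_similarity(a, b) = (a . b) / (||a|| * ||b||)
# Tiny worked example: for a = [1, 0, 1] and b = [1, 1, 0],
#     a . b = 1 and ||a|| = ||b|| = sqrt(2), so the similarity is 1 / 2 = 0.5.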
def load_GloVe_model(path):
"""
It is a function to load GloVe model
:param path: model path
    :return: dict mapping each word to its embedding vector
"""
print("Load GloVe Model.")
with open(path, 'r') as f:
content = f.readlines()
model = {}
for line in content:
splitLine = line.split()
word = splitLine[0]
embedding = np.array((splitLine[1:]))
model[word] = embedding
print("Done.", len(model), " words loaded!\n")
return model
def process(sentence, lemmatizer):
"""
    It is a function to normalise a sentence: split, lowercase, lemmatise,
    and drop stop words and punctuation.
    :param sentence: raw sentence string
    :param lemmatizer: WordNetLemmatizer instance used for lemmatisation
    :return: list of cleaned tokens
"""
res = []
# 1. Split
splitSentence = sentence.split()
# 2. To lower case
for word in splitSentence:
word = word.lower()
# 3. Lemmatize
word = lemmatizer.lemmatize(word)
# 4. Remove stop words
stopword_set = set(stopwords.words("english"))
if word in stopword_set:
continue
# 5. Remove punctuation
if word in punctuation_list:
continue
res.append(word)
return res
def get_glove_vec(sentence, lemmatizer, glove_model):
"""
    It is a function to get the GloVe-based vector used for cosine similarity calculation.
    :param sentence: raw sentence to be processed
    :param lemmatizer: WordNetLemmatizer instance used during preprocessing
    :param glove_model: loaded GloVe model (word -> embedding vector)
    :return: list holding the mean embedding value of each recognised word
"""
res = []
for word in process(sentence, lemmatizer):
try:
vec = glove_model.get(word).astype(float)
res.append(np.mean(vec))
except Exception:
continue
return res
def calculate_glove_cosine_similarity(s1, s2, lemmatizer, glove_model):
"""
It is a function to calculate GloVe embedding cosine similarity.
    :param s1: first sentence
    :param s2: second sentence
    :param lemmatizer: WordNetLemmatizer instance used during preprocessing
    :param glove_model: GloVe model
:return: GloVe cosine similarity
"""
# 1. Get GloVe Vector
s1_vec = get_glove_vec(s1, lemmatizer, glove_model) # <List> object
s2_vec = get_glove_vec(s2, lemmatizer, glove_model)
# 2. Measure the length of vector
try:
if len(s1_vec) == len(s2_vec):
s1_array = np.array((s1_vec)).reshape(1, -1)
s2_array = np.array((s2_vec)).reshape(1, -1)
elif len(s1_vec) > len(s2_vec):
s1_array = np.array((s1_vec)).reshape(1, -1)
s2_array = np.zeros(shape=(1, len(s1_vec)))
s2_array[0, :len(s2_vec)] = s2_vec
else:
s2_array = np.array((s2_vec)).reshape(1, -1)
s1_array = np.zeros(shape=(1, len(s2_vec)))
s1_array[0, :len(s1_vec)] = s1_vec
assert s1_array.shape == s2_array.shape
s1_mean = np.mean(s1_array, axis=0).reshape(1, -1)
s2_mean = np.mean(s2_array, axis=0).reshape(1, -1)
return cosine_similarity(s1_mean, s2_mean)[0][0]
except Exception as e:
print(e)
def main():
corpus = ['The president greets the press in Chicago',
'Obama speaks to the media in Illinois']
s1 = 'The president greets the press in Chicago'
s2 = 'Obama speaks to the media in Illinois'
s3 = 'I love you'
s4 = 'We went to Starbucks to buy hazelnut lattee yesterday'
s5 = 'We often go to Starbucks to buy coffee and chat with each other.!!!!!!'
############## 1. TF-IDF ###############
tf_idf_vectorizer = TfidfVectorizer()
tf_idf_vec = tf_idf(tfidf_vectorizer=tf_idf_vectorizer,
corpus=corpus)
print("tf_idf_vec = ", tf_idf_vec)
print("tf_idf_vec.shape = ", tf_idf_vec.shape)
print("tf_idf_vectorizer.get_feature_names() = ", tf_idf_vectorizer.get_feature_names())
##### 2. TF-IDF Cosine Similarity ######
tfidf_cosine_res = cosine_similarity(tf_idf_vec)[0][1]
print("tfidf_cosine_res = ", tfidf_cosine_res)
print("\n")
########### 3. Lemmatization ###########
lemmatizer = WordNetLemmatizer()
########## 4. Load GloVe Model #########
glove_model = load_GloVe_model(glove_path) # len(glove_model) = 400000
###### 5. GloVe Cosine Similarity ######
res = calculate_glove_cosine_similarity(s1, s3, lemmatizer, glove_model)
print("res = ", res)
res1 = calculate_glove_cosine_similarity(s1, s2, lemmatizer, glove_model)
print("res1 = ", res1)
res2 = calculate_glove_cosine_similarity(s2, s3, lemmatizer, glove_model)
print("res2 = ", res2)
res3 = calculate_glove_cosine_similarity(s5, s4, lemmatizer, glove_model)
print("res3 = ", res3)
if __name__ == '__main__':
main() | [
"numpy.mean",
"nltk.corpus.stopwords.words",
"sklearn.metrics.pairwise.cosine_similarity",
"nltk.stem.WordNetLemmatizer",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer"
] | [((1062, 1090), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['vec', 'args'], {}), '(vec, args)\n', (1079, 1090), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((4364, 4381), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (4379, 4381), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4901, 4920), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (4918, 4920), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1446, 1469), 'numpy.array', 'np.array', (['splitLine[1:]'], {}), '(splitLine[1:])\n', (1454, 1469), True, 'import numpy as np\n'), ((1956, 1982), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1971, 1982), False, 'from nltk.corpus import stopwords\n'), ((4734, 4763), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tf_idf_vec'], {}), '(tf_idf_vec)\n', (4751, 4763), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2581, 2593), 'numpy.mean', 'np.mean', (['vec'], {}), '(vec)\n', (2588, 2593), True, 'import numpy as np\n'), ((3691, 3716), 'numpy.mean', 'np.mean', (['s1_array'], {'axis': '(0)'}), '(s1_array, axis=0)\n', (3698, 3716), True, 'import numpy as np\n'), ((3750, 3775), 'numpy.mean', 'np.mean', (['s2_array'], {'axis': '(0)'}), '(s2_array, axis=0)\n', (3757, 3775), True, 'import numpy as np\n'), ((3806, 3841), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['s1_mean', 's2_mean'], {}), '(s1_mean, s2_mean)\n', (3823, 3841), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3158, 3174), 'numpy.array', 'np.array', (['s1_vec'], {}), '(s1_vec)\n', (3166, 3174), True, 'import numpy as np\n'), ((3215, 3231), 'numpy.array', 'np.array', (['s2_vec'], {}), '(s2_vec)\n', (3223, 3231), True, 'import numpy as np\n'), ((3313, 3329), 'numpy.array', 'np.array', (['s1_vec'], {}), '(s1_vec)\n', (3321, 3329), True, 'import numpy as np\n'), ((3487, 3503), 'numpy.array', 'np.array', (['s2_vec'], {}), '(s2_vec)\n', (3495, 3503), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This file contains implementations of the functions used to train a CNN model:
train_cnn - Function used to facilitate the training of the Convolutional Neural Network model.
test_cnn - Function used to facilitate the testing of the Convolutional Neural Network model.
"""
# Built-in/Generic Imports
import os
import time
# Library Imports
import torch
import numpy as np
import pandas as pd
from torch.cuda import amp
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.optim import SGD, LBFGS, lr_scheduler
from torch.utils.tensorboard import SummaryWriter
# Own Modules
from utils import log
from model import Classifier
from dataset import get_datasets
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, Selective Dermatology"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "3.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def train_cnn(arguments, device):
"""
    Function for training the Convolutional Neural Network model.
:param arguments: ArgumentParser Namespace object with arguments used for training.
:param device: PyTorch device that will be used for training.
:return: Lists of training and validation losses and an integer for the best performing epoch.
"""
# Loads a TensorBoard Summary Writer.
if arguments.tensorboard_dir != "":
writer = SummaryWriter(os.path.join(arguments.tensorboard_dir, arguments.task, arguments.experiment))
# Loads the training and validation data.
train_data, val_data, _ = get_datasets(arguments)
# Creates the training data loader using the dataset objects.
training_data_loader = DataLoader(train_data, batch_size=arguments.batch_size,
shuffle=True, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
# Creates the validation data loader using the dataset objects.
validation_data_loader = DataLoader(val_data, batch_size=arguments.batch_size,
shuffle=False, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
log(arguments, "Loaded Datasets\n")
# Initialises the classifier model.
classifier = Classifier(arguments.efficient_net)
# Sets the classifier to training mode.
classifier.train()
# Moves the classifier to the selected device.
classifier.to(device)
# Initialises the optimiser used to optimise the parameters of the model.
optimiser = SGD(params=classifier.parameters(), lr=arguments.starting_lr)
# Initialises the learning rate scheduler to adjust the learning rate during training.
scheduler = lr_scheduler.CyclicLR(optimiser, base_lr=arguments.starting_lr, max_lr=arguments.maximum_lr)
    # Initialises the gradient scaler used for 16-bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
scaler = amp.GradScaler()
log(arguments, "Models Initialised")
# Declares the main logging variables for the training.
start_time = time.time()
losses, validation_losses, temperatures = [], [], []
best_loss, best_epoch, total_batches = 1e10, 0, 0
log(arguments, "Training Timer Started\n")
# The beginning of the main training loop.
for epoch in range(1, arguments.max_epochs + 1):
# Declares the logging variables for the epoch.
        epoch_acc, epoch_loss, epoch_risk, epoch_coverage, num_batches = 0, 0, 0, 0, 0
# Loops through the training data batches.
for images, labels in training_data_loader:
# Moves the images and labels to the selected device.
images = images.to(device)
labels = labels.to(device)
# Resets the gradients in the model.
optimiser.zero_grad()
# Perform training with 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Using the gradient scaler performs backward propagation.
scaler.scale(loss).backward()
# Update the weights of the model using the optimiser.
scaler.step(optimiser)
# Updates the scale factor of the gradient scaler.
scaler.update()
# Performs training with 32 bit precision.
else:
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Performs backward propagation.
loss.backward()
# Update the weights of the model using the optimiser.
optimiser.step()
# Updates the learning rate scheduler.
scheduler.step()
# Calculates the accuracy of the batch.
batch_accuracy = (logits.max(dim=1)[1] == labels).sum().double() / labels.shape[0]
# Calculates the selection scores for the validation predictions.
            selections = torch.max(F.softmax(logits, dim=1), 1)[0]
# Calculates the coverage for the batch.
batch_coverage = selections.mean()
# Calculates the log probability for the predictions and selections.
log_prob = -1. * F.log_softmax(logits, 1) * selections.view([labels.shape[0], 1])
# Calculates the selective risk for the batch using the selections and predictions.
batch_risk = log_prob.gather(1, labels.unsqueeze(1)).mean() / batch_coverage
# Adds the number of batches, loss and accuracy to epoch sum.
num_batches += 1
epoch_loss += loss.item()
epoch_acc += batch_accuracy
epoch_coverage += batch_coverage
epoch_risk += batch_risk
# Writes the batch loss and accuracy to TensorBoard logger.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/batch", loss.item(), num_batches + total_batches)
writer.add_scalar("Accuracy/batch", batch_accuracy, num_batches + total_batches)
# Logs the details of the epoch progress.
if num_batches % arguments.log_interval == 0:
log(arguments, "Time: {}s\tTrain Epoch: {} [{}/{}] ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.6f}".format(
str(int(time.time() - start_time)).rjust(6, '0'), str(epoch).rjust(2, '0'),
str(num_batches * arguments.batch_size).rjust(len(str(len(train_data))), '0'),
len(train_data), 100. * num_batches / (len(train_data) / arguments.batch_size),
epoch_loss / num_batches, epoch_acc / num_batches))
            # If the configured number of batches per epoch has been reached, end the epoch.
if num_batches == arguments.batches_per_epoch:
break
# Updates the total number of batches (used for logging).
total_batches += num_batches
# Writes epoch loss and accuracy to TensorBoard.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/train", epoch_loss / num_batches, epoch)
writer.add_scalar("Accuracy/train", epoch_acc / num_batches, epoch)
writer.add_scalar("Coverage/train", epoch_coverage / num_batches, epoch)
writer.add_scalar("Selective Risk/train", epoch_risk / num_batches, epoch)
# Declares the logging variables for validation.
validation_acc, validation_loss, validation_risk, validation_coverage, validation_batches = 0, 0, 0, 0, 0
logit_list, label_list = [], []
temperature = torch.nn.Parameter(torch.ones(1, device=device))
temp_optimiser = LBFGS([temperature], lr=0.01, max_iter=1000, line_search_fn="strong_wolfe")
# Performs the validation epoch with no gradient calculations.
with torch.no_grad():
# Loops through the training data batches.
for images, labels in validation_data_loader:
# Moves the images and labels to the selected device.
images = images.to(device)
labels = labels.to(device)
# Performs forward propagation using 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Performs forward propagation using 32 bit precision.
else:
# Performs forward propagation with the model.
                    logits = classifier(images, dropout=False)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
logit_list.append(logits)
label_list.append(labels)
# Calculates the accuracy of the batch.
batch_accuracy = (logits.max(dim=1)[1] == labels).sum().double() / labels.shape[0]
# Calculates the selection scores for the validation predictions.
selections = torch.max(F.softmax(logits), 1)[0]
# Calculates the coverage for the batch.
batch_coverage = selections.mean()
# Calculates the log probability for the predictions and selections.
log_prob = -1. * F.log_softmax(logits, 1) * selections.view([labels.shape[0], 1])
# Calculates the selective risk for the batch using the selections and predictions.
batch_risk = log_prob.gather(1, labels.unsqueeze(1)).mean() / batch_coverage
# Adds the number of batches, loss and accuracy to validation sum.
validation_batches += 1
validation_loss += loss.item()
validation_acc += batch_accuracy
validation_coverage += batch_coverage
validation_risk += batch_risk
                # If the configured number of batches has been reached, end the validation loop.
if validation_batches == arguments.batches_per_epoch:
break
logit_list = torch.cat(logit_list).to(device)
label_list = torch.cat(label_list).to(device)
def _eval():
temp_loss = F.cross_entropy(torch.div(logit_list, temperature), label_list)
temp_loss.backward()
return temp_loss
temp_optimiser.step(_eval)
temperatures.append(temperature.item())
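        # Temperature scaling: the scalar `temperature` is fitted by minimising the cross-entropy
        # of the validation logits divided by it, and the stored value is later used to calibrate
        # the softmax confidences at test time.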
# Writes validation loss and accuracy to TensorBoard.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/validation", validation_loss / validation_batches, epoch)
writer.add_scalar("Accuracy/validation", validation_acc / validation_batches, epoch)
writer.add_scalar("Coverage/validation", validation_coverage / validation_batches, epoch)
writer.add_scalar("Selective Risk/validation", validation_risk / validation_batches, epoch)
# Adds the training and validation losses to their respective lists.
losses.append(epoch_loss / num_batches)
validation_losses.append(validation_loss / validation_batches)
# Logs the details of the training epoch.
log(arguments, "\nEpoch: {}\Training Loss: {:.6f}\tTraining Accuracy: {:.6f}\t"
"Training Coverage: {:.6f}\tTraining Selective Risk: {:.6f}\n"
"Validation Loss: {:.6f}\tValidation Accuracy: {:.6f}\t"
"Validation Coverage: {:.6f}\tValidation Selective Risk: {:.6f}\n".
format(epoch, losses[-1], epoch_acc / num_batches, epoch_coverage / num_batches, epoch_risk / num_batches,
validation_losses[-1], validation_acc / validation_batches,
validation_coverage / validation_batches, validation_risk / validation_batches))
# If the current epoch has the best validation loss then save the model with the prefix best.
if validation_losses[-1] < best_loss:
best_loss = validation_losses[-1]
best_epoch = epoch
classifier.save_model(arguments.model_dir, arguments.experiment)
# Saves the model with the current epoch as the prefix.
classifier.save_model(arguments.model_dir, arguments.experiment, str(epoch))
# Checks if the training has performed the minimum number of epochs.
if epoch >= arguments.min_epochs:
# Calculates the generalised validation loss.
            g_loss = 100 * ((validation_losses[-1] / min(validation_losses[:-1])) - 1)
# Calculates the training progress using a window over the training losses.
t_progress = 1000 * ((sum(losses[-(arguments.window + 1): - 1]) /
(arguments.window * min(losses[-(arguments.window + 1): - 1]))) - 1)
# Compares the generalised loss and training progress against a selected target value.
if g_loss / t_progress > arguments.stop_target:
break
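        # The early-stopping rule above compares a generalisation-loss term against a
        # training-progress term, roughly:
        #   g_loss     = 100 * (current validation loss / minimum validation loss so far - 1)
        #   t_progress = 1000 * (sum of the last `window` training losses / (window * their minimum) - 1)
        # and training stops once g_loss / t_progress exceeds `stop_target`.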
# Logs the final training information.
log(arguments, f"\nTraining finished after {epoch} epochs in {int(time.time() - start_time)}s")
log(arguments, f"Best Epoch {best_epoch} with a temperature of {temperatures[best_epoch - 1]}")
    # Returns the fitted temperature for the best performing epoch.
return temperatures[best_epoch - 1]
def test_cnn(arguments, device):
"""
    Function for testing the convolutional neural network and generating CSV files with all predictions.
    :param arguments: ArgumentParser Namespace object with arguments used for testing.
    :param device: PyTorch device that will be used for testing.
    :return: None. The predictions and selection scores are written to CSV files in the output directory.
"""
# Loads the training and validation data.
_, _, test_data = get_datasets(arguments)
# Creates the validation data loader using the dataset objects.
testing_data_loader = DataLoader(test_data, batch_size=arguments.batch_size,
shuffle=False, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
log(arguments, "Loaded Datasets\n")
# Initialises the classifier model.
classifier = Classifier(arguments.efficient_net, pretrained=False)
# Loads the trained model.
classifier.load_state_dict(torch.load(os.path.join(arguments.model_dir, f"{arguments.experiment}_cnn_best.pt")))
# Sets the classifier to evaluation mode.
classifier.eval()
# Moves the classifier to the selected device.
classifier.to(device)
test_labels, testing_batches = [], 0
test_sr_mal, test_sr_ben, test_sr_selections = [], [], []
test_tmp_mal, test_tmp_ben, test_tmp_selections = [], [], []
test_mc_mal, test_mc_ben, test_mc_selections = [], [], []
with torch.no_grad():
for images, labels in testing_data_loader:
images = images.to(device)
labels = labels.cpu().numpy()
# Performs forward propagation using 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
# Performs forward propagation using 32 bit precision.
else:
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
sr_predictions = F.softmax(logits, dim=1).cpu().numpy()
sr_selections = np.amax(sr_predictions, axis=1)
test_sr_mal += sr_predictions[:, 0].tolist()
test_sr_ben += sr_predictions[:, 1].tolist()
test_sr_selections += sr_selections.tolist()
tmp_predictions = F.softmax(torch.div(logits, arguments.temperature), dim=1).cpu().numpy()
tmp_selections = np.amax(tmp_predictions, axis=1)
test_tmp_mal += tmp_predictions[:, 0].tolist()
test_tmp_ben += tmp_predictions[:, 1].tolist()
test_tmp_selections += tmp_selections.tolist()
mc_predictions = []
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
for _ in range(arguments.drop_iterations):
mc_predictions.append(classifier(images, dropout=True))
else:
for _ in range(arguments.drop_iterations):
mc_predictions.append(classifier(images, dropout=True))
mc_predictions = torch.stack(mc_predictions)
mc_predictions = F.softmax(mc_predictions, dim=2).cpu().numpy()
mc_selections = [np.var(mc_predictions[:, i, 0]) for i in range(mc_predictions.shape[1])]
mc_predictions = np.mean(mc_predictions, 0)
test_mc_mal += mc_predictions[:, 0].tolist()
test_mc_ben += mc_predictions[:, 1].tolist()
test_mc_selections += mc_selections
test_labels += labels.tolist()
testing_batches += 1
            # If the configured number of batches has been reached, end the test loop.
if testing_batches == arguments.batches_per_epoch:
break
filenames = [os.path.basename(file_path)[:-4] for file_path in test_data.filenames]
sr_output = pd.DataFrame({"image": filenames[:len(test_labels)],
"label": test_labels,
"mal": test_sr_mal,
"ben": test_sr_ben,
"sel": test_sr_selections})
tmp_output = pd.DataFrame({"image": filenames[:len(test_labels)],
"label": test_labels,
"mal": test_tmp_mal,
"ben": test_tmp_ben,
"sel": test_tmp_selections})
mc_output = pd.DataFrame({"image": filenames[:len(test_labels)],
"label": test_labels,
"mal": test_mc_mal,
"ben": test_mc_ben,
"sel": test_mc_selections})
os.makedirs(arguments.output_dir, exist_ok=True)
sr_output.to_csv(os.path.join(arguments.output_dir, f"{arguments.experiment}_sr_output.csv"), index=False)
tmp_output.to_csv(os.path.join(arguments.output_dir, f"{arguments.experiment}_tmp_output.csv"), index=False)
mc_output.to_csv(os.path.join(arguments.output_dir, f"{arguments.experiment}_mc_output.csv"), index=False)
| [
"model.Classifier",
"torch.nn.functional.softmax",
"numpy.mean",
"torch.cuda.amp.GradScaler",
"torch.optim.lr_scheduler.CyclicLR",
"torch.cuda.amp.autocast",
"utils.log",
"torch.nn.functional.log_softmax",
"time.time",
"torch.cat",
"torch.device",
"os.makedirs",
"torch.stack",
"os.path.joi... | [((1630, 1653), 'dataset.get_datasets', 'get_datasets', (['arguments'], {}), '(arguments)\n', (1642, 1653), False, 'from dataset import get_datasets\n'), ((1748, 1892), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(True)', 'num_workers': 'arguments.data_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(train_data, batch_size=arguments.batch_size, shuffle=True,\n num_workers=arguments.data_workers, pin_memory=False, drop_last=False)\n', (1758, 1892), False, 'from torch.utils.data import DataLoader\n'), ((2063, 2206), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(False)', 'num_workers': 'arguments.data_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(val_data, batch_size=arguments.batch_size, shuffle=False,\n num_workers=arguments.data_workers, pin_memory=False, drop_last=False)\n', (2073, 2206), False, 'from torch.utils.data import DataLoader\n'), ((2288, 2323), 'utils.log', 'log', (['arguments', '"""Loaded Datasets\n"""'], {}), "(arguments, 'Loaded Datasets\\n')\n", (2291, 2323), False, 'from utils import log\n'), ((2382, 2417), 'model.Classifier', 'Classifier', (['arguments.efficient_net'], {}), '(arguments.efficient_net)\n', (2392, 2417), False, 'from model import Classifier\n'), ((2829, 2926), 'torch.optim.lr_scheduler.CyclicLR', 'lr_scheduler.CyclicLR', (['optimiser'], {'base_lr': 'arguments.starting_lr', 'max_lr': 'arguments.maximum_lr'}), '(optimiser, base_lr=arguments.starting_lr, max_lr=\n arguments.maximum_lr)\n', (2850, 2926), False, 'from torch.optim import SGD, LBFGS, lr_scheduler\n'), ((3095, 3131), 'utils.log', 'log', (['arguments', '"""Models Initialised"""'], {}), "(arguments, 'Models Initialised')\n", (3098, 3131), False, 'from utils import log\n'), ((3210, 3221), 'time.time', 'time.time', ([], {}), '()\n', (3219, 3221), False, 'import time\n'), ((3338, 3380), 'utils.log', 'log', (['arguments', '"""Training Timer Started\n"""'], {}), "(arguments, 'Training Timer Started\\n')\n", (3341, 3380), False, 'from utils import log\n'), ((13832, 13936), 'utils.log', 'log', (['arguments', 'f"""Best Epoch {best_epoch} with a temperature of {temperatures[best_epoch - 1]}"""'], {}), "(arguments,\n f'Best Epoch {best_epoch} with a temperature of {temperatures[best_epoch - 1]}'\n )\n", (13835, 13936), False, 'from utils import log\n'), ((14551, 14574), 'dataset.get_datasets', 'get_datasets', (['arguments'], {}), '(arguments)\n', (14563, 14574), False, 'from dataset import get_datasets\n'), ((14670, 14814), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'arguments.batch_size', 'shuffle': '(False)', 'num_workers': 'arguments.data_workers', 'pin_memory': '(False)', 'drop_last': '(False)'}), '(test_data, batch_size=arguments.batch_size, shuffle=False,\n num_workers=arguments.data_workers, pin_memory=False, drop_last=False)\n', (14680, 14814), False, 'from torch.utils.data import DataLoader\n'), ((14890, 14925), 'utils.log', 'log', (['arguments', '"""Loaded Datasets\n"""'], {}), "(arguments, 'Loaded Datasets\\n')\n", (14893, 14925), False, 'from utils import log\n'), ((14984, 15037), 'model.Classifier', 'Classifier', (['arguments.efficient_net'], {'pretrained': '(False)'}), '(arguments.efficient_net, pretrained=False)\n', (14994, 15037), False, 'from model import Classifier\n'), ((18974, 19022), 'os.makedirs', 'os.makedirs', (['arguments.output_dir'], {'exist_ok': '(True)'}), 
'(arguments.output_dir, exist_ok=True)\n', (18985, 19022), False, 'import os\n'), ((3073, 3089), 'torch.cuda.amp.GradScaler', 'amp.GradScaler', ([], {}), '()\n', (3087, 3089), False, 'from torch.cuda import amp\n'), ((8166, 8241), 'torch.optim.LBFGS', 'LBFGS', (['[temperature]'], {'lr': '(0.01)', 'max_iter': '(1000)', 'line_search_fn': '"""strong_wolfe"""'}), "([temperature], lr=0.01, max_iter=1000, line_search_fn='strong_wolfe')\n", (8171, 8241), False, 'from torch.optim import SGD, LBFGS, lr_scheduler\n'), ((15575, 15590), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15588, 15590), False, 'import torch\n'), ((19045, 19120), 'os.path.join', 'os.path.join', (['arguments.output_dir', 'f"""{arguments.experiment}_sr_output.csv"""'], {}), "(arguments.output_dir, f'{arguments.experiment}_sr_output.csv')\n", (19057, 19120), False, 'import os\n'), ((19157, 19233), 'os.path.join', 'os.path.join', (['arguments.output_dir', 'f"""{arguments.experiment}_tmp_output.csv"""'], {}), "(arguments.output_dir, f'{arguments.experiment}_tmp_output.csv')\n", (19169, 19233), False, 'import os\n'), ((19269, 19344), 'os.path.join', 'os.path.join', (['arguments.output_dir', 'f"""{arguments.experiment}_mc_output.csv"""'], {}), "(arguments.output_dir, f'{arguments.experiment}_mc_output.csv')\n", (19281, 19344), False, 'import os\n'), ((1474, 1551), 'os.path.join', 'os.path.join', (['arguments.tensorboard_dir', 'arguments.task', 'arguments.experiment'], {}), '(arguments.tensorboard_dir, arguments.task, arguments.experiment)\n', (1486, 1551), False, 'import os\n'), ((3035, 3054), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3047, 3054), False, 'import torch\n'), ((8111, 8139), 'torch.ones', 'torch.ones', (['(1)'], {'device': 'device'}), '(1, device=device)\n', (8121, 8139), False, 'import torch\n'), ((8327, 8342), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8340, 8342), False, 'import torch\n'), ((15112, 15184), 'os.path.join', 'os.path.join', (['arguments.model_dir', 'f"""{arguments.experiment}_cnn_best.pt"""'], {}), "(arguments.model_dir, f'{arguments.experiment}_cnn_best.pt')\n", (15124, 15184), False, 'import os\n'), ((16346, 16377), 'numpy.amax', 'np.amax', (['sr_predictions'], {'axis': '(1)'}), '(sr_predictions, axis=1)\n', (16353, 16377), True, 'import numpy as np\n'), ((16684, 16716), 'numpy.amax', 'np.amax', (['tmp_predictions'], {'axis': '(1)'}), '(tmp_predictions, axis=1)\n', (16691, 16716), True, 'import numpy as np\n'), ((17368, 17395), 'torch.stack', 'torch.stack', (['mc_predictions'], {}), '(mc_predictions)\n', (17379, 17395), False, 'import torch\n'), ((17606, 17632), 'numpy.mean', 'np.mean', (['mc_predictions', '(0)'], {}), '(mc_predictions, 0)\n', (17613, 17632), True, 'import numpy as np\n'), ((18050, 18077), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (18066, 18077), False, 'import os\n'), ((4950, 4981), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (4965, 4981), True, 'from torch.nn import functional as F\n'), ((10776, 10797), 'torch.cat', 'torch.cat', (['logit_list'], {}), '(logit_list)\n', (10785, 10797), False, 'import torch\n'), ((10830, 10851), 'torch.cat', 'torch.cat', (['label_list'], {}), '(label_list)\n', (10839, 10851), False, 'import torch\n'), ((10925, 10959), 'torch.div', 'torch.div', (['logit_list', 'temperature'], {}), '(logit_list, temperature)\n', (10934, 10959), False, 'import torch\n'), ((17503, 17534), 'numpy.var', 'np.var', (['mc_predictions[:, i, 
0]'], {}), '(mc_predictions[:, i, 0])\n', (17509, 17534), True, 'import numpy as np\n'), ((4068, 4087), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4080, 4087), False, 'import torch\n'), ((4110, 4124), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (4122, 4124), False, 'from torch.cuda import amp\n'), ((4327, 4358), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (4342, 4358), True, 'from torch.nn import functional as F\n'), ((5512, 5529), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {}), '(logits)\n', (5521, 5529), True, 'from torch.nn import functional as F\n'), ((5749, 5773), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (5762, 5773), True, 'from torch.nn import functional as F\n'), ((9353, 9384), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (9368, 9384), True, 'from torch.nn import functional as F\n'), ((15850, 15869), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (15862, 15869), False, 'import torch\n'), ((15892, 15906), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (15904, 15906), False, 'from torch.cuda import amp\n'), ((16984, 17003), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16996, 17003), False, 'import torch\n'), ((17026, 17040), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (17038, 17040), False, 'from torch.cuda import amp\n'), ((8746, 8765), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8758, 8765), False, 'import torch\n'), ((8792, 8806), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (8804, 8806), False, 'from torch.cuda import amp\n'), ((9026, 9057), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (9041, 9057), True, 'from torch.nn import functional as F\n'), ((9748, 9765), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {}), '(logits)\n', (9757, 9765), True, 'from torch.nn import functional as F\n'), ((10001, 10025), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(1)'], {}), '(logits, 1)\n', (10014, 10025), True, 'from torch.nn import functional as F\n'), ((13798, 13809), 'time.time', 'time.time', ([], {}), '()\n', (13807, 13809), False, 'import time\n'), ((16278, 16302), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (16287, 16302), True, 'from torch.nn import functional as F\n'), ((17426, 17458), 'torch.nn.functional.softmax', 'F.softmax', (['mc_predictions'], {'dim': '(2)'}), '(mc_predictions, dim=2)\n', (17435, 17458), True, 'from torch.nn import functional as F\n'), ((16591, 16631), 'torch.div', 'torch.div', (['logits', 'arguments.temperature'], {}), '(logits, arguments.temperature)\n', (16600, 16631), False, 'import torch\n'), ((6832, 6843), 'time.time', 'time.time', ([], {}), '()\n', (6841, 6843), False, 'import time\n')] |
import json
import logging
import binascii
from hashlib import sha256
from string import hexdigits
from torba.client.baseaccount import BaseAccount
from torba.client.basetransaction import TXORef
log = logging.getLogger(__name__)
def validate_claim_id(claim_id):
if not len(claim_id) == 40:
raise Exception("Incorrect claimid length: %i" % len(claim_id))
if isinstance(claim_id, bytes):
claim_id = claim_id.decode('utf-8')
if set(claim_id).difference(hexdigits):
raise Exception("Claim id is not hex encoded")
class Account(BaseAccount):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.channel_keys = {}
@property
def hash(self) -> bytes:
h = sha256(json.dumps(self.to_dict(False)).encode())
for cert in sorted(self.channel_keys.keys()):
h.update(cert.encode())
return h.digest()
def apply(self, d: dict):
super().apply(d)
self.channel_keys.update(d.get('certificates', {}))
def add_channel_private_key(self, ref: TXORef, private_key):
assert ref.id not in self.channel_keys, 'Trying to add a duplicate channel private key.'
self.channel_keys[ref.id] = private_key
def get_channel_private_key(self, ref: TXORef):
return self.channel_keys.get(ref.id)
async def maybe_migrate_certificates(self):
if not self.channel_keys:
return
addresses = {}
results = {
'total': 0,
'not-a-claim-tx': 0,
'migrate-success': 0,
'migrate-failed': 0,
'previous-success': 0,
'previous-corrupted': 0
}
double_hex_encoded_to_pop = []
for maybe_claim_id in list(self.channel_keys):
if ':' not in maybe_claim_id:
try:
validate_claim_id(maybe_claim_id)
continue
except Exception:
try:
maybe_claim_id_bytes = maybe_claim_id
if isinstance(maybe_claim_id_bytes, str):
maybe_claim_id_bytes = maybe_claim_id_bytes.encode()
decoded_double_hex = binascii.unhexlify(maybe_claim_id_bytes).decode()
validate_claim_id(decoded_double_hex)
if decoded_double_hex in self.channel_keys:
log.warning("don't know how to migrate certificate %s", decoded_double_hex)
else:
log.info("claim id was double hex encoded, fixing it")
double_hex_encoded_to_pop.append((maybe_claim_id, decoded_double_hex))
except Exception:
continue
for double_encoded_claim_id, correct_claim_id in double_hex_encoded_to_pop:
self.channel_keys[correct_claim_id] = self.channel_keys.pop(double_encoded_claim_id)
for maybe_claim_id in list(self.channel_keys):
results['total'] += 1
if ':' not in maybe_claim_id:
try:
validate_claim_id(maybe_claim_id)
except Exception as e:
log.warning(
"Failed to migrate claim '%s': %s",
maybe_claim_id, str(e)
)
results['migrate-failed'] += 1
continue
claims = await self.ledger.network.get_claims_by_ids(maybe_claim_id)
if maybe_claim_id not in claims:
log.warning(
"Failed to migrate claim '%s', server did not return any claim information.",
maybe_claim_id
)
results['migrate-failed'] += 1
continue
claim = claims[maybe_claim_id]
tx = None
if claim:
tx = await self.ledger.db.get_transaction(txid=claim['txid'])
else:
log.warning(maybe_claim_id)
if tx is not None:
txo = tx.outputs[claim['nout']]
if not txo.script.is_claim_involved:
results['not-a-claim-tx'] += 1
raise ValueError(
"Certificate with claim_id {} doesn't point to a valid transaction."
.format(maybe_claim_id)
)
tx_nout = '{txid}:{nout}'.format(**claim)
self.channel_keys[tx_nout] = self.channel_keys[maybe_claim_id]
del self.channel_keys[maybe_claim_id]
log.info(
"Migrated certificate with claim_id '%s' ('%s') to a new look up key %s.",
maybe_claim_id, txo.script.values['claim_name'], tx_nout
)
results['migrate-success'] += 1
else:
if claim:
addresses.setdefault(claim['address'], 0)
addresses[claim['address']] += 1
log.warning(
"Failed to migrate claim '%s', it's not associated with any of your addresses.",
maybe_claim_id
)
else:
log.warning(
"Failed to migrate claim '%s', it appears abandoned.",
maybe_claim_id
)
results['migrate-failed'] += 1
else:
try:
txid, nout = maybe_claim_id.split(':')
tx = await self.ledger.db.get_transaction(txid=txid)
if not tx:
log.warning(
"Claim migration failed to find a transaction for outpoint %s", maybe_claim_id
)
results['previous-corrupted'] += 1
continue
if tx.outputs[int(nout)].script.is_claim_involved:
results['previous-success'] += 1
else:
results['previous-corrupted'] += 1
except Exception:
log.exception("Couldn't verify certificate with look up key: %s", maybe_claim_id)
results['previous-corrupted'] += 1
self.wallet.save()
log.info('verifying and possibly migrating certificates:')
log.info(json.dumps(results, indent=2))
if addresses:
log.warning('failed for addresses:')
log.warning(json.dumps(
[{'address': a, 'number of certificates': c} for a, c in addresses.items()],
indent=2
))
async def save_max_gap(self):
gap = await self.get_max_gap()
self.receiving.gap = max(20, gap['max_receiving_gap'] + 1)
self.change.gap = max(6, gap['max_change_gap'] + 1)
self.wallet.save()
def get_balance(self, confirmations=0, include_claims=False, **constraints):
if not include_claims:
constraints.update({'is_claim': 0, 'is_update': 0, 'is_support': 0})
return super().get_balance(confirmations, **constraints)
@classmethod
def get_private_key_from_seed(cls, ledger, seed: str, password: str):
return super().get_private_key_from_seed(
ledger, seed, password or '<PASSWORD>'
)
@classmethod
def from_dict(cls, ledger, wallet, d: dict) -> 'Account':
account = super().from_dict(ledger, wallet, d)
account.channel_keys = d.get('certificates', {})
return account
def to_dict(self, include_channel_keys=True):
d = super().to_dict()
if include_channel_keys:
d['certificates'] = self.channel_keys
return d
async def get_details(self, **kwargs):
details = await super().get_details(**kwargs)
details['certificates'] = len(self.channel_keys)
return details
@staticmethod
def constraint_spending_utxos(constraints):
constraints.update({'is_claim': 0, 'is_update': 0, 'is_support': 0})
def get_utxos(self, **constraints):
self.constraint_spending_utxos(constraints)
return super().get_utxos(**constraints)
def get_utxo_count(self, **constraints):
self.constraint_spending_utxos(constraints)
return super().get_utxo_count(**constraints)
def get_claims(self, **constraints):
return self.ledger.db.get_claims(account=self, **constraints)
def get_claim_count(self, **constraints):
return self.ledger.db.get_claim_count(account=self, **constraints)
def get_streams(self, **constraints):
return self.ledger.db.get_streams(account=self, **constraints)
def get_stream_count(self, **constraints):
return self.ledger.db.get_stream_count(account=self, **constraints)
def get_channels(self, **constraints):
return self.ledger.db.get_channels(account=self, **constraints)
def get_channel_count(self, **constraints):
return self.ledger.db.get_channel_count(account=self, **constraints)
def get_supports(self, **constraints):
return self.ledger.db.get_supports(account=self, **constraints)
def get_support_count(self, **constraints):
return self.ledger.db.get_support_count(account=self, **constraints)
async def release_all_outputs(self):
await self.ledger.db.release_all_outputs(self)
| [
"logging.getLogger",
"json.dumps",
"binascii.unhexlify"
] | [((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((6684, 6713), 'json.dumps', 'json.dumps', (['results'], {'indent': '(2)'}), '(results, indent=2)\n', (6694, 6713), False, 'import json\n'), ((2247, 2287), 'binascii.unhexlify', 'binascii.unhexlify', (['maybe_claim_id_bytes'], {}), '(maybe_claim_id_bytes)\n', (2265, 2287), False, 'import binascii\n')] |
"""Unit Tests for Region model"""
import pytest
from altaudit.models import Region
def test_create_region_table(db):
assert db.has_table('regions')
def test_add_region(db_session):
us = Region(name='US')
db_session.add(us)
assert us == db_session.query(Region).filter(Region.name=='US').first()
db_session.delete(us)
| [
"altaudit.models.Region"
] | [((197, 214), 'altaudit.models.Region', 'Region', ([], {'name': '"""US"""'}), "(name='US')\n", (203, 214), False, 'from altaudit.models import Region\n')] |
from bs4 import BeautifulSoup as bs
from .forecast import Forecast
from .day import Day
def parsetemp(t):
return int(t.find(class_="wob_t").text)
def parseday(d):
    s = bs(str(d), 'html.parser')
    dayname = s.find(class_="QrNVmd Z1VzSb")['aria-label']
    desc = s.find(class_="DxhUm").img['alt']
    tmps = bs(str(s.find(class_="wNE31c")), 'html.parser')
    highest = parsetemp(tmps.find(class_="vk_gy gNCp2e"))
    lowest = parsetemp(tmps.find(class_="QrNVmd ZXCv8e"))
    return Day(dayname, highest, lowest, desc)
def parsefcast(d, temp):
    soup = bs(d, 'html.parser')
    g = soup.find_all(class_="wob_df")
    g = [parseday(i) for i in g]
    first = g[0]
    nxt = g[1:]
    return Forecast(temp, first, nxt)
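# Presumed usage (hedged assumption): `d` appears to be the raw HTML of a Google weather
# result and `temp` the current temperature shown on it; the names below are hypothetical.
#   html = open("weather_page.html").read()
#   forecast = parsefcast(html, 21)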
| [
"bs4.BeautifulSoup"
] | [((544, 564), 'bs4.BeautifulSoup', 'bs', (['d', '"""html.parser"""'], {}), "(d, 'html.parser')\n", (546, 564), True, 'from bs4 import BeautifulSoup as bs\n')] |
import os
import re
import time
import shutil
from tempfile import mkdtemp
import operator
from collections.abc import Mapping
from pathlib import Path
import datetime
from .log import Handle
logger = Handle(__name__)
_FLAG_FIRST = object()
class Timewith:
def __init__(self, name=""):
"""Timewith context manager."""
self.name = name
self.start = time.time()
self.checkpoints = []
@property
def elapsed(self):
return time.time() - self.start
def checkpoint(self, name=""):
elapsed = self.elapsed
msg = "{time} {timer}: {checkpoint} in {elapsed:.3f} s.".format(
timer=self.name,
time=datetime.datetime.now().strftime("%H:%M:%S"),
checkpoint=name,
elapsed=elapsed,
).strip()
logger.info(msg)
self.checkpoints.append((name, elapsed))
def __enter__(self):
"""Object returned on entry."""
return self
def __exit__(self, type, value, traceback):
"""Code to execute on exit."""
self.checkpoint("Finished")
self.checkpoints.append(("Finished", self.elapsed))
def temp_path(suffix=""):
"""Return the path of a temporary directory."""
directory = mkdtemp(suffix=suffix)
return Path(directory)
def flatten_dict(d, climb=False, safemode=False):
"""
Flattens a nested dictionary containing only string keys.
This will work for dictionaries which don't have two equivalent
keys at the same level. If you're worried about this, use safemode=True.
Partially taken from https://stackoverflow.com/a/6043835.
Parameters
----------
climb: :class:`bool`, :code:`False`
Whether to keep trunk or leaf-values, for items with the same key.
safemode: :class:`bool`, :code:`True`
Whether to keep all keys as a tuple index, to avoid issues with
conflicts.
Returns
-------
:class:`dict`
Flattened dictionary.
"""
lift = lambda x: (x,)
join = operator.add
results = []
def visit(subdict, results, partialKey):
for k, v in subdict.items():
if partialKey == _FLAG_FIRST:
newKey = lift(k)
else:
newKey = join(partialKey, lift(k))
if isinstance(v, Mapping):
visit(v, results, newKey)
else:
results.append((newKey, v))
visit(d, results, _FLAG_FIRST)
if safemode:
pick_key = lambda keys: keys
else:
pick_key = lambda keys: keys[-1]
sort = map(
lambda x: x[:2],
sorted([(pick_key(k), v, len(k)) for k, v in results], key=lambda x: x[-1]),
) # sorted by depth
if not climb:
# We go down the tree, and prioritise the trunk values
items = sort
else:
# We prioritise the leaf values
items = [i for i in sort][::-1]
return dict(items)
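# Illustrative examples (hypothetical values):
# >>> flatten_dict({"a": {"b": 1}, "c": 2})
# {'c': 2, 'b': 1}
# >>> flatten_dict({"a": {"b": 1}, "c": 2}, safemode=True)
# {('c',): 2, ('a', 'b'): 1}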
def swap_item(startlist: list, pull: object, push: object):
"""
Swap a specified item in a list for another.
Parameters
----------
startlist : :class:`list`
List to replace item within.
pull
Item to replace in the list.
push
Item to add into the list.
Returns
-------
list
"""
return [[i, push][i == pull] for i in startlist]
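# Illustrative example (hypothetical values):
# >>> swap_item([1, 2, 3, 2], pull=2, push=9)
# [1, 9, 3, 9]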
def copy_file(src, dst, ext=None, permissions=None):
"""
Copy a file from one place to another.
Uses the full filepath including name.
Parameters
----------
src : :class:`str` | :class:`pathlib.Path`
Source filepath.
dst : :class:`str` | :class:`pathlib.Path`
Destination filepath or directory.
ext : :class:`str`, :code:`None`
Optional file extension specification.
"""
src = Path(src)
dst = Path(dst)
if dst.is_dir():
dst = dst / src.name
if ext is not None:
src = src.with_suffix(ext)
dst = dst.with_suffix(ext)
logger.debug("Copying from {} to {}".format(src, dst))
with open(str(src), "rb") as fin:
with open(str(dst), "wb") as fout:
shutil.copyfileobj(fin, fout)
if permissions is not None:
os.chmod(str(dst), permissions)
def remove_tempdir(directory):
"""
Remove a specific directory, contained files and sub-directories.
Parameters
----------
directory: str, Path
Path to directory.
"""
directory = Path(directory)
try:
shutil.rmtree(str(directory))
assert not directory.exists()
except PermissionError:
pass
| [
"shutil.copyfileobj",
"pathlib.Path",
"datetime.datetime.now",
"tempfile.mkdtemp",
"time.time"
] | [((1248, 1270), 'tempfile.mkdtemp', 'mkdtemp', ([], {'suffix': 'suffix'}), '(suffix=suffix)\n', (1255, 1270), False, 'from tempfile import mkdtemp\n'), ((1282, 1297), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (1286, 1297), False, 'from pathlib import Path\n'), ((3782, 3791), 'pathlib.Path', 'Path', (['src'], {}), '(src)\n', (3786, 3791), False, 'from pathlib import Path\n'), ((3802, 3811), 'pathlib.Path', 'Path', (['dst'], {}), '(dst)\n', (3806, 3811), False, 'from pathlib import Path\n'), ((4432, 4447), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (4436, 4447), False, 'from pathlib import Path\n'), ((380, 391), 'time.time', 'time.time', ([], {}), '()\n', (389, 391), False, 'import time\n'), ((475, 486), 'time.time', 'time.time', ([], {}), '()\n', (484, 486), False, 'import time\n'), ((4111, 4140), 'shutil.copyfileobj', 'shutil.copyfileobj', (['fin', 'fout'], {}), '(fin, fout)\n', (4129, 4140), False, 'import shutil\n'), ((686, 709), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (707, 709), False, 'import datetime\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import unittest.mock as mock
from zsl.service.service import SessionFactory
from zsl.testing.db import TestSessionFactory as DbTestTestSessionFactory
from zsl.utils.injection_helper import bind
def mock_db_session():
mock_sess = mock.MagicMock()
def session_holder():
return mock_sess
class TestSessionFactory(DbTestTestSessionFactory):
def __init__(self):
super(TestSessionFactory, self).__init__()
self._session_holder = session_holder
bind(SessionFactory, to=TestSessionFactory)
bind(DbTestTestSessionFactory, to=TestSessionFactory)
return mock_sess
| [
"unittest.mock.MagicMock",
"zsl.utils.injection_helper.bind"
] | [((343, 359), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (357, 359), True, 'import unittest.mock as mock\n'), ((607, 650), 'zsl.utils.injection_helper.bind', 'bind', (['SessionFactory'], {'to': 'TestSessionFactory'}), '(SessionFactory, to=TestSessionFactory)\n', (611, 650), False, 'from zsl.utils.injection_helper import bind\n'), ((655, 708), 'zsl.utils.injection_helper.bind', 'bind', (['DbTestTestSessionFactory'], {'to': 'TestSessionFactory'}), '(DbTestTestSessionFactory, to=TestSessionFactory)\n', (659, 708), False, 'from zsl.utils.injection_helper import bind\n')] |
"""
定时任务子模块
# 会初始化一个定时任务句柄
提供对定时任务的基本操作
-- 这里采用额外开启一个线程来控制整个定时任务模块
-- 考虑到多核cpu和一些 cpu密集型程序,
-- 这里采用 多线程的方式执行定时任务
"""
from apscheduler.util import obj_to_ref, utc_timestamp_to_datetime
from pymysql import IntegrityError
from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, \
ParameterException
from bspider.bcron.todo import do
from bspider.master import log
from bspider.master.dao import CronDao
from bspider.utils.tools import get_crontab_next_run_time
class CronService(BaseService):
impl = CronDao()
def add_cron(self, project_id, code_id, cron_type, trigger, description):
timestamp, next_run_time = get_crontab_next_run_time(trigger, self.tz)
value = {
'project_id': project_id,
'code_id': code_id,
'type': cron_type,
'trigger': trigger,
'trigger_type': 'cron',
'func': obj_to_ref(do),
'executor': 'thread_pool',
'description': description,
'next_run_time': timestamp,
}
try:
cron_id = self.impl.add_job(data=value, get_sql=False)
log.info(f'cron job->project_id:{project_id}-code_id:{code_id} add success')
return PostSuccess(msg='add cron job success', data={'cron_id': cron_id})
except IntegrityError:
log.error(f'cron job->project_id:{project_id}-code_id:{code_id} is already exist')
return Conflict(msg='cron job is already exist', errno=50001)
def update_job(self, cron_id, changes):
self.impl.update_job(cron_id, changes, get_sql=False)
log.info(f'update cron job->cron_id:{cron_id}->{changes} success')
return PatchSuccess(msg=f'cron job update success')
def delete_job(self, cron_id):
self.impl.delete_job(cron_id)
log.info(f'delete cron job->cron_id:{cron_id} success')
return DeleteSuccess()
def get_job(self, cron_id):
infos = self.impl.get_job(cron_id)
for info in infos:
info['next_run_time'] = utc_timestamp_to_datetime(info['next_run_time']).astimezone(self.tz)
self.datetime_to_str(info)
if len(infos):
return GetSuccess(msg='get cron job success', data=infos[0])
return NotFound(msg='job is not exist', errno=50001)
def get_jobs(self, page, limit, search, sort):
if sort.upper() not in ['ASC', 'DESC']:
return ParameterException(msg='sort must `asc` or `desc`')
infos, total = self.impl.get_jobs(page, limit, search, sort)
for info in infos:
info['next_run_time'] = utc_timestamp_to_datetime(info['next_run_time']).astimezone(self.tz)
self.datetime_to_str(info)
return GetSuccess(
msg='get cron job list success!',
data={
'items': infos,
'total': total,
'page': page,
'limit': limit
})
| [
"bspider.master.dao.CronDao",
"bspider.core.api.DeleteSuccess",
"bspider.master.log.info",
"bspider.core.api.ParameterException",
"bspider.master.log.error",
"bspider.core.api.NotFound",
"apscheduler.util.utc_timestamp_to_datetime",
"bspider.core.api.PostSuccess",
"apscheduler.util.obj_to_ref",
"b... | [((569, 578), 'bspider.master.dao.CronDao', 'CronDao', ([], {}), '()\n', (576, 578), False, 'from bspider.master.dao import CronDao\n'), ((693, 736), 'bspider.utils.tools.get_crontab_next_run_time', 'get_crontab_next_run_time', (['trigger', 'self.tz'], {}), '(trigger, self.tz)\n', (718, 736), False, 'from bspider.utils.tools import get_crontab_next_run_time\n'), ((1659, 1725), 'bspider.master.log.info', 'log.info', (['f"""update cron job->cron_id:{cron_id}->{changes} success"""'], {}), "(f'update cron job->cron_id:{cron_id}->{changes} success')\n", (1667, 1725), False, 'from bspider.master import log\n'), ((1741, 1785), 'bspider.core.api.PatchSuccess', 'PatchSuccess', ([], {'msg': 'f"""cron job update success"""'}), "(msg=f'cron job update success')\n", (1753, 1785), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((1868, 1923), 'bspider.master.log.info', 'log.info', (['f"""delete cron job->cron_id:{cron_id} success"""'], {}), "(f'delete cron job->cron_id:{cron_id} success')\n", (1876, 1923), False, 'from bspider.master import log\n'), ((1939, 1954), 'bspider.core.api.DeleteSuccess', 'DeleteSuccess', ([], {}), '()\n', (1952, 1954), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((2315, 2360), 'bspider.core.api.NotFound', 'NotFound', ([], {'msg': '"""job is not exist"""', 'errno': '(50001)'}), "(msg='job is not exist', errno=50001)\n", (2323, 2360), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((2790, 2907), 'bspider.core.api.GetSuccess', 'GetSuccess', ([], {'msg': '"""get cron job list success!"""', 'data': "{'items': infos, 'total': total, 'page': page, 'limit': limit}"}), "(msg='get cron job list success!', data={'items': infos, 'total':\n total, 'page': page, 'limit': limit})\n", (2800, 2907), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((944, 958), 'apscheduler.util.obj_to_ref', 'obj_to_ref', (['do'], {}), '(do)\n', (954, 958), False, 'from apscheduler.util import obj_to_ref, utc_timestamp_to_datetime\n'), ((1181, 1257), 'bspider.master.log.info', 'log.info', (['f"""cron job->project_id:{project_id}-code_id:{code_id} add success"""'], {}), "(f'cron job->project_id:{project_id}-code_id:{code_id} add success')\n", (1189, 1257), False, 'from bspider.master import log\n'), ((1277, 1343), 'bspider.core.api.PostSuccess', 'PostSuccess', ([], {'msg': '"""add cron job success"""', 'data': "{'cron_id': cron_id}"}), "(msg='add cron job success', data={'cron_id': cron_id})\n", (1288, 1343), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((2246, 2299), 'bspider.core.api.GetSuccess', 'GetSuccess', ([], {'msg': '"""get cron job success"""', 'data': 'infos[0]'}), "(msg='get cron job success', data=infos[0])\n", (2256, 2299), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((2480, 2531), 'bspider.core.api.ParameterException', 'ParameterException', ([], {'msg': '"""sort must `asc` or `desc`"""'}), "(msg='sort must `asc` or `desc`')\n", (2498, 2531), False, 'from bspider.core.api import BaseService, 
Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((1387, 1474), 'bspider.master.log.error', 'log.error', (['f"""cron job->project_id:{project_id}-code_id:{code_id} is already exist"""'], {}), "(\n f'cron job->project_id:{project_id}-code_id:{code_id} is already exist')\n", (1396, 1474), False, 'from bspider.master import log\n'), ((1489, 1543), 'bspider.core.api.Conflict', 'Conflict', ([], {'msg': '"""cron job is already exist"""', 'errno': '(50001)'}), "(msg='cron job is already exist', errno=50001)\n", (1497, 1543), False, 'from bspider.core.api import BaseService, Conflict, PostSuccess, PatchSuccess, DeleteSuccess, GetSuccess, NotFound, ParameterException\n'), ((2095, 2143), 'apscheduler.util.utc_timestamp_to_datetime', 'utc_timestamp_to_datetime', (["info['next_run_time']"], {}), "(info['next_run_time'])\n", (2120, 2143), False, 'from apscheduler.util import obj_to_ref, utc_timestamp_to_datetime\n'), ((2666, 2714), 'apscheduler.util.utc_timestamp_to_datetime', 'utc_timestamp_to_datetime', (["info['next_run_time']"], {}), "(info['next_run_time'])\n", (2691, 2714), False, 'from apscheduler.util import obj_to_ref, utc_timestamp_to_datetime\n')] |
import torch.nn as nn
import torch.nn.functional as F
from im2mesh.layers import (
ResnetBlockFC, CResnetBlockConv1d,
CBatchNorm1d, CBatchNorm1d_legacy,
ResnetBlockConv1d
)
import torch
class FusionModule(nn.Module):
''' FusionModule class.
Args:
dim (int): input dimension
        c_dim (int): dimension of latent conditioned code c
        n_views (int): number of input views to fuse
        pt_size (int): number of query points per forward pass
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
'''
def __init__(self, dim=3, c_dim=128, n_views = 3, pt_size = 2048,
hidden_size=128, leaky=False):
super().__init__()
assert c_dim > 0
self.c_dim = c_dim
self.n_views = n_views
self.pt_size = pt_size
# Submodules
self.fc_p = nn.Linear(dim, hidden_size)
self.block0 = ResnetBlockFC(hidden_size)
self.block1 = ResnetBlockFC(hidden_size)
self.block2 = ResnetBlockFC(hidden_size)
self.conv0 = nn.Sequential(
nn.Conv1d(pt_size, 1024, 1),
nn.InstanceNorm1d(1024),
nn.ReLU()
)
self.conv1 = nn.Sequential(
nn.Conv1d(1024, 512, 1),
nn.InstanceNorm1d(512),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv1d(512, 1, 1),
nn.InstanceNorm1d(1),
nn.ReLU()
)
self.fc_c = nn.Linear(c_dim, hidden_size)
self.c_block0 = ResnetBlockFC(hidden_size)
self.c_block1 = ResnetBlockFC(hidden_size)
self.c_block2 = ResnetBlockFC(hidden_size)
self.predict_actv = nn.ReLU()
self.predict_fc0 = nn.Linear(2 * hidden_size, 1024)
self.predict_fc1 = nn.Linear(1024, pt_size)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
def forward(self, p, c, logits, **kwargs):
batch_size, T, D = p.size()
batch_size, n_views, c_dim = c.size()
# p : batch_size * pt_size * 3
net = self.fc_p(p)
net = self.block0(net)
net = self.conv0(net)
net = self.block1(net)
net = self.conv1(net)
net = self.block2(net)
net = self.conv2(net)
# net : batch_size * 1 * 128
# c : batch_size * n_views * c_dim
net_c = self.fc_c(c)
net_c = self.c_block0(net_c)
net_c = self.c_block1(net_c)
net_c = self.c_block2(net_c)
# net_c : batch_size * n_views * 128
net = net.repeat((1,self.n_views,1))
        # Concatenates the point features with the per-view features along the feature
        # dimension so predict_fc0 (Linear(2 * hidden_size, 1024)) receives the expected width.
        net = torch.cat((net, net_c), 2)
        net = self.predict_actv(self.predict_fc0(net))
        net = self.predict_fc1(net)
        net = F.softmax(net, dim=1)
# net : batch_size * n_views * pt_size
# logits : batch_size * n_views * pt_size
out = (logits * net).sum(1)
        return out
| [
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.functional.leaky_relu",
"torch.nn.InstanceNorm1d",
"im2mesh.layers.ResnetBlockFC",
"torch.nn.Linear",
"torch.nn.functional.softmax",
"torch.cat"
] | [((781, 808), 'torch.nn.Linear', 'nn.Linear', (['dim', 'hidden_size'], {}), '(dim, hidden_size)\n', (790, 808), True, 'import torch.nn as nn\n'), ((831, 857), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (844, 857), False, 'from im2mesh.layers import ResnetBlockFC, CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy, ResnetBlockConv1d\n'), ((880, 906), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (893, 906), False, 'from im2mesh.layers import ResnetBlockFC, CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy, ResnetBlockConv1d\n'), ((929, 955), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (942, 955), False, 'from im2mesh.layers import ResnetBlockFC, CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy, ResnetBlockConv1d\n'), ((1409, 1438), 'torch.nn.Linear', 'nn.Linear', (['c_dim', 'hidden_size'], {}), '(c_dim, hidden_size)\n', (1418, 1438), True, 'import torch.nn as nn\n'), ((1463, 1489), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (1476, 1489), False, 'from im2mesh.layers import ResnetBlockFC, CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy, ResnetBlockConv1d\n'), ((1514, 1540), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (1527, 1540), False, 'from im2mesh.layers import ResnetBlockFC, CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy, ResnetBlockConv1d\n'), ((1565, 1591), 'im2mesh.layers.ResnetBlockFC', 'ResnetBlockFC', (['hidden_size'], {}), '(hidden_size)\n', (1578, 1591), False, 'from im2mesh.layers import ResnetBlockFC, CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy, ResnetBlockConv1d\n'), ((1621, 1630), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1628, 1630), True, 'import torch.nn as nn\n'), ((1658, 1690), 'torch.nn.Linear', 'nn.Linear', (['(2 * hidden_size)', '(1024)'], {}), '(2 * hidden_size, 1024)\n', (1667, 1690), True, 'import torch.nn as nn\n'), ((1718, 1742), 'torch.nn.Linear', 'nn.Linear', (['(1024)', 'pt_size'], {}), '(1024, pt_size)\n', (1727, 1742), True, 'import torch.nn as nn\n'), ((2584, 2610), 'torch.cat', 'torch.cat', (['(net, net_c)', '(1)'], {}), '((net, net_c), 1)\n', (2593, 2610), False, 'import torch\n'), ((2716, 2737), 'torch.nn.functional.softmax', 'F.softmax', (['net'], {'dim': '(1)'}), '(net, dim=1)\n', (2725, 2737), True, 'import torch.nn.functional as F\n'), ((1005, 1032), 'torch.nn.Conv1d', 'nn.Conv1d', (['pt_size', '(1024)', '(1)'], {}), '(pt_size, 1024, 1)\n', (1014, 1032), True, 'import torch.nn as nn\n'), ((1046, 1069), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', (['(1024)'], {}), '(1024)\n', (1063, 1069), True, 'import torch.nn as nn\n'), ((1083, 1092), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1090, 1092), True, 'import torch.nn as nn\n'), ((1151, 1174), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1024)', '(512)', '(1)'], {}), '(1024, 512, 1)\n', (1160, 1174), True, 'import torch.nn as nn\n'), ((1188, 1210), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', (['(512)'], {}), '(512)\n', (1205, 1210), True, 'import torch.nn as nn\n'), ((1224, 1233), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1231, 1233), True, 'import torch.nn as nn\n'), ((1292, 1312), 'torch.nn.Conv1d', 'nn.Conv1d', (['(512)', '(1)', '(1)'], {}), '(512, 1, 1)\n', (1301, 1312), True, 'import torch.nn as nn\n'), ((1326, 1346), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', (['(1)'], {}), '(1)\n', (1343, 1346), True, 'import 
torch.nn as nn\n'), ((1360, 1369), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1367, 1369), True, 'import torch.nn as nn\n'), ((1847, 1867), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (1859, 1867), True, 'import torch.nn.functional as F\n')] |
import unittest
from unittest.mock import *
from sample.checker import Checker
class TestChecker(unittest.TestCase):
def setUp(self):
self.temp = Checker()
def test_checker_before(self):
file = 'file.wav'
#prepare mock
self.temp.temp.getTime = Mock(name = 'getTime')
self.temp.temp.getTime.return_value = 10
self.temp.temp.resetWav = Mock(name = 'resetWav')
self.temp.temp.resetWav.return_value = False
#testing
result = self.temp.remainder(file)
self.assertEqual(result, False)
def test_checker_after(self):
file = 'file.wav'
# prepare mock
self.temp.temp.getTime = Mock(name = 'getTime')
self.temp.temp.getTime.return_value = 20
self.temp.temp.wavWasPlayed = Mock(name = 'wavWasPlayed')
self.temp.temp.wavWasPlayed.return_value = True
# testing
result = self.temp.remainder(file)
self.assertEqual(result, True)
def tearDown(self):
        self.temp = None
| [
"sample.checker.Checker"
] | [((161, 170), 'sample.checker.Checker', 'Checker', ([], {}), '()\n', (168, 170), False, 'from sample.checker import Checker\n')] |
# Generated by Django 2.0.7 on 2018-09-21 11:24
import django.db.models.deletion
import enumfields.fields
from django.conf import settings
from django.db import migrations, models
import campaigns.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taxonomy', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
('team', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=10000)),
('type', enumfields.fields.EnumField(enum=campaigns.models.CampaignType, max_length=1000)),
('image', models.ImageField(blank=True, height_field='height_field', null=True,
upload_to=campaigns.models.campaign_image_upload_location,
width_field='width_field')),
('height_field', models.IntegerField(default=0)),
('width_field', models.IntegerField(default=0)),
('description', models.TextField()),
('start_time', models.DateField()),
('end_time', models.DateField()),
],
),
migrations.CreateModel(
name='CampaignEnrollmentRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.TextField(blank=True, null=True)),
('campaign',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='request_campaign',
to='campaigns.Campaign')),
('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CampaignPartyRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('type', enumfields.fields.EnumField(enum=campaigns.models.CampaignPartyRelationType, max_length=100)),
('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign')),
('content_type',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='CampaignTermRelation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', enumfields.fields.EnumField(enum=campaigns.models.CampaignTermRealtionType, max_length=100)),
('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='campaign',
to='campaigns.Campaign')),
('term', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='term',
to='taxonomy.Term')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('description', models.TextField(blank=True, null=True)),
('price', models.IntegerField()),
('profile_image', models.ImageField(blank=True, height_field='height_field', null=True, upload_to='',
width_field='width_field')),
('height_field', models.IntegerField(default=0, null=True)),
('width_field', models.IntegerField(default=0, null=True)),
('seller',
models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller',
to='team.Team')),
],
),
]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.ImageField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
... | [((296, 353), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (327, 353), False, 'from django.db import migrations, models\n'), ((617, 710), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (633, 710), False, 'from django.db import migrations, models\n'), ((735, 769), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10000)'}), '(max_length=10000)\n', (751, 769), False, 'from django.db import migrations, models\n'), ((906, 1070), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'height_field': '"""height_field"""', 'null': '(True)', 'upload_to': 'campaigns.models.campaign_image_upload_location', 'width_field': '"""width_field"""'}), "(blank=True, height_field='height_field', null=True,\n upload_to=campaigns.models.campaign_image_upload_location, width_field=\n 'width_field')\n", (923, 1070), False, 'from django.db import migrations, models\n'), ((1185, 1215), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1204, 1215), False, 'from django.db import migrations, models\n'), ((1250, 1280), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1269, 1280), False, 'from django.db import migrations, models\n'), ((1315, 1333), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1331, 1333), False, 'from django.db import migrations, models\n'), ((1367, 1385), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1383, 1385), False, 'from django.db import migrations, models\n'), ((1417, 1435), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1433, 1435), False, 'from django.db import migrations, models\n'), ((1586, 1679), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1602, 1679), False, 'from django.db import migrations, models\n'), ((1703, 1742), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1719, 1742), False, 'from django.db import migrations, models\n'), ((1791, 1916), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""request_campaign"""', 'to': '"""campaigns.Campaign"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='request_campaign', to='campaigns.Campaign')\n", (1808, 1916), False, 'from django.db import migrations, models\n'), ((1974, 2080), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(1)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(default=1, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (1991, 2080), False, 'from django.db import migrations, models\n'), ((2266, 2359), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2282, 2359), False, 
'from django.db import migrations, models\n'), ((2388, 2417), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (2415, 2417), False, 'from django.db import migrations, models\n'), ((2569, 2661), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""campaigns.Campaign"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'campaigns.Campaign')\n", (2586, 2661), False, 'from django.db import migrations, models\n'), ((2709, 2807), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""contenttypes.ContentType"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'contenttypes.ContentType')\n", (2726, 2807), False, 'from django.db import migrations, models\n'), ((2948, 3041), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2964, 3041), False, 'from django.db import migrations, models\n'), ((3188, 3305), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""campaign"""', 'to': '"""campaigns.Campaign"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='campaign', to='campaigns.Campaign')\n", (3205, 3305), False, 'from django.db import migrations, models\n'), ((3375, 3483), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""term"""', 'to': '"""taxonomy.Term"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='term', to='taxonomy.Term')\n", (3392, 3483), False, 'from django.db import migrations, models\n'), ((3654, 3747), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3670, 3747), False, 'from django.db import migrations, models\n'), ((3771, 3826), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (3787, 3826), False, 'from django.db import migrations, models\n'), ((3861, 3900), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3877, 3900), False, 'from django.db import migrations, models\n'), ((3929, 3950), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3948, 3950), False, 'from django.db import migrations, models\n'), ((3987, 4101), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'height_field': '"""height_field"""', 'null': '(True)', 'upload_to': '""""""', 'width_field': '"""width_field"""'}), "(blank=True, height_field='height_field', null=True,\n upload_to='', width_field='width_field')\n", (4004, 4101), False, 'from django.db import migrations, models\n'), ((4185, 4226), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)'}), '(default=0, null=True)\n', (4204, 4226), False, 'from django.db import migrations, models\n'), ((4261, 4302), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'null': '(True)'}), '(default=0, null=True)\n', (4280, 
4302), False, 'from django.db import migrations, models\n'), ((4349, 4466), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""seller"""', 'to': '"""team.Team"""'}), "(blank=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='seller', to='team.Team')\n", (4366, 4466), False, 'from django.db import migrations, models\n')] |
'''
Module that is the entry point into the game
'''
from SnakesVsLadder.src import game
game.main() | [
"SnakesVsLadder.src.game.main"
] | [((90, 101), 'SnakesVsLadder.src.game.main', 'game.main', ([], {}), '()\n', (99, 101), False, 'from SnakesVsLadder.src import game\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 12:47:00 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import f1_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.preprocessing import LabelEncoder
def ap_cluster_k(x, K, preference_init=-1.0, max_iter=30,
c=None, iter_finetune=10):
'''
    Clustering of x by affinity propagation in which the number of clusters is K.
    args:
        x (ndarray):
            Data matrix.
        K (int):
            Target number of clusters.
        preference_init (float):
            Initial (negative) preference used to find a rough lower bound for
            the bisection search.
        max_iter (int):
            Number of trials for bisection search.
        c (ndarray, optional):
            Class labels of x. If this parameter is specified, the function
            tries to find a better solution by random search.
        iter_finetune (int):
            Number of steps for the random search.
'''
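    # Illustrative call (hypothetical data, not from the original script):
    #     x = np.random.randn(200, 16)   # 200 samples, 16 features
    #     ap = ap_cluster_k(x, K=5)      # fitted AffinityPropagation with ~5 clusters
    #     labels = ap.predict(x)
    # The search below relies on the cluster count growing as the preference
    # moves from very negative values toward 0.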
# first, search rough lower bound of the preference
assert preference_init < 0, "preference_init must be negative."
p = float(preference_init) # preference parameter
p_upper = 0
for i in range(5):
        ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
if k_current > K:
p_upper = p
k_upper = k_current
p *= 10
else:
p_lower = p
k_lower = k_current
break
else:
raise RuntimeError("Can't find initial lower bound for preference."
" Try another value of p_initial.")
# search the preference by bisection method
for i in range(max_iter):
p = (p_lower + p_upper) / 2
        ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
k_upper, p_upper))
# if the current k goes out of bounds then retry with perturbed p
while k_current < k_lower or k_current > k_upper:
print("retry")
p += np.random.uniform(p_lower, p_upper) / 10
            ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
k_upper, p_upper))
if k_current < K:
p_lower = p
k_lower = k_current
elif k_current > K:
p_upper = p
k_upper = k_current
else:
break
else:
raise RuntimeError("Can't find a preference to form K clusters."
" Try another value of p_initial.")
if c is None:
return ap
# Search further better preference in terms of NMI score by random search
p_best = p
    score_best = normalized_mutual_info_score(c, ap.predict(x))
print('initial score:', score_best)
print()
for i in range(iter_finetune):
p = np.random.normal(p_best, (p_upper - p_lower) / 2)
if p < p_lower or p > p_upper: # where p is rejected
print('reject')
continue
        ap = AffinityPropagation(preference=p).fit(x)
k_current = len(ap.cluster_centers_indices_)
if k_current < K and p > p_lower:
p_lower = p
elif k_current > K and p < p_upper:
p_upper = p
        else: # where k_current is K
            score = normalized_mutual_info_score(c, ap.predict(x))
if score > score_best:
print("update p {} -> {}".format(p_best, p))
p_best = p
score_best = score
print('p: {}, {}, {}'.format(p_lower, p, p_upper))
print('score: {}'.format(score_best))
print()
    return AffinityPropagation(preference=p_best).fit(x)
if __name__ == '__main__':
y_train = np.load('y_train.npy')
c_train = np.load('c_train.npy').ravel()
y_test = np.load('y_test.npy')
c_test = np.load('c_test.npy').ravel()
c_train = LabelEncoder().fit_transform(c_train)
c_test = LabelEncoder().fit_transform(c_test)
K = 40
# K = len(np.unique(c_train))
y = y_train[c_train.ravel() < K]
c = c_train[c_train < K]
# y = y_test[c_test.ravel() < K]
# c = c_test[c_test < K]
ap = ap_cluster_k(y, K, preference_init=-1.0, c=c, iter_finetune=30)
c_pred = ap.predict(y)
print(normalized_mutual_info_score(c, c_pred))
plt.plot(np.vstack((c_pred, c)).T)
plt.show()
# print f1_score(c, c_pred)
| [
"numpy.random.normal",
"sklearn.preprocessing.LabelEncoder",
"sklearn.cluster.AffinityPropagation",
"numpy.vstack",
"numpy.random.uniform",
"sklearn.metrics.normalized_mutual_info_score",
"numpy.load",
"matplotlib.pyplot.show"
] | [((4137, 4159), 'numpy.load', 'np.load', (['"""y_train.npy"""'], {}), "('y_train.npy')\n", (4144, 4159), True, 'import numpy as np\n'), ((4218, 4239), 'numpy.load', 'np.load', (['"""y_test.npy"""'], {}), "('y_test.npy')\n", (4225, 4239), True, 'import numpy as np\n'), ((4757, 4767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4765, 4767), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3300), 'numpy.random.normal', 'np.random.normal', (['p_best', '((p_upper - p_lower) / 2)'], {}), '(p_best, (p_upper - p_lower) / 2)\n', (3267, 3300), True, 'import numpy as np\n'), ((4673, 4712), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['c', 'c_pred'], {}), '(c, c_pred)\n', (4701, 4712), False, 'from sklearn.metrics import normalized_mutual_info_score\n'), ((4048, 4086), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p_best'}), '(preference=p_best)\n', (4067, 4086), False, 'from sklearn.cluster import AffinityPropagation\n'), ((4174, 4196), 'numpy.load', 'np.load', (['"""c_train.npy"""'], {}), "('c_train.npy')\n", (4181, 4196), True, 'import numpy as np\n'), ((4253, 4274), 'numpy.load', 'np.load', (['"""c_test.npy"""'], {}), "('c_test.npy')\n", (4260, 4274), True, 'import numpy as np\n'), ((4298, 4312), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4310, 4312), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4349, 4363), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4361, 4363), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4727, 4749), 'numpy.vstack', 'np.vstack', (['(c_pred, c)'], {}), '((c_pred, c))\n', (4736, 4749), True, 'import numpy as np\n'), ((1194, 1227), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (1213, 1227), False, 'from sklearn.cluster import AffinityPropagation\n'), ((1755, 1788), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (1774, 1788), False, 'from sklearn.cluster import AffinityPropagation\n'), ((2235, 2270), 'numpy.random.uniform', 'np.random.uniform', (['p_lower', 'p_upper'], {}), '(p_lower, p_upper)\n', (2252, 2270), True, 'import numpy as np\n'), ((3425, 3458), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (3444, 3458), False, 'from sklearn.cluster import AffinityPropagation\n'), ((2293, 2326), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'preference': 'p'}), '(preference=p)\n', (2312, 2326), False, 'from sklearn.cluster import AffinityPropagation\n')] |
import sys
sys.path.append('../../')
import keras2caffe
DATA_DIR='../../data/'
import caffe
import cv2
import numpy as np
import sys
sys.path.append('/media/toshiba_ml/models/keras-models/keras-squeezenet')
from keras_squeezenet import SqueezeNet
# TensorFlow backend uses all GPU memory by default, so we need to limit it
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))
#converting
keras_model = SqueezeNet()
keras2caffe.convert(keras_model, 'deploy.prototxt', 'SqueezeNet.caffemodel')
#testing the model
caffe.set_mode_gpu()
net = caffe.Net('deploy.prototxt', 'SqueezeNet.caffemodel', caffe.TEST)
img = cv2.imread(DATA_DIR+'bear.jpg')
img = cv2.resize(img, (227, 227))
img = img[...,::-1] #RGB 2 BGR
data = np.array(img, dtype=np.float32)
data = data.transpose((2, 0, 1))
data.shape = (1,) + data.shape
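# subtract a rough mean of 128 so the raw 0-255 pixel values are roughly zero-centered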
data -= 128
net.blobs['data'].data[...] = data
out = net.forward()
preds = out['global_average_pooling2d_1']
classes = eval(open(DATA_DIR+'class_names.txt', 'r').read())
print("Class is: " + classes[np.argmax(preds)])
print("Certainty is: " + str(preds[0][np.argmax(preds)]))
| [
"keras_squeezenet.SqueezeNet",
"tensorflow.Session",
"caffe.set_mode_gpu",
"keras2caffe.convert",
"numpy.argmax",
"numpy.array",
"caffe.Net",
"tensorflow.ConfigProto",
"cv2.resize",
"sys.path.append",
"cv2.imread"
] | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((136, 209), 'sys.path.append', 'sys.path.append', (['"""/media/toshiba_ml/models/keras-models/keras-squeezenet"""'], {}), "('/media/toshiba_ml/models/keras-models/keras-squeezenet')\n", (151, 209), False, 'import sys\n'), ((411, 427), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (425, 427), True, 'import tensorflow as tf\n'), ((551, 563), 'keras_squeezenet.SqueezeNet', 'SqueezeNet', ([], {}), '()\n', (561, 563), False, 'from keras_squeezenet import SqueezeNet\n'), ((565, 641), 'keras2caffe.convert', 'keras2caffe.convert', (['keras_model', '"""deploy.prototxt"""', '"""SqueezeNet.caffemodel"""'], {}), "(keras_model, 'deploy.prototxt', 'SqueezeNet.caffemodel')\n", (584, 641), False, 'import keras2caffe\n'), ((663, 683), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (681, 683), False, 'import caffe\n'), ((691, 756), 'caffe.Net', 'caffe.Net', (['"""deploy.prototxt"""', '"""SqueezeNet.caffemodel"""', 'caffe.TEST'], {}), "('deploy.prototxt', 'SqueezeNet.caffemodel', caffe.TEST)\n", (700, 756), False, 'import caffe\n'), ((764, 797), 'cv2.imread', 'cv2.imread', (["(DATA_DIR + 'bear.jpg')"], {}), "(DATA_DIR + 'bear.jpg')\n", (774, 797), False, 'import cv2\n'), ((803, 830), 'cv2.resize', 'cv2.resize', (['img', '(227, 227)'], {}), '(img, (227, 227))\n', (813, 830), False, 'import cv2\n'), ((871, 902), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (879, 902), True, 'import numpy as np\n'), ((497, 522), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (507, 522), True, 'import tensorflow as tf\n'), ((1170, 1186), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (1179, 1186), True, 'import numpy as np\n'), ((1227, 1243), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (1236, 1243), True, 'import numpy as np\n')] |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from florence.models import User, Lib, Intlib
from django.core import serializers
from rest_framework.renderers import JSONRenderer
from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector
from florence.serializers import IntlibSearchSerializer
import json
# Create your views here.
def intro(request):
return render(request, 'florence/intro.html')
def dashboard(request):
return render(request, 'florence/dashboard.html')
def miniapps(request):
return render(request, 'florence/miniapps.html')
def develop(request):
return render(request, 'florence/develop.html')
def my(request):
return render(request, 'florence/my.html')
# def getcode(request):
# modulekey = request.GET.get('modulekey', None)
#
# try:
# _modulekey = modulekey.split(':')
# user_email = _modulekey[1]
# module_name = _modulekey[2]
# print(user_email, module_name)
# module = Module.objects.get(author__email=user_email, name=module_name)
# return JsonResponse({'success':True, 'code':module.code}, safe=False)
#
# except:
# return JsonResponse({'success':False}, safe=False)
# def treefy(mod):
# imports = {}
# for imp in mod.import_set.all():
# if imp.typeof=='moduleimport':
# imports[imp.alias] = treefy(imp.moduleimport.module)
# elif imp.typeof=='urlimport':
# imports[imp.alias] = imp.urlimport.url
#
# return {
# 'imports': imports,
# 'code': mod.code,
# 'author': mod.author.email,
# 'author_avatar': mod.author.socialaccount_set.all()[0].get_avatar_url(),
# 'name': mod.name,
# 'description': mod.description,
# 'exports': [exp.strip() for exp in mod.exports.split(',')]
# }
#
#
# def moduletree(request, pk):
# mod = Module.objects.get(pk=pk)
# return JsonResponse({'success':True, 'tree':treefy(mod)}, safe=False)
#
#
# def import_module(request, pk, alias):
# mod = Module.objects.get(pk=pk)
# mod.alias = alias
# return render(request, 'florence/import_module.html', {'module':mod})
def familize_old(intlib):
imports = {}
for imp in intlib.imports.all():
if imp.lib.typeof=='intlib':
imports[imp.alias] = familize(imp.lib.intlib)
elif imp.lib.typeof=='extlib':
imports[imp.alias] = imp.lib.extlib.url
return {
'id': intlib.pk,
'imports': imports,
'code': intlib.code,
'author': {
'id': intlib.author.pk,
'avatar': intlib.author.socialaccount_set.all()[0].get_avatar_url(),
},
'name': intlib.name,
'version': intlib.version,
'description': intlib.description,
'exports': [exp.strip() for exp in intlib.exports.split(',')]
}
def familize(lib):
output = {
'id': lib.pk,
'name': lib.name,
'version': lib.version,
'description': lib.description,
}
if lib.typeof=='intlib':
output['code'] = lib.intlib.code
output['author'] = {
'id': lib.intlib.author.pk,
'avatar': lib.intlib.author.socialaccount_set.all()[0].get_avatar_url(),
}
output['imports'] = {imp.alias:familize(imp.lib) for imp in lib.imports.all()}
output['exports'] = [exp.strip() for exp in lib.intlib.exports.split(',')]
else:
output['url'] = lib.extlib.url
output['author'] = lib.extlib.author
return output
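# Shape of the tree returned by familize() (illustrative; actual values come
# from the stored Lib rows): every node has id/name/version/description; an
# 'intlib' node additionally carries code, author, exports and an 'imports'
# dict mapping each alias to a nested node, while an 'extlib' node carries
# url and author instead.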
def lib_family(request, pk):
lib = Lib.objects.get(pk=pk)
# ser = serializers.serialize('python', [lib], use_natural_foreign_keys=True)
# print(JSONRenderer().render(IntlibSerializer(Intlib.objects.all(), many=True).data))
return JsonResponse({'family':familize(lib)}, safe=False)
def lib_saerch(request):
n = 3
q = request.GET.get('q', None)
try:
vector = SearchVector('code', 'description', 'keywords')
query = SearchQuery(q)
searched = Intlib.objects.annotate(rank=SearchRank(vector, query)).order_by('-rank')[:n]
result = IntlibSearchSerializer(searched, many=True).data
# result = JSONRenderer().render(result)
# result = list(searched.values('name', 'author_avatar', 'description', 'version'))
# result = list(searched.values('name', 'author__avatar', 'description', 'version'))
return JsonResponse({'result':result}, safe=False)
except:
return JsonResponse({'result':[]}, safe=False)
| [
"django.shortcuts.render",
"django.http.JsonResponse",
"django.contrib.postgres.search.SearchQuery",
"django.contrib.postgres.search.SearchRank",
"florence.serializers.IntlibSearchSerializer",
"florence.models.Lib.objects.get",
"django.contrib.postgres.search.SearchVector"
] | [((449, 487), 'django.shortcuts.render', 'render', (['request', '"""florence/intro.html"""'], {}), "(request, 'florence/intro.html')\n", (455, 487), False, 'from django.shortcuts import render\n'), ((524, 566), 'django.shortcuts.render', 'render', (['request', '"""florence/dashboard.html"""'], {}), "(request, 'florence/dashboard.html')\n", (530, 566), False, 'from django.shortcuts import render\n'), ((602, 643), 'django.shortcuts.render', 'render', (['request', '"""florence/miniapps.html"""'], {}), "(request, 'florence/miniapps.html')\n", (608, 643), False, 'from django.shortcuts import render\n'), ((678, 718), 'django.shortcuts.render', 'render', (['request', '"""florence/develop.html"""'], {}), "(request, 'florence/develop.html')\n", (684, 718), False, 'from django.shortcuts import render\n'), ((748, 783), 'django.shortcuts.render', 'render', (['request', '"""florence/my.html"""'], {}), "(request, 'florence/my.html')\n", (754, 783), False, 'from django.shortcuts import render\n'), ((3646, 3668), 'florence.models.Lib.objects.get', 'Lib.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (3661, 3668), False, 'from florence.models import User, Lib, Intlib\n'), ((4004, 4051), 'django.contrib.postgres.search.SearchVector', 'SearchVector', (['"""code"""', '"""description"""', '"""keywords"""'], {}), "('code', 'description', 'keywords')\n", (4016, 4051), False, 'from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector\n'), ((4068, 4082), 'django.contrib.postgres.search.SearchQuery', 'SearchQuery', (['q'], {}), '(q)\n', (4079, 4082), False, 'from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector\n'), ((4495, 4539), 'django.http.JsonResponse', 'JsonResponse', (["{'result': result}"], {'safe': '(False)'}), "({'result': result}, safe=False)\n", (4507, 4539), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((4197, 4240), 'florence.serializers.IntlibSearchSerializer', 'IntlibSearchSerializer', (['searched'], {'many': '(True)'}), '(searched, many=True)\n', (4219, 4240), False, 'from florence.serializers import IntlibSearchSerializer\n'), ((4567, 4607), 'django.http.JsonResponse', 'JsonResponse', (["{'result': []}"], {'safe': '(False)'}), "({'result': []}, safe=False)\n", (4579, 4607), False, 'from django.http import HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((4131, 4156), 'django.contrib.postgres.search.SearchRank', 'SearchRank', (['vector', 'query'], {}), '(vector, query)\n', (4141, 4156), False, 'from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector\n')] |
import random
from flask_testing import TestCase
from url_shortener.app import app
from url_shortener.app import db
class URLShortenerTests(TestCase):
def setUp(self):
db.create_all()
db.session.commit()
def create_app(self):
app.config.from_object('url_shortener.config.TestingConfig')
return app
def tearDown(self):
db.session.remove()
db.drop_all()
def test_post_url_check_redirect(self):
# This should make random.choice within the app predictable
# This only matters so we get the same redirect url every time
# Which isn't that important, but I do not like random factors within unit tests
random.seed(1)
slug = 'iK2ZWeqh' # This won't change if seed doesn't
response = self.client.post('/shorten_url', data={'url': 'https://rwgeaston.com'})
self.assertEqual(response.status_code, 201)
self.assertEqual(
response.get_json(),
{
'url': 'https://rwgeaston.com',
'slug': slug,
'shortened_url': f'http://localhost/r/{slug}',
'relative_shortened_url': f'r/{slug}',
}
)
response = self.client.get(f'/r/{slug}')
self.assertEqual(response.status_code, 301)
self.assertIn(
"https://rwgeaston.com",
response.data.decode('utf8'),
)
def test_rest_endpoints(self):
random.seed(2)
slug = '9382dffx'
response = self.client.post('/shorten_url', data={'url': 'https://rwgeaston.com'})
expected_response = {
'url': 'https://rwgeaston.com',
'slug': slug,
'shortened_url': f'http://localhost/r/{slug}',
'relative_shortened_url': f'r/{slug}',
}
self.assertEqual(response.status_code, 201)
self.assertEqual(response.get_json(), expected_response)
response = self.client.get(f'/short_url/{slug}')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_json(), expected_response)
expected_response['url'] = 'https://rwgeaston.com/admin/'
response = self.client.patch(f'/short_url/{slug}', data={
'url': 'https://rwgeaston.com/admin/',
# Note we did NOT change this in expected response
'shortened_url': 'complete nonsense, thankfully will be ignored',
})
self.assertEqual(response.status_code, 201)
self.assertEqual(response.get_json(), expected_response)
response = self.client.delete(f'/short_url/{slug}')
self.assertEqual(response.status_code, 204)
response = self.client.get(f'/short_url/{slug}')
self.assertEqual(response.status_code, 404)
def test_bad_luck_same_slug_twice(self):
random.seed(3)
slug = 'pLIix6ME' # This won't change if seed doesn't
response = self.client.post('/shorten_url', data={'url': 'https://rwgeaston.com'})
self.assertEqual(response.status_code, 201)
self.assertEqual(
response.get_json(),
{
'url': 'https://rwgeaston.com',
'slug': slug,
'shortened_url': f'http://localhost/r/{slug}',
'relative_shortened_url': f'r/{slug}',
}
)
random.seed(3) # oops will get same slug again
response = self.client.post('/shorten_url', data={'url': 'https://rwgeaston.com'})
self.assertEqual(response.status_code, 201)
self.assertEqual(
response.get_json()['slug'],
'OLeMa61E', # it failed so POST endpoint tried a different one
)
| [
"url_shortener.app.db.create_all",
"url_shortener.app.db.session.commit",
"url_shortener.app.db.session.remove",
"url_shortener.app.db.drop_all",
"random.seed",
"url_shortener.app.app.config.from_object"
] | [((183, 198), 'url_shortener.app.db.create_all', 'db.create_all', ([], {}), '()\n', (196, 198), False, 'from url_shortener.app import db\n'), ((207, 226), 'url_shortener.app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (224, 226), False, 'from url_shortener.app import db\n'), ((262, 322), 'url_shortener.app.app.config.from_object', 'app.config.from_object', (['"""url_shortener.config.TestingConfig"""'], {}), "('url_shortener.config.TestingConfig')\n", (284, 322), False, 'from url_shortener.app import app\n'), ((375, 394), 'url_shortener.app.db.session.remove', 'db.session.remove', ([], {}), '()\n', (392, 394), False, 'from url_shortener.app import db\n'), ((403, 416), 'url_shortener.app.db.drop_all', 'db.drop_all', ([], {}), '()\n', (414, 416), False, 'from url_shortener.app import db\n'), ((698, 712), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (709, 712), False, 'import random\n'), ((1473, 1487), 'random.seed', 'random.seed', (['(2)'], {}), '(2)\n', (1484, 1487), False, 'import random\n'), ((2851, 2865), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (2862, 2865), False, 'import random\n'), ((3376, 3390), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (3387, 3390), False, 'import random\n')] |
import sys
from dataclasses import dataclass
from unittest import TestCase
from custom_imports.importer import Importer, SimpleFinder, SimpleLoader
@dataclass
class SimpleLocator:
fullname: str
@dataclass
class SimpleModule:
value: str = ""
def set_value(self, locator):
self.value = locator.fullname
class TestImporter(TestCase):
def setUp(self):
finder = SimpleFinder(
locate_module=lambda fullname, path, target: SimpleLocator(fullname)
)
loader = SimpleLoader(
module_type=SimpleModule, load_module=SimpleModule.set_value
)
self.importer = Importer(finder=finder, loader=loader)
def test_importer(self):
importer = self.importer
with self.subTest("Module unavailable before registration"), self.assertRaises(
ImportError
):
import fake_module
with self.subTest("Module importable"):
importer.register()
import fake_module
self.assertIsInstance(fake_module, SimpleModule)
self.assertEqual("fake_module", fake_module.value)
with self.subTest("Module unavailable after deregistration"):
del sys.modules["fake_module"]
importer.deregister()
with self.assertRaises(ImportError):
import fake_module
def test_importer_context_manager(self):
with self.subTest("Module available inside context manager"):
with self.importer:
import fake_module
self.assertIsInstance(fake_module, SimpleModule)
self.assertEqual("fake_module", fake_module.value)
with self.subTest("Module unavailable outside context manager"):
del sys.modules["fake_module"]
with self.assertRaises(ImportError):
import fake_module
| [
"custom_imports.importer.Importer",
"custom_imports.importer.SimpleLoader"
] | [((520, 594), 'custom_imports.importer.SimpleLoader', 'SimpleLoader', ([], {'module_type': 'SimpleModule', 'load_module': 'SimpleModule.set_value'}), '(module_type=SimpleModule, load_module=SimpleModule.set_value)\n', (532, 594), False, 'from custom_imports.importer import Importer, SimpleFinder, SimpleLoader\n'), ((642, 680), 'custom_imports.importer.Importer', 'Importer', ([], {'finder': 'finder', 'loader': 'loader'}), '(finder=finder, loader=loader)\n', (650, 680), False, 'from custom_imports.importer import Importer, SimpleFinder, SimpleLoader\n')] |
import unittest
from parameterized import parameterized as p
from solns.removeDuplicatesFromSortedArr.removeDuplicatesFromSortedArr import *
class Test_RemoveDuplicatesFromSortedArr(unittest.TestCase):
@p.expand([
[[1,1,2],2],[[],0],[[1],1],[[1,2],2],
[[0,0,1,1,1,2,2,3,3,4],5],
[[0,0,0,2,2,2,4,4,4],3]
])
def test_naive(self,nums,expected):
self.assertEqual(Solution.naive(nums),expected) | [
"parameterized.parameterized.expand"
] | [((208, 342), 'parameterized.parameterized.expand', 'p.expand', (['[[[1, 1, 2], 2], [[], 0], [[1], 1], [[1, 2], 2], [[0, 0, 1, 1, 1, 2, 2, 3, \n 3, 4], 5], [[0, 0, 0, 2, 2, 2, 4, 4, 4], 3]]'], {}), '([[[1, 1, 2], 2], [[], 0], [[1], 1], [[1, 2], 2], [[0, 0, 1, 1, 1, \n 2, 2, 3, 3, 4], 5], [[0, 0, 0, 2, 2, 2, 4, 4, 4], 3]])\n', (216, 342), True, 'from parameterized import parameterized as p\n')] |
"""
Test cases for the regi0.geographic.utils.get_nearest_year function.
"""
import numpy as np
import pandas as pd
import pytest
from regi0.geographic.utils import get_nearest_year
@pytest.fixture()
def dates():
return pd.Series(["17/08/1945", np.nan, "21/09/2011", "01/01/1984", "17/04/2009"])
@pytest.fixture()
def years():
return [1963, 1980, 2010, 2014]
def test_defaults(dates, years):
result = get_nearest_year(dates, years)
expected = pd.Series([1963, np.nan, 2010, 1980, 1980])
pd.testing.assert_series_equal(result, expected)
def test_no_rounding_unmatched(dates, years):
result = get_nearest_year(dates, years, round_unmatched=False)
expected = pd.Series([np.nan, np.nan, 2010, 1980, 1980])
pd.testing.assert_series_equal(result, expected)
def test_direction_nearest(dates, years):
result = get_nearest_year(dates, years, direction="nearest")
expected = pd.Series([1963, np.nan, 2010, 1980, 2010])
pd.testing.assert_series_equal(result, expected)
def test_direction_forward(dates, years):
result = get_nearest_year(dates, years, direction="forward")
expected = pd.Series([1963, np.nan, 2014, 2010, 2010])
pd.testing.assert_series_equal(result, expected)
def test_repeated_years(dates):
years = [1963, 1980, 2010, 1963, 1980, 2014, 2010, 2014]
result = get_nearest_year(dates, years)
expected = pd.Series([1963, np.nan, 2010, 1980, 1980])
pd.testing.assert_series_equal(result, expected)
| [
"pytest.fixture",
"pandas.Series",
"pandas.testing.assert_series_equal",
"regi0.geographic.utils.get_nearest_year"
] | [((186, 202), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (200, 202), False, 'import pytest\n'), ((306, 322), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (320, 322), False, 'import pytest\n'), ((227, 302), 'pandas.Series', 'pd.Series', (["['17/08/1945', np.nan, '21/09/2011', '01/01/1984', '17/04/2009']"], {}), "(['17/08/1945', np.nan, '21/09/2011', '01/01/1984', '17/04/2009'])\n", (236, 302), True, 'import pandas as pd\n'), ((420, 450), 'regi0.geographic.utils.get_nearest_year', 'get_nearest_year', (['dates', 'years'], {}), '(dates, years)\n', (436, 450), False, 'from regi0.geographic.utils import get_nearest_year\n'), ((466, 509), 'pandas.Series', 'pd.Series', (['[1963, np.nan, 2010, 1980, 1980]'], {}), '([1963, np.nan, 2010, 1980, 1980])\n', (475, 509), True, 'import pandas as pd\n'), ((514, 562), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (544, 562), True, 'import pandas as pd\n'), ((624, 677), 'regi0.geographic.utils.get_nearest_year', 'get_nearest_year', (['dates', 'years'], {'round_unmatched': '(False)'}), '(dates, years, round_unmatched=False)\n', (640, 677), False, 'from regi0.geographic.utils import get_nearest_year\n'), ((693, 738), 'pandas.Series', 'pd.Series', (['[np.nan, np.nan, 2010, 1980, 1980]'], {}), '([np.nan, np.nan, 2010, 1980, 1980])\n', (702, 738), True, 'import pandas as pd\n'), ((743, 791), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (773, 791), True, 'import pandas as pd\n'), ((849, 900), 'regi0.geographic.utils.get_nearest_year', 'get_nearest_year', (['dates', 'years'], {'direction': '"""nearest"""'}), "(dates, years, direction='nearest')\n", (865, 900), False, 'from regi0.geographic.utils import get_nearest_year\n'), ((916, 959), 'pandas.Series', 'pd.Series', (['[1963, np.nan, 2010, 1980, 2010]'], {}), '([1963, np.nan, 2010, 1980, 2010])\n', (925, 959), True, 'import pandas as pd\n'), ((964, 1012), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (994, 1012), True, 'import pandas as pd\n'), ((1070, 1121), 'regi0.geographic.utils.get_nearest_year', 'get_nearest_year', (['dates', 'years'], {'direction': '"""forward"""'}), "(dates, years, direction='forward')\n", (1086, 1121), False, 'from regi0.geographic.utils import get_nearest_year\n'), ((1137, 1180), 'pandas.Series', 'pd.Series', (['[1963, np.nan, 2014, 2010, 2010]'], {}), '([1963, np.nan, 2014, 2010, 2010])\n', (1146, 1180), True, 'import pandas as pd\n'), ((1185, 1233), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (1215, 1233), True, 'import pandas as pd\n'), ((1342, 1372), 'regi0.geographic.utils.get_nearest_year', 'get_nearest_year', (['dates', 'years'], {}), '(dates, years)\n', (1358, 1372), False, 'from regi0.geographic.utils import get_nearest_year\n'), ((1388, 1431), 'pandas.Series', 'pd.Series', (['[1963, np.nan, 2010, 1980, 1980]'], {}), '([1963, np.nan, 2010, 1980, 1980])\n', (1397, 1431), True, 'import pandas as pd\n'), ((1436, 1484), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (1466, 1484), True, 'import pandas as pd\n')] |
# importing necessary packages
from keras.models import load_model
import argparse
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from preprocessing.preprocessing import AspectAwarePreprocessor, ImageToArrayPreprocessor, SimplePreprocessor
from preprocessing.datasets import SimpleDatasetLoader
from keras.layers import Input, Dense, Flatten, Conv2D, Conv2DTranspose, MaxPool2D, UpSampling2D
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from imutils import paths
import numpy as np
import argparse
import glob
import cv2
import matplotlib.pyplot as plt
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', help='path to the saved model', default='/home/stu15/s15/ts6442/Capstone/codes/third_model_final.h5')
args = vars(ap.parse_args())
model = load_model(args['path'])
model.summary()
# grab the list of images
print('[INFO] loading images...')
imagePaths = glob.glob('/home/stu15/s15/ts6442/Capstone/images/images/*.jpg')
# Resize the image keeping aspect ratio in mind
aap = AspectAwarePreprocessor(128, 128)
# Resize the image without aspect ratio in mind
# sp = SimplePreprocessor(128, 128)
# converting images to array for easier processing
iap = ImageToArrayPreprocessor()
# load the dataset from disk then scale the raw pixel intensities to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
# as there are no labels using '_' in place of labels
(data, _) = sdl.load(imagePaths, verbose=1000)
data = data.astype('float') / 255.0
print('[INFO] total number of images is', len(data))
print('Shape of data is', data.shape)
(trainX, testX, _, _) = train_test_split(data, _, test_size=0.05)
print('[INFO] train and test split created...')
print(trainX.shape)
checkpoint = ModelCheckpoint('final_model.h5', monitor='val_loss', mode='min', save_best_only=True, verbose=1)
callbacks = [checkpoint]
model.fit(trainX, trainX, batch_size=64, epochs=2, validation_data=(testX, testX), callbacks=callbacks) # model.save('first_try.h5')
# model.save('second_model_final.h5')
# print('[INFO] model saved...')
| [
"keras.models.load_model",
"preprocessing.datasets.SimpleDatasetLoader",
"argparse.ArgumentParser",
"keras.callbacks.ModelCheckpoint",
"sklearn.model_selection.train_test_split",
"preprocessing.preprocessing.ImageToArrayPreprocessor",
"preprocessing.preprocessing.AspectAwarePreprocessor",
"glob.glob"
... | [((684, 709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (707, 709), False, 'import argparse\n'), ((882, 906), 'keras.models.load_model', 'load_model', (["args['path']"], {}), "(args['path'])\n", (892, 906), False, 'from keras.models import load_model\n'), ((997, 1061), 'glob.glob', 'glob.glob', (['"""/home/stu15/s15/ts6442/Capstone/images/images/*.jpg"""'], {}), "('/home/stu15/s15/ts6442/Capstone/images/images/*.jpg')\n", (1006, 1061), False, 'import glob\n'), ((1117, 1150), 'preprocessing.preprocessing.AspectAwarePreprocessor', 'AspectAwarePreprocessor', (['(128)', '(128)'], {}), '(128, 128)\n', (1140, 1150), False, 'from preprocessing.preprocessing import AspectAwarePreprocessor, ImageToArrayPreprocessor, SimplePreprocessor\n'), ((1292, 1318), 'preprocessing.preprocessing.ImageToArrayPreprocessor', 'ImageToArrayPreprocessor', ([], {}), '()\n', (1316, 1318), False, 'from preprocessing.preprocessing import AspectAwarePreprocessor, ImageToArrayPreprocessor, SimplePreprocessor\n'), ((1412, 1457), 'preprocessing.datasets.SimpleDatasetLoader', 'SimpleDatasetLoader', ([], {'preprocessors': '[aap, iap]'}), '(preprocessors=[aap, iap])\n', (1431, 1457), False, 'from preprocessing.datasets import SimpleDatasetLoader\n'), ((1713, 1754), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', '_'], {'test_size': '(0.05)'}), '(data, _, test_size=0.05)\n', (1729, 1754), False, 'from sklearn.model_selection import train_test_split\n'), ((1836, 1937), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""final_model.h5"""'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_best_only': '(True)', 'verbose': '(1)'}), "('final_model.h5', monitor='val_loss', mode='min',\n save_best_only=True, verbose=1)\n", (1851, 1937), False, 'from keras.callbacks import ModelCheckpoint\n')] |
from collections import OrderedDict
from typing import Any, List, Optional
from pydantic import BaseModel, Field, HttpUrl
class ProductMetadata(BaseModel):
name: str
short_code: str
brand: str
sku: Optional[str]
url: HttpUrl
class ProductImage(BaseModel):
name: str
alt: str
suffix: str
url: HttpUrl
image: Any
class ProductDescription(BaseModel):
name: str
metadata: Optional[ProductMetadata]
images: Optional[List[ProductImage]]
header: str
summary: str
features: str
standard_accessory: str
detailed_description: str
optional_accessory: List[ProductMetadata] = Field(default_factory=list)
technical_specification_dict: OrderedDict
| [
"pydantic.Field"
] | [((647, 674), 'pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (652, 674), False, 'from pydantic import BaseModel, Field, HttpUrl\n')] |
from past.builtins import basestring
from acc_utils.errors import _assert
from .operator_base import OperatorBase
class ConvPoolOpBase(OperatorBase):
def __init__(
self,
kernel=1,
stride=1,
pad=0,
**kwargs
):
super(ConvPoolOpBase, self).__init__(**kwargs)
if isinstance(kernel, list) or isinstance(kernel, tuple):
_assert(len(kernel) == 2,
"kernel with int value or (int, int) format is only supported")
self.k_h, self.k_w = kernel
else:
self.k_h, self.k_w = kernel, kernel
self.stride = stride
if isinstance(pad, list) or isinstance(pad, tuple):
_assert(len(pad) == 4 or len(
pad) == 2, "pad with int value or (pad_h, pad_w) = int, int or (pad_t, pad_b, pad_l, pad_r) - int, int ,int ,int format is only supported")
if len(pad) == 2:
pad = [pad[0], pad[0], pad[1], pad[1]]
elif isinstance(pad, basestring):
if pad == 'VALID':
pad = [0, 0, 0, 0]
else:
_assert(False, 'not supported type for padding')
else:
pad = [pad for _ in range(4)]
self.pad_t, self.pad_b, self.pad_l, self.pad_r = pad
def flip_operation(self):
self.pad_r, self.pad_l = self.pad_l, self.pad_r
def __repr__(self):
options = "kernel(h,w): {}\tstride: {}\tpad: {}\t".format(
[self.k_h, self.k_w], self.stride, [self.pad_t, self.pad_b, self.pad_l, self.pad_r])
return super(ConvPoolOpBase, self).__repr__() + options
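# Illustrative use (assumes OperatorBase requires no extra keyword arguments):
#   op = ConvPoolOpBase(kernel=3, stride=2, pad=(1, 2))
#   # -> k_h = k_w = 3 and (pad_t, pad_b, pad_l, pad_r) = (1, 1, 2, 2)
#   # pad='VALID' maps to (0, 0, 0, 0); any other string fails the _assert check.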
| [
"acc_utils.errors._assert"
] | [((1140, 1188), 'acc_utils.errors._assert', '_assert', (['(False)', '"""not supported type for padding"""'], {}), "(False, 'not supported type for padding')\n", (1147, 1188), False, 'from acc_utils.errors import _assert\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utilities for source code tokenization."""
import abc
import keyword
import re
import tokenize
import typing
from typing import Any, Dict, List, Sequence, Text, Tuple, Union
from absl import logging
import six
from cubert import unified_tokenizer
# Quote string for special tokens. Must make the resulting string a valid Python
# token.
SPECIAL_QUOTE = '___'
def quote_special(content):
return '{q}{t}{q}'.format(q=SPECIAL_QUOTE, t=content)
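# e.g. quote_special('NEWLINE') -> '___NEWLINE___'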
ENDMARKER = 'ENDMARKER'
NEWLINE = quote_special('NEWLINE')
# After all splitting, the longest a token is of the following length.
MAX_OUTPUT_TOKEN_LENGTH = 15
@six.add_metaclass(abc.ABCMeta)
class Tokenizer(object):
"""A tokenizer that implements a language-agnostic tokenization.
The tokenizer implements a language-agnostic tokenization. This is available
as `tokenize_and_abstract()`.
"""
def __init__(self, max_output_token_length = MAX_OUTPUT_TOKEN_LENGTH,
reserved = ()):
self.types_to_skip = []
self.reserved = reserved
self.mappings = dict()
self.max_output_token_length = max_output_token_length
@abc.abstractmethod
def tokenize_and_abstract(
self,
source_code):
"""Produces a language-agnostic tokenization of the input code.
Args:
source_code: Source code stored in a string.
Returns:
A list of pairs of a token (string) and a token kind in the given source
code. It always includes an end of sequence token. That is, an empty
input always returns a list of size 1.
Raises:
ValueError: if `source_code` cannot be tokenized.
"""
@abc.abstractmethod
def untokenize_abstract(self, whole_tokens):
"""Applies language-specific rules to an abstract untokenized list.
Args:
whole_tokens: Abstract tokens, reconstituted and unsanitized by
`untokenize` before passed to this language-specific logic.
Returns:
A string representing the untokenized text.
"""
def update_types_to_skip(
self, types_to_skip):
"""Replaces the set of token types that are ignored.
Each tokenizer may provide different semantics with respect to this list,
and may ignore it altogether.
Args:
types_to_skip: List of types (from the constants in the `token` module) or
`unified_tokenizer.TokenKind`. Note that some of those constants are
actually defined in the `tokenize` module.
"""
self.types_to_skip = types_to_skip
def replace_reserved_keywords(self, reserved):
"""Replaces the reserved keywords with the supplied list of strings.
Each tokenizer may provide different semantics with respect to the list
of reserved keywords, or ignore them altogether.
Args:
reserved: List of strings.
"""
self.reserved = reserved # Replace the old one entirely.
def update_mappings(self, mappings):
"""Replaces the character mappings with the supplied dictionary.
The intent for character mappings is to enable tokenizers that support them
to sanitize dangerous characters, such as newline and carriage return,
with a nicer symbol.
Each tokenizer may provide different semantics with respect to the
mappings, or ignore them altogether.
Args:
mappings: Dictionary of original to sanitized strings. Keys are expected
to have length 1.
Raises:
ValueError: if a key has length different from 1.
"""
unified_tokenizer.check_mappings(mappings)
self.mappings = mappings
def condition_full_tokens(
self, agnostic
):
"""Applies reserved keywords and character sanitization."""
filtered = [(spelling, kind) for spelling, kind in agnostic
if kind not in self.types_to_skip]
# Now turn all reserved words, regardless of kind, into keywords.
with_reserved = [(spelling, unified_tokenizer.TokenKind.KEYWORD
if spelling in self.reserved else kind)
for spelling, kind in filtered]
return with_reserved
def subtokenize_full_tokens(
self, agnostic
):
"""Performs heuristic splitting of full tokens."""
subtoken_lists = unified_tokenizer.subtokenize_agnostic_tokens_in_place(
agnostic_tokens=agnostic,
max_output_token_length=self.max_output_token_length,
sanitization_mapping=self.mappings,
sentinel=unified_tokenizer.SENTINEL)
return subtoken_lists
def tokenize(self, source_code):
"""Tokenizes via `tokenize_and_abstract`."""
try:
agnostic = self.tokenize_and_abstract(source_code)
except Exception as e:
raise ValueError('While trying to do language-specific tokenization for '
'the string:\n\n\n%r\n\n\n%s\n\n\n'
'we received error %r.' % (source_code, source_code, e))
conditioned = self.condition_full_tokens(agnostic)
subtoken_lists = self.subtokenize_full_tokens(conditioned)
subtokens = unified_tokenizer.flatten_subtoken_lists(subtoken_lists)
return subtokens
def untokenize(self, token_list):
"""Untokenizes via `untokenize_abstract`."""
# Untokenize agnostic.
if (not token_list or token_list[-1] != quote_special(
unified_tokenizer.TokenKind.EOS.name)):
raise ValueError(
'Token list %r should end with the EOS token %r.' %
(token_list, quote_special(unified_tokenizer.TokenKind.EOS.name)))
whole_tokens = unified_tokenizer.reconstitute_full_unsanitary_tokens(
token_list,
sanitization_mapping=self.mappings,
sentinel=unified_tokenizer.SENTINEL)
return self.untokenize_abstract(whole_tokens)
def _token_from_token_type(token_type):
"""Turns a token type into a reserved token string."""
# We use the tok_name dict from tokenize, not token. The former has
# NL and COMMENT and such, whereas the latter doesn't.
return quote_special(tokenize.tok_name[token_type])
class CuBertTokenizer(Tokenizer):
"""Tokenizer that extracts Python's lexical elements preserving strings."""
_TOKEN_TYPE_MAP = {
tokenize.COMMENT: unified_tokenizer.TokenKind.COMMENT,
tokenize.DEDENT: unified_tokenizer.TokenKind.KEYWORD,
tokenize.ENDMARKER: unified_tokenizer.TokenKind.EOS,
tokenize.ERRORTOKEN: unified_tokenizer.TokenKind.ERROR,
tokenize.INDENT: unified_tokenizer.TokenKind.KEYWORD,
tokenize.NEWLINE: unified_tokenizer.TokenKind.NEWLINE,
tokenize.NL: unified_tokenizer.TokenKind.PUNCTUATION,
tokenize.NUMBER: unified_tokenizer.TokenKind.NUMBER,
tokenize.OP: unified_tokenizer.TokenKind.PUNCTUATION,
tokenize.STRING: unified_tokenizer.TokenKind.STRING,
}
_REVERSE_TOKEN_MAP = {
_token_from_token_type(tokenize.INDENT): tokenize.INDENT,
_token_from_token_type(tokenize.DEDENT): tokenize.DEDENT,
quote_special(
unified_tokenizer.TokenKind.EOS.name): tokenize.ENDMARKER,
quote_special(
unified_tokenizer.TokenKind.ERROR.name): tokenize.ERRORTOKEN,
quote_special(
unified_tokenizer.TokenKind.NEWLINE.name): tokenize.NEWLINE,
_token_from_token_type(tokenize.NL): tokenize.NL,
}
# Adding the end-of-string anchor \Z below, since re.fullmatch wasn't
# available in Python2.
_NUMBERS = re.compile('(' + tokenize.Number + r')\Z') # pytype: disable=module-attr
_SINGLE_STRINGS = re.compile('(' + tokenize.String + r')\Z') # pytype: disable=module-attr
_TRIPLE_STRING_BEGINNINGS = re.compile(tokenize.Triple) # pytype: disable=module-attr
_COMMENTS = re.compile('(' + tokenize.Comment + r')\Z') # pytype: disable=module-attr
_EXACT_TOKEN_TYPES = tokenize.EXACT_TOKEN_TYPES.keys() # pytype: disable=module-attr
# Token types that CubertTokenizer will tokenize by their type and not
# content.
_TOKEN_TYPES_TO_TOKENIZE_BY_TYPE = [
tokenize.NEWLINE, tokenize.DEDENT, tokenize.NL
]
def __init__(self, *args, **kwargs):
super(CuBertTokenizer, self).__init__(*args, **kwargs)
# By default, we drop COMMENT tokens.
self.update_types_to_skip([unified_tokenizer.TokenKind.COMMENT])
self.update_mappings({
# By default, replace \n and \r. We choose special names that are
# different from the Python token types (i.e., NL).
'\n':
quote_special('NLCHAR'),
'\r':
quote_special('CR'),
unified_tokenizer.SENTINEL:
quote_special(unified_tokenizer.SENTINEL_ESCAPE),
})
def tokenize_and_abstract(
self,
source_code):
"""Produces a language-agnostic tokenization of the input code."""
token_pairs = [] # type: List[Tuple[Text, int]]
try:
token_tuples = unified_tokenizer.code_to_tokens(source_code)
token_pairs = [(six.ensure_text(token_name), token_type)
for token_type, token_name, _, _, _ in token_tuples]
except (tokenize.TokenError, IndentationError) as e:
logging.warning('The tokenizer raised exception `%s` while parsing %s',
e, source_code)
token_pairs = [
(quote_special(unified_tokenizer.TokenKind.ERROR.name),
tokenize.ERRORTOKEN),
('',
tokenize.ENDMARKER),
]
agnostic_tokens = [] # type: List[Tuple[Text, unified_tokenizer.TokenKind]]
for spelling, kind in token_pairs:
adjusted_spelling = spelling
token_kind = unified_tokenizer.TokenKind.NONE
if kind == tokenize.NAME:
# Disambiguate identifiers from keywords.
if keyword.iskeyword(spelling):
token_kind = unified_tokenizer.TokenKind.KEYWORD
else:
token_kind = unified_tokenizer.TokenKind.IDENTIFIER
else:
if kind in CuBertTokenizer._TOKEN_TYPES_TO_TOKENIZE_BY_TYPE:
# Replace spelling with type.
adjusted_spelling = _token_from_token_type(kind)
elif kind is tokenize.INDENT:
# For INDENT, in particular, we also record the actual spelling too.
adjusted_spelling = '{indent}{spelling}'.format(
indent=_token_from_token_type(kind),
spelling=spelling)
elif kind == tokenize.ENDMARKER:
adjusted_spelling = quote_special(
unified_tokenizer.TokenKind.EOS.name)
# Map everything according to table.
try:
token_kind = CuBertTokenizer._TOKEN_TYPE_MAP[kind]
except KeyError as ke:
# It's possible we're here because of async/await. Those kept being
# turned into keywords and then removed from keywords, so we can't
# rely on knowing which they are. We'll check by spelling.
# See: https://bugs.python.org/issue30406
# and https://bugs.python.org/issue33260
# and https://bugs.python.org/issue35975
if spelling in ('async', 'await'):
token_kind = unified_tokenizer.TokenKind.KEYWORD
else:
raise ValueError('While trying to turn Python token %r into an '
'agnostic one, raised %r.' % ((spelling, kind),
ke))
agnostic_tokens.append((adjusted_spelling, token_kind))
return agnostic_tokens
def untokenize_abstract(self, whole_tokens):
# Reconstruct Python tokenizer tuples, so that Python's untokenize can be
# invoked.
token_tuples = [] # type: List[Tuple[int, Text]]
for whole_token in whole_tokens:
if whole_token in CuBertTokenizer._EXACT_TOKEN_TYPES:
token_tuples.append((tokenize.OP, whole_token))
elif _token_from_token_type(tokenize.INDENT) in whole_token:
# We baked the type and spelling into one token. Break them up.
spelling = whole_token.replace(
_token_from_token_type(tokenize.INDENT), '')
token_tuples.append((tokenize.INDENT, spelling))
elif whole_token in CuBertTokenizer._REVERSE_TOKEN_MAP:
python_kind = CuBertTokenizer._REVERSE_TOKEN_MAP[whole_token]
if python_kind in (tokenize.DEDENT, tokenize.ENDMARKER,
tokenize.ERRORTOKEN):
spelling = ''
else: # python_kind in (tokenize.NEWLINE, tokenize.NL)
spelling = '\n'
token_tuples.append((python_kind, spelling))
elif keyword.iskeyword(whole_token):
token_tuples.append((tokenize.NAME, whole_token))
elif CuBertTokenizer._NUMBERS.match(whole_token):
token_tuples.append((tokenize.NUMBER, whole_token))
elif CuBertTokenizer._SINGLE_STRINGS.match(whole_token):
token_tuples.append((tokenize.STRING, whole_token))
elif CuBertTokenizer._TRIPLE_STRING_BEGINNINGS.match(whole_token):
token_tuples.append((tokenize.STRING, whole_token))
elif CuBertTokenizer._COMMENTS.match(whole_token):
token_tuples.append((tokenize.COMMENT, whole_token))
else:
# Everything else we map back to NAME.
token_tuples.append((tokenize.NAME, whole_token))
reconstructed = tokenize.untokenize(typing.cast(Any, token_tuples))
return reconstructed
| [
"cubert.unified_tokenizer.check_mappings",
"cubert.unified_tokenizer.subtokenize_agnostic_tokens_in_place",
"re.compile",
"six.add_metaclass",
"six.ensure_text",
"cubert.unified_tokenizer.flatten_subtoken_lists",
"absl.logging.warning",
"keyword.iskeyword",
"tokenize.EXACT_TOKEN_TYPES.keys",
"cube... | [((1245, 1275), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (1262, 1275), False, 'import six\n'), ((7902, 7944), 're.compile', 're.compile', (["('(' + tokenize.Number + ')\\\\Z')"], {}), "('(' + tokenize.Number + ')\\\\Z')\n", (7912, 7944), False, 'import re\n'), ((7996, 8038), 're.compile', 're.compile', (["('(' + tokenize.String + ')\\\\Z')"], {}), "('(' + tokenize.String + ')\\\\Z')\n", (8006, 8038), False, 'import re\n'), ((8100, 8127), 're.compile', 're.compile', (['tokenize.Triple'], {}), '(tokenize.Triple)\n', (8110, 8127), False, 'import re\n'), ((8173, 8216), 're.compile', 're.compile', (["('(' + tokenize.Comment + ')\\\\Z')"], {}), "('(' + tokenize.Comment + ')\\\\Z')\n", (8183, 8216), False, 'import re\n'), ((8272, 8305), 'tokenize.EXACT_TOKEN_TYPES.keys', 'tokenize.EXACT_TOKEN_TYPES.keys', ([], {}), '()\n', (8303, 8305), False, 'import tokenize\n'), ((4065, 4107), 'cubert.unified_tokenizer.check_mappings', 'unified_tokenizer.check_mappings', (['mappings'], {}), '(mappings)\n', (4097, 4107), False, 'from cubert import unified_tokenizer\n'), ((4785, 5001), 'cubert.unified_tokenizer.subtokenize_agnostic_tokens_in_place', 'unified_tokenizer.subtokenize_agnostic_tokens_in_place', ([], {'agnostic_tokens': 'agnostic', 'max_output_token_length': 'self.max_output_token_length', 'sanitization_mapping': 'self.mappings', 'sentinel': 'unified_tokenizer.SENTINEL'}), '(agnostic_tokens=\n agnostic, max_output_token_length=self.max_output_token_length,\n sanitization_mapping=self.mappings, sentinel=unified_tokenizer.SENTINEL)\n', (4839, 5001), False, 'from cubert import unified_tokenizer\n'), ((5586, 5642), 'cubert.unified_tokenizer.flatten_subtoken_lists', 'unified_tokenizer.flatten_subtoken_lists', (['subtoken_lists'], {}), '(subtoken_lists)\n', (5626, 5642), False, 'from cubert import unified_tokenizer\n'), ((6067, 6209), 'cubert.unified_tokenizer.reconstitute_full_unsanitary_tokens', 'unified_tokenizer.reconstitute_full_unsanitary_tokens', (['token_list'], {'sanitization_mapping': 'self.mappings', 'sentinel': 'unified_tokenizer.SENTINEL'}), '(token_list,\n sanitization_mapping=self.mappings, sentinel=unified_tokenizer.SENTINEL)\n', (6120, 6209), False, 'from cubert import unified_tokenizer\n'), ((9311, 9356), 'cubert.unified_tokenizer.code_to_tokens', 'unified_tokenizer.code_to_tokens', (['source_code'], {}), '(source_code)\n', (9343, 9356), False, 'from cubert import unified_tokenizer\n'), ((13649, 13679), 'typing.cast', 'typing.cast', (['Any', 'token_tuples'], {}), '(Any, token_tuples)\n', (13660, 13679), False, 'import typing\n'), ((9557, 9648), 'absl.logging.warning', 'logging.warning', (['"""The tokenizer raised exception `%s` while parsing %s"""', 'e', 'source_code'], {}), "('The tokenizer raised exception `%s` while parsing %s', e,\n source_code)\n", (9572, 9648), False, 'from absl import logging\n'), ((10144, 10171), 'keyword.iskeyword', 'keyword.iskeyword', (['spelling'], {}), '(spelling)\n', (10161, 10171), False, 'import keyword\n'), ((9379, 9406), 'six.ensure_text', 'six.ensure_text', (['token_name'], {}), '(token_name)\n', (9394, 9406), False, 'import six\n'), ((12911, 12941), 'keyword.iskeyword', 'keyword.iskeyword', (['whole_token'], {}), '(whole_token)\n', (12928, 12941), False, 'import keyword\n')] |
#!/usr/bin/env python3
# TODO: Rewrite to remove unnecessary features; this program will only ever
# handle metadata.json version numbers.
"""
Increment a version number in a JSON file.
The JSON must have a top-level "version" key, as either a float or an int.
Usage:
python increment.py metadata.json -i 0.1 -O
This will create a backup file of metadata.json, increment the version number
by 0.1, and then overwrite metadata.json.
"""
import argparse
import json
import os
import sys
TYPE_STRING = str
TYPE_NUMBER = float
def info(message):
sys.stderr.write(message.rstrip("\r\n"))
sys.stderr.write(os.linesep)
def read_lines(farg):
if farg is not None:
with open(farg, "rt") as fobj:
return fobj.read()
else:
return sys.stdin.read()
def num_places(num):
if num != int(num):
l = len(str(num))
li = len(str(int(num)))+1
return max(l-li, 0)
return 0
def get_type(ver):
if any(isinstance(ver, t) for t in (str, bytes)):
return TYPE_STRING
elif type(ver).__name__ == "unicode": # 2.x
return TYPE_STRING
elif isinstance(ver, int) or isinstance(ver, float):
return TYPE_NUMBER
else:
# Default to string; this can handle most cases
return TYPE_STRING
def parse_version(ver):
"Return a float and the attributes describing the initial format"
is_string = False
is_fixed = False
num_places = None
vtype = type(ver).__name__
if vtype in ('str', 'unicode', 'bytes'):
is_string = True
if vtype == 'unicode':
sval = ver.encode('ascii')
elif vtype == 'bytes':
sval = ver.decode('ascii')
else:
sval = ver
if '.' not in sval:
is_fixed = True
else:
num_places = len(sval) - sval.index('.')
val = float(sval)
elif vtype == 'int':
is_fixed = True
val = ver
elif vtype == 'float':
sval = str(ver)
if '.' in sval:
num_places = len(sval) - sval.index('.')
else:
num_places = 0
val = ver
return val, {
"type": vtype,
"is_string": is_string,
"is_fixed": is_fixed,
"num_places": num_places
}
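# Illustrative examples (not part of the original script) of what parse_version
# returns; the values follow directly from the branches above:
#   parse_version("1.0") -> (1.0, {"type": "str", "is_string": True, "is_fixed": False, "num_places": 2})
#   parse_version(3)     -> (3,   {"type": "int", "is_string": False, "is_fixed": True, "num_places": None})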
def format_version(ver, attrs, places=None):
"Format a version (float) according to the attributes given"
vtype = attrs.get("type", "float")
is_string = attrs.get("is_string", False)
is_fixed = attrs.get("is_fixed", False)
if places is None:
num_places = attrs.get("num_places", None)
else:
num_places = places
if is_fixed and int(ver) != ver:
# Elevate to a float
is_fixed = False
if num_places is None:
num_places = 0
if is_string:
if is_fixed and int(ver) == ver:
val = str(int(ver))
elif num_places is not None and num_places > 0:
val = str(round(ver, num_places))
else:
val = str(ver)
if vtype == 'bytes':
val = val.encode('ascii')
elif vtype == 'unicode':
val = val.decode('ascii')
elif is_fixed:
val = int(ver)
else:
val = ver
if num_places is not None and num_places > 0:
val = round(ver, num_places)
return val
def update_version(ver, set_v=None, inc_v=None, places=None):
"Update a version number (which can be a string, integer, or float)"
ver_val, attrs = parse_version(ver)
if set_v is not None:
ver_val = set_v
elif inc_v is not None:
ver_val += inc_v
new_ver = format_version(ver_val, attrs, places=places)
info("Set version to {!r} (from {!r})".format(new_ver, ver))
return new_ver
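# Illustrative examples (not in the original): the helpers above preserve the
# input's formatting, e.g.
#   update_version("1.0", inc_v=0.1) -> "1.1"   (string in, string out)
#   update_version(2, inc_v=1)       -> 3       (fixed integer stays an integer)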
def backup(fpath, suffix="backup", odir=None):
"""Create a backup of fpath. Returns both the bytes written and the path to
the backup file. The backup file is placed in odir, if present, or the same
directory as fpath.
To prevent overwriting existing backup files, a number is appended to the
backup filename and incremented until a new filename is found.
"""
fbase = os.path.basename(fpath)
    bkdir = os.path.dirname(fpath) if odir is None else odir # backup file directory (same dir as fpath by default)
bkname = fbase + "." + suffix # backup file name
bkpath = os.path.join(bkdir, bkname) # backup file path
sver = 0 # unique sequence number
while os.path.exists(bkpath):
sver += 1
bkname = "{}.{}.{}".format(fbase, suffix, sver)
bkpath = os.path.join(bkdir, bkname)
nbytes = 0
with open(fpath, "rt") as ifobj:
data = ifobj.read()
nbytes = len(data)
with open(bkpath, "wt") as ofobj:
ofobj.write(data)
return nbytes, bkpath
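# Example of the collision handling described in the docstring (hypothetical file):
#   metadata.json -> metadata.json.backup, then metadata.json.backup.1, metadata.json.backup.2, ...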
def main():
ap = argparse.ArgumentParser(epilog="""
This program will work on any JSON file that has a top-level "version" key. The
value must be either an integer or float.
""")
ap.add_argument("metadata", nargs="?",
help="metadata.json file path (default: read from stdin)")
ap.add_argument("-i", "--inc", metavar="NUM", type=float, default=0.1,
help="add %(metavar)s to the version number (default: %(default)s)")
ap.add_argument("-s", "--set", metavar="NUM", type=float,
help="set the version number to %(metavar)s")
ap.add_argument("-n", "--places", metavar="NUM", type=int,
help="round version to %(metavar)s digits after the decimal (default: deduce)")
ap.add_argument("-o", "--out", metavar="PATH",
help="write output to %(metavar)s (default: stdout)")
ap.add_argument("-O", "--overwrite", action="store_true",
help="overwrite file in-place (implies -o=<metadata>)")
ap.add_argument("-B", "--backup-dir", metavar="PATH",
help="place backup in %(metavar)s (default: current directory)")
ap.add_argument("-S", "--backup-suffix", metavar="STR", default="backup",
help="backup file suffix (default: %(default)s)")
ap.add_argument("--no-backup", action="store_true",
help="do not create a backup file when overwriting the input file")
args = ap.parse_args()
if args.overwrite and args.metadata is None:
ap.error("--overwrite requires a filename; can't overwrite stdin")
# Read the JSON object and set/increment the version number
lines = read_lines(args.metadata)
data = json.loads(lines)
data["version"] = update_version(data["version"],
set_v=args.set, inc_v=args.inc, places=args.places)
output_file = None
if args.overwrite:
output_file = args.metadata
elif args.out is not None:
output_file = args.out
# Create a backup of the input file if we're about to overwrite it
    if (output_file is not None and args.metadata is not None
            and os.path.exists(output_file) and os.path.samefile(output_file, args.metadata)):
if not args.no_backup:
nbytes, bkpath = backup(args.metadata, suffix=args.backup_suffix, odir=args.backup_dir)
info("Copied {} to {} ({} bytes)".format(args.metadata, bkpath, nbytes))
else:
info("Warning: about to overwrite {}!".format(args.metadata))
# Output the final JSON object
out = json.dumps(data, sort_keys=True, indent=2)
if output_file is None:
print(out)
else:
with open(output_file, "wt") as fobj:
fobj.write(out)
fobj.write("\n")
info("Wrote {} bytes to {}".format(len(out), output_file))
if __name__ == "__main__":
main()
| [
"os.path.samefile",
"os.path.exists",
"json.loads",
"argparse.ArgumentParser",
"json.dumps",
"os.path.join",
"sys.stderr.write",
"os.path.basename",
"sys.stdin.read"
] | [((602, 630), 'sys.stderr.write', 'sys.stderr.write', (['os.linesep'], {}), '(os.linesep)\n', (618, 630), False, 'import sys\n'), ((4152, 4175), 'os.path.basename', 'os.path.basename', (['fpath'], {}), '(fpath)\n', (4168, 4175), False, 'import os\n'), ((4320, 4347), 'os.path.join', 'os.path.join', (['bkdir', 'bkname'], {}), '(bkdir, bkname)\n', (4332, 4347), False, 'import os\n'), ((4449, 4471), 'os.path.exists', 'os.path.exists', (['bkpath'], {}), '(bkpath)\n', (4463, 4471), False, 'import os\n'), ((4819, 4994), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'epilog': '"""\nThis program will work on any JSON file that has a top-level "version" key. The\nvalue must be either an integer or float.\n """'}), '(epilog=\n """\nThis program will work on any JSON file that has a top-level "version" key. The\nvalue must be either an integer or float.\n """\n )\n', (4842, 4994), False, 'import argparse\n'), ((6417, 6434), 'json.loads', 'json.loads', (['lines'], {}), '(lines)\n', (6427, 6434), False, 'import json\n'), ((7196, 7238), 'json.dumps', 'json.dumps', (['data'], {'sort_keys': '(True)', 'indent': '(2)'}), '(data, sort_keys=True, indent=2)\n', (7206, 7238), False, 'import json\n'), ((774, 790), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (788, 790), False, 'import sys\n'), ((4564, 4591), 'os.path.join', 'os.path.join', (['bkdir', 'bkname'], {}), '(bkdir, bkname)\n', (4576, 4591), False, 'import os\n'), ((6800, 6844), 'os.path.samefile', 'os.path.samefile', (['output_file', 'args.metadata'], {}), '(output_file, args.metadata)\n', (6816, 6844), False, 'import os\n')] |
import unittest
from app.models import News_Source
class News_sourcesTest(unittest.TestCase):
'''
    Test class to test the behaviour of the News_Source class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.news_source = News_Source("abc-news", "ABC News", 'Your trusted source for breaking news and analysis',
"en", "https://abcnews.go.com", "us")
def test_instance(self):
'''
Test that the instance created was created from the News_Source class
'''
self.assertTrue(isinstance(self.news_source, News_Source))
def test_attr(self):
'''
        Test that the instance attributes were created correctly
'''
self.assertEqual(self.news_source.id, "abc-news")
self.assertEqual(self.news_source.name, "ABC News")
self.assertEqual(self.news_source.description,
'Your trusted source for breaking news and analysis')
self.assertEqual(self.news_source.language, "en")
self.assertEqual(self.news_source.url, "https://abcnews.go.com")
self.assertEqual(self.news_source.country, "us")
| [
"app.models.News_Source"
] | [((300, 435), 'app.models.News_Source', 'News_Source', (['"""abc-news"""', '"""ABC News"""', '"""Your trusted source for breaking news and analysis"""', '"""en"""', '"""https://abcnews.go.com"""', '"""us"""'], {}), "('abc-news', 'ABC News',\n 'Your trusted source for breaking news and analysis', 'en',\n 'https://abcnews.go.com', 'us')\n", (311, 435), False, 'from app.models import News_Source\n')] |
import re
from services import Boto3Service
from nodes import Node, ServiceNodes
from boto3_docs_parser import Boto3DocsParser, boto3_session, parser
import os
import boto3
from pprint import pprint
from collections import namedtuple
from itertools import combinations
from services import is_method_attr_in_list, is_required_parameter_exist_in_methods_attributes, compare_nouns, find_parameter_in_attributes
import json
from thefuzz import fuzz
from utils import camel_case_split, deep_key_search
ALL_AWS_SERVICES = parser.get_all_available_services()
def compare_two_attributes_list(method1, method2):
attr1 = method1.get_attributes()
attr2 = method2.get_attributes()
short_list, long_list = attr1, attr2
if len(short_list) > len(long_list):
long_list, short_list = short_list, long_list
length_ratio = (len(short_list)/len(long_list))*100
found_count = 0
for shorty in short_list:
if is_method_attr_in_list(shorty, long_list):
found_count += 1
found_ratio = (found_count / len(long_list))*100
if found_ratio > 50:
found_ratio
if found_ratio == length_ratio and found_ratio not in (0, 100):
print("SHORT LIST HAS EVERYTHING IN THE LONG LIST. BUT LONG LIST IS MORE DETAILED.")
print("short_list", short_list)
print("long_list", long_list)
return found_ratio, length_ratio
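# Illustrative example (not in the original) of the two ratios returned above,
# assuming hypothetical attribute lists and membership-style matching in
# is_method_attr_in_list:
#   method1 attrs: ["Name", "Arn"]                      (short list, 2 items)
#   method2 attrs: ["Name", "Arn", "Tags", "Created"]   (long list, 4 items)
#   -> found_ratio = 2/4*100 = 50.0 and length_ratio = 2/4*100 = 50.0,
#      which triggers the "short list has everything" message above.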
def choose_(all_service_nodes, service_node,node, accepted_method_verbs=('get', 'describe', 'list', 'search')):
# def choose_(service_node, node, method_verbs=('describe', 'list', 'search')):
"""Choose between method verbs.
Priorities:
"""
# filter the node's methods with accepted method verbs
accepted_methods = [
method for method in node.methods
if method.get_verb() in accepted_method_verbs
]
if not accepted_methods:
# Node has no accepted methods, failing to choose any method.
return False
method_parameter_relations = extract_node_methods_relations(all_service_nodes, service_node, node, accepted_method_verbs)
method_parameter_relations
# TODO: save methods
# -------------------------------
# if we got multiple accepted methods we have to select between functions
if len(accepted_methods) > 1:
# priority 1: if method has no required parameter
two_method_combinations = list(combinations(accepted_methods, 2))
two_method_combinations
for first_method, second_method in two_method_combinations:
found_ratio, length_ratio = compare_two_attributes_list(first_method, second_method)
if 100 > found_ratio > 0:
print(">>>> BOTH FUNCTIONS NEEDS TO BE CALLED ")
# print()
# print(f">>>> First Method Attrs:", first_method.get_attributes())
# print()
# print(f">>>> Second Method Attrs:", second_method.get_attributes())
# print()
print(f"\t### RATIO #### {first_method.name} :: {second_method.name} \tfound_ratio:{found_ratio} | length_ratio:{length_ratio}")
if found_ratio == 100:
non_get_funcs = [m.name for m in (first_method, second_method) if m.get_verb()!='get'] # don't prefer get_ verb
print(non_get_funcs)
print(f"\t### RATIO #### found ratio is 100. Will choose the non-get function, or less parameter demanding function.")
print()
print()
def find_methods_having_least_amount_of_required_parameters(methods_list):
"""
        checks if the first marp has fewer required parameters than all of the other ones
        returns the first marp and other marps with the same count of required parameters as the first marp.
"""
# TODO: check if the values can be found in the node's attrs. If it can be found, it means
# list or describe needs the resources own ids, so we can choose the other one. Other one probably
# gets its parameters from one higher up resource.
# sort methods_and_required_parameters for their required parameters arr. length
sorted_methods = list(sorted(methods_list, key=lambda method: len(method.get_required_parameters())))
# select the first method and add it to the list by default
first_method = sorted_methods[0]
first_methods_req_params = first_method.get_required_parameters()
same_req_param_count_methods = [first_method]
for other_method in sorted_methods[1:]:
# look for other
other_method_req_params = other_method.get_required_parameters()
if len(first_methods_req_params) == len(other_method_req_params):
same_req_param_count_methods.append(other_method)
return same_req_param_count_methods
# possible_methods will always have the same length of required_parameters
# possible_methods = find_methods_having_least_amount_of_required_parameters(accepted_methods)
possible_methods = accepted_methods
has_more_than_one_possible_methods = len(possible_methods) > 1
does_all_methods_has_no_required_parameters = all([m.get_required_parameters()==[] for m in possible_methods])
required_param_methods = [m for m in possible_methods if m.get_required_parameters()]
non_required_param_methods = [m for m in possible_methods if not m.get_required_parameters()]
non_required_param_methods
# dealing with multiple methods, len > 1 and all the possible methods will have the same amount of req parameters
if has_more_than_one_possible_methods:
pass
compare_method_attributes_to_nodes(node, possible_methods)
if does_all_methods_has_no_required_parameters:
# we got multiple possible methods, and none of them has any required parameters
# sometimes list and describe doesn't give same data. we should compare method's
# returns_keys and the node's attributes
print('\n### CASE: MULTIPLE POSSIBLE METHODS BUT NO REQUIRED PARAMETERS')
print('--'*20)
# TODO: Compare returns_keys of each method to node's attributes
for method in possible_methods:
print(method)
pprint(method.get_returns_keys())
print('\n'*4)
possible_methods
if not does_all_methods_has_no_required_parameters:
# all the possible methods will have the same amount of req parameters
# we have multiple possible methods, and they have required parameters
# do the attribute comparision
# TODO: see if there are any rule breakers
print('\n### CASE: SELECT BETWEEN METHODS (Multiple Required Parameters):', [(m.name, m.get_required_parameters()) for m in possible_methods])
print('--'*20)
for method in possible_methods:
print("\nmethod: ", node.name, method.name, method.get_required_parameters())
pprint(method.get_returns_keys())
print('\n'*4)
print()
# try to find which method you will choose
# we know
# selected_marp = possible_methods[0]
# selected_marp
# is_only_method_and_got_required_param = len(methods_and_required_parameters) == 1 and \
# methods_and_required_parameters[0].get('required_parameters')
# if is_only_method_and_got_required_param:
# # this is the case when we have to find the required parameters.
# methods_and_required_parameters
return None
def find_only_list_no_describe_nodes(service_node):
for node in service_node.get_nodes(flat=True):
has_prefix_method_lambda = lambda method, accepted_verbs: any(([method.name.startswith(accepted_verb) for accepted_verb in accepted_verbs]))
for method in node.methods:
            has_list_method = has_prefix_method_lambda(method, ('list',))
            has_describe_method = has_prefix_method_lambda(method, ('describe',))
if has_list_method and not has_describe_method:
print(f'ERROR: [NO DESCRIBE ONLY LIST]{service_node.service_name} {node.name} {method.name}')
pprint(method.get_returns_keys())
print(f'\n'*2)
pprint(node.get_attributes())
print(f'\n'*4)
def analysis_all_services_paginator_functions():
accepted_method_verbs=('get', 'describe', 'list', 'search')
for service_name in ALL_AWS_SERVICES:
service = Boto3Service(service_name, boto3_session)
service_node = ServiceNodes(service)
print(service_node.service_name)
for node in service_node.get_nodes():
# found_node_paginators = [method for method in node.methods if method.is_paginator]
found_node_paginators = [method for method in node.methods if any([method.name.startswith(accepted_verb) for accepted_verb in accepted_method_verbs])]
if found_node_paginators:
for method in found_node_paginators:
print()
print('\t', node.name)
print('\t\t',method)
node_stop = 1
print("-*-"*20)
service_stop = 1
def analysis_with_filter_functions(service_nodes, node_filter_func=None, method_filter_func=None):
for service_node in service_nodes:
nodes = service_node.get_nodes()
if node_filter_func is not None:
nodes = list(filter(node_filter_func, nodes))
print('\t\t\t-servicenode', service_node.service_name)
for node in nodes:
print('\t\tnode--', node.name)
methods = node.methods
for method in methods:
# print('+'*14)
if method_filter_func(method):
print(method)
print('--'*20)
print('-*-'*20)
def get_every_unique_parameter_names():
accepted_method_verbs=('get', 'describe', 'list', 'search')
all_parameter_names = []
for service_name in ALL_AWS_SERVICES:
service = Boto3Service(service_name, boto3_session)
service_node = ServiceNodes(service)
for node in service_node.get_nodes():
# found_node_paginators = [method for method in node.methods if method.is_paginator]
methods = [method for method in node.methods if method.get_verb() in accepted_method_verbs]
for method in methods:
req_params = method.get_required_parameters()
for rp in req_params:
rp_name = rp.get('name')
rp_text = rp.get('text')
if rp_name not in all_parameter_names:
text ="{:25s} <|> {:40s} <|> {:50s} <|> {:25s} <|> {}".format(service_node.service_name, node.name, repr(method), rp_name, rp_text)
print(text)
all_parameter_names.append(rp_name)
print('###'*30)
return all_parameter_names
| [
"nodes.ServiceNodes",
"services.is_method_attr_in_list",
"itertools.combinations",
"services.Boto3Service",
"boto3_docs_parser.parser.get_all_available_services"
] | [((517, 552), 'boto3_docs_parser.parser.get_all_available_services', 'parser.get_all_available_services', ([], {}), '()\n', (550, 552), False, 'from boto3_docs_parser import Boto3DocsParser, boto3_session, parser\n'), ((934, 975), 'services.is_method_attr_in_list', 'is_method_attr_in_list', (['shorty', 'long_list'], {}), '(shorty, long_list)\n', (956, 975), False, 'from services import is_method_attr_in_list, is_required_parameter_exist_in_methods_attributes, compare_nouns, find_parameter_in_attributes\n'), ((9036, 9077), 'services.Boto3Service', 'Boto3Service', (['service_name', 'boto3_session'], {}), '(service_name, boto3_session)\n', (9048, 9077), False, 'from services import Boto3Service\n'), ((9101, 9122), 'nodes.ServiceNodes', 'ServiceNodes', (['service'], {}), '(service)\n', (9113, 9122), False, 'from nodes import Node, ServiceNodes\n'), ((10654, 10695), 'services.Boto3Service', 'Boto3Service', (['service_name', 'boto3_session'], {}), '(service_name, boto3_session)\n', (10666, 10695), False, 'from services import Boto3Service\n'), ((10719, 10740), 'nodes.ServiceNodes', 'ServiceNodes', (['service'], {}), '(service)\n', (10731, 10740), False, 'from nodes import Node, ServiceNodes\n'), ((2397, 2430), 'itertools.combinations', 'combinations', (['accepted_methods', '(2)'], {}), '(accepted_methods, 2)\n', (2409, 2430), False, 'from itertools import combinations\n')] |
# -*- coding: utf-8 -*-
import argparse
import re
import subprocess
import sys
from typing import List
from .fork_release import create_develop_if_not_exists
def main():
args = parse_args(sys.argv[1:])
branch_name = f"feature-{args.feature_name}"
cp = subprocess.run(["git", "branch"], stdout=subprocess.PIPE)
if re.search(branch_name, cp.stdout.decode("utf-8")) is None:
print(f"Branch {branch_name} doesn't exist.")
exit(1)
create_develop_if_not_exists()
subprocess.run(["git", "checkout", "develop"])
subprocess.run(["git", "merge", "--no-ff", branch_name])
subprocess.run(["git", "branch", "-d", branch_name])
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser("merge_feature")
parser.add_argument("feature_name")
return parser.parse_args(argv)
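# Hypothetical invocation (the feature name is illustrative):
#   merge_feature my-widget
# checks out develop, merges branch "feature-my-widget" with --no-ff, then deletes it.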
if __name__ == "__main__":
exit(main())
| [
"subprocess.run",
"argparse.ArgumentParser"
] | [((268, 325), 'subprocess.run', 'subprocess.run', (["['git', 'branch']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'branch'], stdout=subprocess.PIPE)\n", (282, 325), False, 'import subprocess\n'), ((502, 548), 'subprocess.run', 'subprocess.run', (["['git', 'checkout', 'develop']"], {}), "(['git', 'checkout', 'develop'])\n", (516, 548), False, 'import subprocess\n'), ((553, 609), 'subprocess.run', 'subprocess.run', (["['git', 'merge', '--no-ff', branch_name]"], {}), "(['git', 'merge', '--no-ff', branch_name])\n", (567, 609), False, 'import subprocess\n'), ((614, 666), 'subprocess.run', 'subprocess.run', (["['git', 'branch', '-d', branch_name]"], {}), "(['git', 'branch', '-d', branch_name])\n", (628, 666), False, 'import subprocess\n'), ((737, 777), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""merge_feature"""'], {}), "('merge_feature')\n", (760, 777), False, 'import argparse\n')] |
from src.utils.position import Position
from src.exceptions.position_exception import PositionException
import unittest
class TestPosition(unittest.TestCase):
def test_position_creation(self):
pos = Position(line=3, column=10)
self.assertEqual(pos.line, 3, "Line doesn't match")
self.assertEqual(pos.column, 10, "Column doesn't match")
def test_position_advance_column(self):
pos = Position(line=1, column=1)
pos.advance_column()
self.assertEqual(pos.line, 1, "Line number shouldn't change")
self.assertEqual(pos.column, 2, "Column number should change")
def test_position_advance_line(self):
pos = Position(line=10, column=12)
pos.advance_line()
self.assertEqual(pos.line, 11, "Line number should change")
self.assertEqual(pos.column, 0, "Column number should be 0")
def testEquality(self):
pos1 = Position(line=2, column=3)
pos2 = Position(line=2, column=2)
pos2.advance_column()
self.assertEqual(pos1, pos2, f"Positions should be equal\npos1: {repr(pos1)}\npos2: {repr(pos2)}")
def test_invalid_line(self):
with self.assertRaises(PositionException) as cm:
Position(line=-1, column=0)
the_exception = cm.exception
self.assertEqual(
the_exception.message,
"Line number cannot be less or equal to 0"
)
def test_invalid_column(self):
with self.assertRaises(PositionException) as cm:
Position(line=1, column=-10)
the_exception = cm.exception
self.assertEqual(
the_exception.message,
"Column number cannot be less than 0"
) | [
"src.utils.position.Position"
] | [((214, 241), 'src.utils.position.Position', 'Position', ([], {'line': '(3)', 'column': '(10)'}), '(line=3, column=10)\n', (222, 241), False, 'from src.utils.position import Position\n'), ((427, 453), 'src.utils.position.Position', 'Position', ([], {'line': '(1)', 'column': '(1)'}), '(line=1, column=1)\n', (435, 453), False, 'from src.utils.position import Position\n'), ((682, 710), 'src.utils.position.Position', 'Position', ([], {'line': '(10)', 'column': '(12)'}), '(line=10, column=12)\n', (690, 710), False, 'from src.utils.position import Position\n'), ((922, 948), 'src.utils.position.Position', 'Position', ([], {'line': '(2)', 'column': '(3)'}), '(line=2, column=3)\n', (930, 948), False, 'from src.utils.position import Position\n'), ((964, 990), 'src.utils.position.Position', 'Position', ([], {'line': '(2)', 'column': '(2)'}), '(line=2, column=2)\n', (972, 990), False, 'from src.utils.position import Position\n'), ((1235, 1262), 'src.utils.position.Position', 'Position', ([], {'line': '(-1)', 'column': '(0)'}), '(line=-1, column=0)\n', (1243, 1262), False, 'from src.utils.position import Position\n'), ((1536, 1564), 'src.utils.position.Position', 'Position', ([], {'line': '(1)', 'column': '(-10)'}), '(line=1, column=-10)\n', (1544, 1564), False, 'from src.utils.position import Position\n')] |
from .task import BaseQueueTask as _BaseQueueTask
from .features import BaseFeatureAdapterFactory as _BaseFeatureAdapterFactory
from .adapter.collection import BaseList as _BaseList
from .result import MRResult as _MRResult
from ..mode import RunningMode as _RunningMode
from ..types import MRTasks as _MRTasks
import multirunnable._utils as _utils
from abc import ABCMeta, abstractmethod
from typing import List, Tuple, Dict, Optional, Union, Callable as CallableType, Iterable as IterableType
from types import MethodType, FunctionType
from collections.abc import Callable
class BaseExecutor(metaclass=ABCMeta):
def __init__(self, mode: _RunningMode, executors: int):
self._mode = mode
self._executors_number = executors
def __str__(self):
return f"{self.__str__()} at {id(self.__class__)}"
def __repr__(self):
__instance_brief = None
# # self.__class__ value: <class '__main__.ACls'>
__cls_str = str(self.__class__)
__cls_name = _utils.get_cls_name(cls_str=__cls_str)
if __cls_name != "":
__instance_brief = f"{__cls_name}(" \
f"mode={self._mode}, " \
f"worker_num={self._executors_number})"
else:
__instance_brief = __cls_str
return __instance_brief
@abstractmethod
def _initial_running_strategy(self) -> None:
"""
Description:
Initialize and instantiate RunningStrategy.
:return:
"""
pass
@abstractmethod
def start_new_worker(self, target: Callable, *args, **kwargs) -> None:
"""
Description:
Initial and activate an executor to run.
:param target:
:param args:
:param kwargs:
:return:
"""
pass
@abstractmethod
def run(self,
function: CallableType,
args: Optional[Union[Tuple, Dict]] = None,
queue_tasks: Optional[Union[_BaseQueueTask, _BaseList]] = None,
features: Optional[Union[_BaseFeatureAdapterFactory, _BaseList]] = None) -> None:
pass
# @abstractmethod
# def async_run(self,
# function: CallableType,
# args_iter: IterableType = [],
# queue_tasks: Optional[Union[_BaseQueueTask, _BaseList]] = None,
# features: Optional[Union[_BaseFeatureAdapterFactory, _BaseList]] = None) -> None:
# pass
@abstractmethod
def map(self,
function: CallableType,
args_iter: IterableType = [],
queue_tasks: Optional[Union[_BaseQueueTask, _BaseList]] = None,
features: Optional[Union[_BaseFeatureAdapterFactory, _BaseList]] = None) -> None:
"""
Description:
            Receive a list of parameters (the arguments of the target function)
            and distribute them to
            1. multiple workers (Process, Thread, etc.) according to the length of the list, or
            2. multiple workers according to an option value such as 'worker_num'.
:param function:
:param args_iter:
:param queue_tasks:
:param features:
:return:
"""
pass
# @abstractmethod
# def async_map(self) -> None:
# """
# Description:
# Asynchronous version of map.
# :return:
# """
# pass
@abstractmethod
def map_with_function(self,
functions: IterableType[Callable],
args_iter: IterableType = [],
queue_tasks: Optional[Union[_BaseQueueTask, _BaseList]] = None,
features: Optional[Union[_BaseFeatureAdapterFactory, _BaseList]] = None) -> None:
"""
Description:
            Receive a list of functions (Callable objects) and distribute
            them to
            1. multiple workers (Process, Thread, etc.) according to the length of the list, or
            2. multiple workers according to an option value such as 'worker_num'.
:param functions:
:param args_iter:
:param queue_tasks:
:param features:
:return:
"""
pass
@abstractmethod
def close(self, workers: Union[_MRTasks, List[_MRTasks]]) -> None:
"""
Description:
Close executor(s).
:return:
"""
pass
@abstractmethod
def terminal(self) -> None:
"""
Description:
Terminate executor.
:return:
"""
pass
@abstractmethod
def kill(self) -> None:
"""
Description:
Kill executor.
:return:
"""
pass
@abstractmethod
def result(self) -> List[_MRResult]:
"""
Description:
Get the running result.
:return:
"""
pass
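# Illustrative sketch (not part of this module) of how a concrete executor could
# satisfy the abstract hooks above; "ThreadStrategy" is a hypothetical strategy class:
#
#   class ThreadExecutor(BaseExecutor):
#       def _initial_running_strategy(self):
#           self._strategy = ThreadStrategy(executors=self._executors_number)
#
#       def start_new_worker(self, target, *args, **kwargs):
#           return self._strategy.start_new_worker(target, *args, **kwargs)
#
#       # run/map/map_with_function/close/terminal/kill/result would likewise
#       # delegate to the chosen strategy.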
| [
"multirunnable._utils.get_cls_name"
] | [((1009, 1047), 'multirunnable._utils.get_cls_name', '_utils.get_cls_name', ([], {'cls_str': '__cls_str'}), '(cls_str=__cls_str)\n', (1028, 1047), True, 'import multirunnable._utils as _utils\n')] |
from telebot import types as t
class Buttons:
botoes = t.ReplyKeyboardMarkup(row_width=1)
botao1 = t.KeyboardButton('Dados recentes')
botao2 = t.KeyboardButton('Dados por estado')
botao3 = t.KeyboardButton('Dados por cidade')
botoes.add(botao1, botao2, botao3)
class Estados:
estados = t.InlineKeyboardMarkup(row_width=5)
AC = t.InlineKeyboardButton('AC', callback_data='AC')
AL = t.InlineKeyboardButton('AL', callback_data='AL')
AP = t.InlineKeyboardButton('AP', callback_data='AP')
AM = t.InlineKeyboardButton('AM', callback_data='AM')
BA = t.InlineKeyboardButton('BA', callback_data='BA')
CE = t.InlineKeyboardButton('CE', callback_data='CE')
DF = t.InlineKeyboardButton('DF', callback_data='DF')
ES = t.InlineKeyboardButton('ES', callback_data='ES')
GO = t.InlineKeyboardButton('GO', callback_data='GO')
MA = t.InlineKeyboardButton('MA', callback_data='MA')
MT = t.InlineKeyboardButton('MT', callback_data='MT')
MS = t.InlineKeyboardButton('MS', callback_data='MS')
MG = t.InlineKeyboardButton('MG', callback_data='MG')
PA = t.InlineKeyboardButton('PA', callback_data='PA')
PB = t.InlineKeyboardButton('PB', callback_data='PB')
PR = t.InlineKeyboardButton('PR', callback_data='PR')
PE = t.InlineKeyboardButton('PE', callback_data='PE')
PI = t.InlineKeyboardButton('PI', callback_data='PI')
RJ = t.InlineKeyboardButton('RJ', callback_data='RJ')
RN = t.InlineKeyboardButton('RN', callback_data='RN')
RS = t.InlineKeyboardButton('RS', callback_data='RS')
RO = t.InlineKeyboardButton('RO', callback_data='RO')
RR = t.InlineKeyboardButton('RR', callback_data='RR')
SC = t.InlineKeyboardButton('SC', callback_data='SC')
SP = t.InlineKeyboardButton('SP', callback_data='SP')
SE = t.InlineKeyboardButton('SE', callback_data='SE')
TO = t.InlineKeyboardButton('TO', callback_data='TO')
SIGLAS = t.InlineKeyboardButton('SIGLAS', callback_data='SIGLAS')
estados.row(AC, AL, AP, AM, BA)
estados.row(CE, DF, ES, GO, MA)
estados.row(MT, MS, MG, PA, PB)
estados.row(PR, PE, PI, RJ, RN)
estados.row(RS, RO, RR, SC, SP)
estados.row(SE, TO, SIGLAS)
class CidadeRepetida:
def __init__(self, uf, cidade):
self.uf = uf
self.markup = t.InlineKeyboardMarkup()
self.cont = len(uf)
self.cid = cidade
@property
def reply_markup(self):
        # Add one button per UF; markup.add() mutates self.markup in place
        for uf in self.uf:
            self.markup.add(
                t.InlineKeyboardButton(f'{self.cid} ({uf})', callback_data=f'{self.cid}*{uf}'))
return self.markup
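# Example with hypothetical data: CidadeRepetida(['SP', 'RJ'], 'Santos').reply_markup
# yields buttons labelled "Santos (SP)" / "Santos (RJ)" with callback_data
# "Santos*SP" / "Santos*RJ".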
| [
"telebot.types.InlineKeyboardButton",
"telebot.types.ReplyKeyboardMarkup",
"telebot.types.KeyboardButton",
"telebot.types.InlineKeyboardMarkup"
] | [((61, 95), 'telebot.types.ReplyKeyboardMarkup', 't.ReplyKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (82, 95), True, 'from telebot import types as t\n'), ((109, 143), 'telebot.types.KeyboardButton', 't.KeyboardButton', (['"""Dados recentes"""'], {}), "('Dados recentes')\n", (125, 143), True, 'from telebot import types as t\n'), ((157, 193), 'telebot.types.KeyboardButton', 't.KeyboardButton', (['"""Dados por estado"""'], {}), "('Dados por estado')\n", (173, 193), True, 'from telebot import types as t\n'), ((207, 243), 'telebot.types.KeyboardButton', 't.KeyboardButton', (['"""Dados por cidade"""'], {}), "('Dados por cidade')\n", (223, 243), True, 'from telebot import types as t\n'), ((314, 349), 'telebot.types.InlineKeyboardMarkup', 't.InlineKeyboardMarkup', ([], {'row_width': '(5)'}), '(row_width=5)\n', (336, 349), True, 'from telebot import types as t\n'), ((359, 407), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""AC"""'], {'callback_data': '"""AC"""'}), "('AC', callback_data='AC')\n", (381, 407), True, 'from telebot import types as t\n'), ((417, 465), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""AL"""'], {'callback_data': '"""AL"""'}), "('AL', callback_data='AL')\n", (439, 465), True, 'from telebot import types as t\n'), ((475, 523), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""AP"""'], {'callback_data': '"""AP"""'}), "('AP', callback_data='AP')\n", (497, 523), True, 'from telebot import types as t\n'), ((533, 581), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""AM"""'], {'callback_data': '"""AM"""'}), "('AM', callback_data='AM')\n", (555, 581), True, 'from telebot import types as t\n'), ((591, 639), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""BA"""'], {'callback_data': '"""BA"""'}), "('BA', callback_data='BA')\n", (613, 639), True, 'from telebot import types as t\n'), ((649, 697), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""CE"""'], {'callback_data': '"""CE"""'}), "('CE', callback_data='CE')\n", (671, 697), True, 'from telebot import types as t\n'), ((707, 755), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""DF"""'], {'callback_data': '"""DF"""'}), "('DF', callback_data='DF')\n", (729, 755), True, 'from telebot import types as t\n'), ((765, 813), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""ES"""'], {'callback_data': '"""ES"""'}), "('ES', callback_data='ES')\n", (787, 813), True, 'from telebot import types as t\n'), ((823, 871), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""GO"""'], {'callback_data': '"""GO"""'}), "('GO', callback_data='GO')\n", (845, 871), True, 'from telebot import types as t\n'), ((881, 929), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""MA"""'], {'callback_data': '"""MA"""'}), "('MA', callback_data='MA')\n", (903, 929), True, 'from telebot import types as t\n'), ((939, 987), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""MT"""'], {'callback_data': '"""MT"""'}), "('MT', callback_data='MT')\n", (961, 987), True, 'from telebot import types as t\n'), ((997, 1045), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""MS"""'], {'callback_data': '"""MS"""'}), "('MS', callback_data='MS')\n", (1019, 1045), True, 'from telebot import types as t\n'), ((1055, 1103), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""MG"""'], {'callback_data': '"""MG"""'}), 
"('MG', callback_data='MG')\n", (1077, 1103), True, 'from telebot import types as t\n'), ((1113, 1161), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""PA"""'], {'callback_data': '"""PA"""'}), "('PA', callback_data='PA')\n", (1135, 1161), True, 'from telebot import types as t\n'), ((1171, 1219), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""PB"""'], {'callback_data': '"""PB"""'}), "('PB', callback_data='PB')\n", (1193, 1219), True, 'from telebot import types as t\n'), ((1229, 1277), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""PR"""'], {'callback_data': '"""PR"""'}), "('PR', callback_data='PR')\n", (1251, 1277), True, 'from telebot import types as t\n'), ((1287, 1335), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""PE"""'], {'callback_data': '"""PE"""'}), "('PE', callback_data='PE')\n", (1309, 1335), True, 'from telebot import types as t\n'), ((1345, 1393), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""PI"""'], {'callback_data': '"""PI"""'}), "('PI', callback_data='PI')\n", (1367, 1393), True, 'from telebot import types as t\n'), ((1403, 1451), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""RJ"""'], {'callback_data': '"""RJ"""'}), "('RJ', callback_data='RJ')\n", (1425, 1451), True, 'from telebot import types as t\n'), ((1461, 1509), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""RN"""'], {'callback_data': '"""RN"""'}), "('RN', callback_data='RN')\n", (1483, 1509), True, 'from telebot import types as t\n'), ((1519, 1567), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""RS"""'], {'callback_data': '"""RS"""'}), "('RS', callback_data='RS')\n", (1541, 1567), True, 'from telebot import types as t\n'), ((1577, 1625), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""RO"""'], {'callback_data': '"""RO"""'}), "('RO', callback_data='RO')\n", (1599, 1625), True, 'from telebot import types as t\n'), ((1635, 1683), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""RR"""'], {'callback_data': '"""RR"""'}), "('RR', callback_data='RR')\n", (1657, 1683), True, 'from telebot import types as t\n'), ((1693, 1741), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""SC"""'], {'callback_data': '"""SC"""'}), "('SC', callback_data='SC')\n", (1715, 1741), True, 'from telebot import types as t\n'), ((1751, 1799), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""SP"""'], {'callback_data': '"""SP"""'}), "('SP', callback_data='SP')\n", (1773, 1799), True, 'from telebot import types as t\n'), ((1809, 1857), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""SE"""'], {'callback_data': '"""SE"""'}), "('SE', callback_data='SE')\n", (1831, 1857), True, 'from telebot import types as t\n'), ((1867, 1915), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""TO"""'], {'callback_data': '"""TO"""'}), "('TO', callback_data='TO')\n", (1889, 1915), True, 'from telebot import types as t\n'), ((1929, 1985), 'telebot.types.InlineKeyboardButton', 't.InlineKeyboardButton', (['"""SIGLAS"""'], {'callback_data': '"""SIGLAS"""'}), "('SIGLAS', callback_data='SIGLAS')\n", (1951, 1985), True, 'from telebot import types as t\n'), ((2301, 2325), 'telebot.types.InlineKeyboardMarkup', 't.InlineKeyboardMarkup', ([], {}), '()\n', (2323, 2325), True, 'from telebot import types as t\n'), ((2488, 2566), 'telebot.types.InlineKeyboardButton', 
't.InlineKeyboardButton', (['f"""{self.cid} ({uf})"""'], {'callback_data': 'f"""{self.cid}*{uf}"""'}), "(f'{self.cid} ({uf})', callback_data=f'{self.cid}*{uf}')\n", (2510, 2566), True, 'from telebot import types as t\n')] |
from random import sample
from urllib.parse import urljoin
import click
import requests
from .settings import API_KEY, SOURCES, TOP_HEAD
SOURCE_URL = urljoin(SOURCES, '?apiKey={}'.format(API_KEY))
# TODO: Try a deliberate failure to see how the remote API responds
# so that your code knows how to catch those exceptions properly.
sources = requests.get(SOURCE_URL)
if sources.ok:
    source_list = [source['name'] for source in sources.json()['sources']]
    full_sources_list = {
        source['name']: source['id'] for source in sources.json()['sources']
    }
else:
    # Fall back to empty collections so the module still imports if the request fails
    source_list = []
    full_sources_list = {}
rand_list = sample(source_list, min(4, len(source_list)))
@click.command()
def get_news_items():
"""
This is a script that takes in a choice from you
our wonderful user and returns the top 10 news
headlines of the day to you from your source
of choice.
choice: A string that should match the names of the sources
shown on the screen.
If not, you'll probably get an
error of some sort. Ideally 'Invalid Choice' but
you may get something more strange.
The returned information will contain
the title, description and link of said headline.
This shall allow you to follow up on the story
should you wish to.
"""
choice = click.prompt(
'Enter a valid news source from the list shown',
type=click.Choice(rand_list)
)
if choice in rand_list:
source_id = full_sources_list[choice]
top_headlines = urljoin(
TOP_HEAD, '?sources={}&apiKey={}'.format(source_id, API_KEY)
)
resp = requests.get(top_headlines).json()
for news_item in resp['articles']:
print({
'title': news_item['title'],
'description': news_item['description'],
'url': news_item['url']
})
else:
return 'Invalid Choice'
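# Example session (output shape is illustrative): running the command prompts for one
# of the four randomly sampled sources; picking e.g. "ABC News" looks up its id
# (e.g. "abc-news"), requests the top headlines and prints one dict per article with
# its title, description and url.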
| [
"click.Choice",
"random.sample",
"click.command",
"requests.get"
] | [((346, 370), 'requests.get', 'requests.get', (['SOURCE_URL'], {}), '(SOURCE_URL)\n', (358, 370), False, 'import requests\n'), ((613, 628), 'click.command', 'click.command', ([], {}), '()\n', (626, 628), False, 'import click\n'), ((587, 609), 'random.sample', 'sample', (['source_list', '(4)'], {}), '(source_list, 4)\n', (593, 609), False, 'from random import sample\n'), ((1323, 1346), 'click.Choice', 'click.Choice', (['rand_list'], {}), '(rand_list)\n', (1335, 1346), False, 'import click\n'), ((1561, 1588), 'requests.get', 'requests.get', (['top_headlines'], {}), '(top_headlines)\n', (1573, 1588), False, 'import requests\n')] |
###This was mainly made to test the pandas_datareader library.
###Libraries
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas_datareader as web
import datetime
###Base info
start = datetime.datetime(2000,1,1)
end = datetime.datetime.now()
stock='GOOG'
df= web.DataReader(stock,'google',start,end)
###Dash Base
app = dash.Dash()
app.title='Dynamic Stock Tracker'
app.layout = html.Div(children=[
html.Div(children='''
Stock
'''),
dcc.Input(id='input',value='',type='text'),
html.Div(id='output-graph')
])
###Callback
@app.callback(
Output(component_id='output-graph',component_property='children'),
[Input(component_id='input',component_property='value')]
)
###Main Definition
def update_graph(input_data):
###Update Data
start = datetime.datetime(2000,1,1)
end = datetime.datetime.now()
df= web.DataReader(input_data,'google',start,end)
return dcc.Graph(
id='example-graph',
figure={
'data':[
{'x':df.index,'y':df.High,'type':'line','name':input_data}
],
'layout':{
'title':input_data
}
}
)
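###Example: typing a ticker such as "AAPL" (illustrative) into the input box fires the
###callback above, which re-fetches that symbol via pandas_datareader and redraws the
###line chart of daily highs (df.High).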
###Run Server
if __name__ == '__main__':
app.run_server(debug=True) | [
"datetime.datetime",
"pandas_datareader.DataReader",
"dash.dependencies.Output",
"dash.dependencies.Input",
"datetime.datetime.now",
"dash_html_components.Div",
"dash_core_components.Graph",
"dash_core_components.Input",
"dash.Dash"
] | [((272, 301), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (289, 301), False, 'import datetime\n'), ((306, 329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (327, 329), False, 'import datetime\n'), ((347, 390), 'pandas_datareader.DataReader', 'web.DataReader', (['stock', '"""google"""', 'start', 'end'], {}), "(stock, 'google', start, end)\n", (361, 390), True, 'import pandas_datareader as web\n'), ((407, 418), 'dash.Dash', 'dash.Dash', ([], {}), '()\n', (416, 418), False, 'import dash\n'), ((872, 901), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (889, 901), False, 'import datetime\n'), ((910, 933), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (931, 933), False, 'import datetime\n'), ((942, 990), 'pandas_datareader.DataReader', 'web.DataReader', (['input_data', '"""google"""', 'start', 'end'], {}), "(input_data, 'google', start, end)\n", (956, 990), True, 'import pandas_datareader as web\n'), ((999, 1151), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""example-graph"""', 'figure': "{'data': [{'x': df.index, 'y': df.High, 'type': 'line', 'name': input_data}\n ], 'layout': {'title': input_data}}"}), "(id='example-graph', figure={'data': [{'x': df.index, 'y': df.High,\n 'type': 'line', 'name': input_data}], 'layout': {'title': input_data}})\n", (1008, 1151), True, 'import dash_core_components as dcc\n'), ((658, 724), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""output-graph"""', 'component_property': '"""children"""'}), "(component_id='output-graph', component_property='children')\n", (664, 724), False, 'from dash.dependencies import Input, Output\n'), ((730, 785), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""input"""', 'component_property': '"""value"""'}), "(component_id='input', component_property='value')\n", (735, 785), False, 'from dash.dependencies import Input, Output\n'), ((490, 538), 'dash_html_components.Div', 'html.Div', ([], {'children': '"""\n Stock\n """'}), '(children="""\n Stock\n """)\n', (498, 538), True, 'import dash_html_components as html\n'), ((544, 588), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input"""', 'value': '""""""', 'type': '"""text"""'}), "(id='input', value='', type='text')\n", (553, 588), True, 'import dash_core_components as dcc\n'), ((592, 619), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""output-graph"""'}), "(id='output-graph')\n", (600, 619), True, 'import dash_html_components as html\n')] |
from flask import Flask, request,redirect, abort,render_template,session,copy_current_request_context
from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import LoginManager, login_user, current_user, UserMixin
from flask_socketio import SocketIO, emit, send
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_socketio import join_room, leave_room
from .forms import LoginForm, MessageForm, PasswordForm, DeleteUser
from flask_dropzone import Dropzone
from flask_cors import CORS, cross_origin
import locale
import os
import datetime
import time
import hashlib
from app.db import db
import sys
sys.path.append("transfchatbot")
from botapi import botapi
from werkzeug.middleware.proxy_fix import ProxyFix
locale.setlocale(locale.LC_TIME, "fr_FR.UTF-8")
bot=True
app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
#app.secret_key ='<KEY>'
app.config['DROPZONE_UPLOAD_MULTIPLE'] = False
app.config['DROPZONE_ALLOWED_FILE_CUSTOM'] = True
app.config['DROPZONE_ALLOWED_FILE_TYPE'] = '.jpg, .png'#'image/*'
app.config['DROPZONE_REDIRECT_VIEW'] = 'results'
app.config['UPLOADED_PHOTOS_DEST'] = os.getcwd() + '/images'
app.config['DROPZONE_DEFAULT_MESSAGE'] ="+"
app.config['DROPZONE_INVALID_FILE_TYPE'] ="Formats acceptés : .jpg, .png"
app.config['DROPZONE_FILE_TOO_BIG'] ="La taille de l'image est trop grande ( {{filesize}} ), merci de ne pas dépasser {{maxFilesize}}Mo."
app.config['DROPZONE_SERVER_ERROR'] ="Erreur de chargement de l'image"
app.config['SECRET_KEY'] = 'maarionnbooot'
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app) # set maximum file size, default is 16MB
DB=db()
BOT=botapi()
dropzone = Dropzone(app)
login = LoginManager(app)
cors = CORS(app,resources={r"/*":{"origins":"*"}})
socketio = SocketIO(app,cors_allowed_origins=["https://turfu-festival.ddns.net","https://www.esam-c2.fr","http://htmlfiesta.com","https://marionbot.ddns.net"])
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
clients={}
@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])
@login.user_loader
def user_loader(id):
return User(id)
class User(UserMixin):
def __init__(self, username):
self.id = username
@app.route('/')
@app.route('/home')
@app.route('/index', methods=['GET', 'POST'])
def index():
browser = request.user_agent.browser
form = MessageForm()
session["ismarion"]=False
if "user" not in session:
return redirect("/login")
if session["user"]["id"]==1:
session["ismarion"]=True
speakers=DB.getChats()
session["chatid"]=speakers[0]["id"]
return render_template("indexmarion.html", title="Marion", form=form,speakers=speakers,to=session["chatid"],browser=browser)
else:
#session["chatid"]=#session["user"]["id"]
if "chatid" not in session:
session["chatid"]=DB.getUserChatId(session["user"]["id"])
session["access"]=DB.getUserAccess(session["user"]["id"])
return render_template("index.html", title="Marion", form=form,to=session["chatid"],browser=browser,useraccess=session["access"])
@app.route('/login', methods=['GET', 'POST'])
def login():
"""
if g.user and g.user.is_authenticated:
flash("You are already logged in")
return redirect('/index')
"""
form = LoginForm()
if form.validate_on_submit():
"""
users=DB.userExists(form.username.data)
if len(users) != 1:
flash('User %s does not exist' %
(form.username.data))
return render_template('login.html',
title='Sign In',
form=form)
"""
#user = users.first()
user=DB.checkUserPassword(form.username.data,encript(form.password.data))
if not user:
#if not user.check_password(form.password.data):
flash('Mot de passe incorrect')
return render_template('login.html',
title='Sign In',
form=form)
session["user"]=user
if user["id"]==1:
session["ismarion"]=True
DB.setMarionStatus()
else:
session["chatid"]=DB.getUserChatId(session["user"]["id"])
return redirect('/index')
return render_template('login.html',
title='Sign In',
form=form)
@app.route('/messages',methods=['GET'])
def get_messages():
#msgs = Message.query.order_by(Message.id.desc()).limit(20).all()[::-1]
speakerID=request.args.get("id")
lastMsg=request.args.get("last")
msgs=False
if speakerID:
#marion custom speaker
msgs = DB.getMessages(speakerID,lastMsg)
else:
#user with marion
#print('session["chatid"]',session["chatid"])
if "chatid" in session:
msgs = DB.getMessages(session["chatid"],lastMsg)
if msgs:
return render_template("messages.html", messages=msgs,current=current_user)
return ""
@app.route('/upload',methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
f = request.files.get('file')
extension=os.path.splitext(f.filename)[1]
fileName="marion_"+str(session["user"]["id"])+str(time.time())+extension
f.save(os.path.join('app/static/images',fileName))
#save message
chatid=int(request.values["to"])
DB.saveMessage(session["user"]["id"],chatid, fileName,1)
return render_template('uploaded.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
form = LoginForm()
if form.validate_on_submit():
username=form.username.data
# Check if users with this name exists
if DB.userExists(username):
flash("Ce pseudonyme est déjà pris, merci d’en choisir un autre")
return render_template('register.html',
title='register',
form=form)
if len(form.password.data) < 5:
flash("Le mot de passe doit contenir au moins 5 caractères")
return render_template('register.html',
title='register',
form=form)
#create user
hashed=encript(form.password.data)
userID,chatID=DB.createUser(username,1,hashed)
session["user"]={"id":userID,"name":username}
session["chatid"]=chatID
return redirect("/index")
return render_template('register.html',
title='register',
form=form)
@app.route('/guest')
def initguest():
ts = int(time.time())
username="guest_"+str(ts)
userID,chatID=DB.createUser(username,0)
session["user"]={"id":userID,"name":username}
session["chatid"]=DB.getUserChatId(session["user"]["id"])
#session["chatid"]=chatID
print("user",session["user"],"chatid",session["chatid"])
print("")
session["ismarion"]=False
#session["chatid"]=DB.getUserChatId(session["user"]["id"])
#session["chatid"]=DB.getUserChatId(session["user"]["id"])
return redirect("/index")
# MARION ADMIN :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@app.route('/archiveuser', methods=['GET'])
def archiveuser():
if not isAdmin():
return redirect("/")
chatID=request.args.get("id")
DB.archiveChat(chatID)
return ""
@app.route('/markasseen', methods=['POST'])
def markasseen():
#seenmessages=request.get_data(parse_form_data=False)
seenmessages=request.form.getlist("seen[]")
print("seenmessages",seenmessages)
return ""
@app.route('/updatechatid', methods=['POST'])
def updatechatid():
if not isAdmin():
return redirect("/")
chatID=request.form.get("chatid")
session["chatid"]=chatID
#update notseen to 0
DB.removeNotSeen(chatID)
#print("/messages?id="+str(chatID))
#return redirect("/messages?id="+str(chatID))
#print(session["chatid"])
msgs = DB.getMessages(session["chatid"],False)
return render_template("messages.html", messages=msgs,current=current_user)
@app.route('/speakers',methods=['GET'])
def get_speakers():
speakerID=request.args.get("speaker")
if not isAdmin():
return redirect("/")
DB.setMarionStatus()
speakers=DB.getChats()
return render_template("speakers.html", title="speakers", speakers=speakers,activespeaker=speakerID)
# END MARION ADMIN :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@app.template_filter('strftime')
def _jinja2_filter_datetime(date, fmt=None):
date = datetime.datetime.fromtimestamp(date)
return date.strftime("%a %d %b %H:%M")
#SocketIO:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@socketio.on('imconnected')
def on_connect():
print("imconnected recived")
print(session)
if "user" in session:
print("userid",session["user"]["id"])
print("NEW SESSION ",request.sid)
#clients.append(request.sid)
#room = session["user"]["id"]
clients[session["user"]["id"]]=request.sid
room=session.get('room')
join_room(room)
print("room",room)
print("clients",clients)
emit('welcome', {'userid': str(session["user"]["id"])},room=clients[session["user"]["id"]])
@socketio.on('heartbeat')
def heartbeat():
#print("heartbeat")
if "last" in session:
last=session["last"]
msgs = DB.getMessages(session["chatid"],last)
htmlmessages=render_template("messages.html", messages=msgs,current=session["user"]["id"])
emit("newmessages",htmlmessages,room=clients[session["user"]["id"]])
emit("heartbeat")
@socketio.on('browser_ready')
def handle_browser_ready(data):
print("GOT BROWSER READY!",data)
if "chatid" in data["data"]:
print("get messages from",data["data"]["chatid"])
msgs = DB.getMessages(data["data"]["chatid"],False)
session["chatid"]=int(data["data"]["chatid"])
session["user"]["id"]=int(data["data"]["speakerid"])
else:
msgs = DB.getMessages(session["chatid"],False)
htmlmessages=render_template("messages.html", messages=msgs,current=current_user)
#XXX FIX NEXT LINE
emit("initmessages",htmlmessages,room=clients[session["user"]["id"]])
emit("heartbeat")
@socketio.on('post')
def handle_post(data):
print("POSTED NEW MESSAGE",session["user"]["id"],data)
message=data["message"]
last=False
if "last" in data:
last=data["last"]
session["last"]=last
if session["ismarion"]:
print("SAVING MESSAGE IS MARION WRITING")
print("SESSION USER ID",session["user"]["id"])
#chatid=1
#print("chatid marion",chatid)
DB.saveMessage(1,session["chatid"], message)
else:
DB.saveMessage(session["user"]["id"],session["chatid"], message)
#msgs = DB.getMessages(session["chatid"],last)
msgs = DB.getMessages(session["chatid"])
if bot:
#https://stackoverflow.com/questions/34581255/python-flask-socketio-send-message-from-thread-not-always-working
bott=socketio.start_background_task(BOT.manageTalk,session["user"]["id"],session["chatid"],message,DB,msgs,botError)
htmlmessages=render_template("messages.html", messages=msgs,current=session["user"]["id"])
#print("htmlnewmessages",htmlmessages)
print("EMIT TO",clients[session["user"]["id"]])
emit("newmessages",htmlmessages,room=clients[session["user"]["id"]])
#functions::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def botError(nothing):
emit("boterror",True)
def isAdmin():
if session["ismarion"]:
return True
return False
def encript(password):
hashed=hashlib.md5(password.encode('utf-8'))
hashed=str(hashed.hexdigest())
return hashed
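# Example (well-known MD5 test vector): encript("password") -> "5f4dcc3b5aa765d61d8327deb882cf99".
# Note that unsalted MD5 is a weak choice for real password storage.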
| [
"flask.render_template",
"flask_login.LoginManager",
"flask.request.args.get",
"flask_uploads.UploadSet",
"flask_cors.CORS",
"flask.Flask",
"flask_cors.cross_origin",
"flask_socketio.SocketIO",
"app.db.db",
"flask_socketio.join_room",
"flask_uploads.configure_uploads",
"sys.path.append",
"fl... | [((685, 717), 'sys.path.append', 'sys.path.append', (['"""transfchatbot"""'], {}), "('transfchatbot')\n", (700, 717), False, 'import sys\n'), ((796, 843), 'locale.setlocale', 'locale.setlocale', (['locale.LC_TIME', '"""fr_FR.UTF-8"""'], {}), "(locale.LC_TIME, 'fr_FR.UTF-8')\n", (812, 843), False, 'import locale\n'), ((861, 876), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (866, 876), False, 'from flask import Flask, request, redirect, abort, render_template, session, copy_current_request_context\n'), ((885, 930), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (889, 930), False, 'from flask_cors import CORS, cross_origin\n'), ((1661, 1688), 'flask_uploads.UploadSet', 'UploadSet', (['"""photos"""', 'IMAGES'], {}), "('photos', IMAGES)\n", (1670, 1688), False, 'from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\n'), ((1689, 1719), 'flask_uploads.configure_uploads', 'configure_uploads', (['app', 'photos'], {}), '(app, photos)\n', (1706, 1719), False, 'from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\n'), ((1720, 1744), 'flask_uploads.patch_request_class', 'patch_request_class', (['app'], {}), '(app)\n', (1739, 1744), False, 'from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\n'), ((1791, 1795), 'app.db.db', 'db', ([], {}), '()\n', (1793, 1795), False, 'from app.db import db\n'), ((1800, 1808), 'botapi.botapi', 'botapi', ([], {}), '()\n', (1806, 1808), False, 'from botapi import botapi\n'), ((1821, 1834), 'flask_dropzone.Dropzone', 'Dropzone', (['app'], {}), '(app)\n', (1829, 1834), False, 'from flask_dropzone import Dropzone\n'), ((1844, 1861), 'flask_login.LoginManager', 'LoginManager', (['app'], {}), '(app)\n', (1856, 1861), False, 'from flask_login import LoginManager, login_user, current_user, UserMixin\n'), ((1869, 1914), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (1873, 1914), False, 'from flask_cors import CORS, cross_origin\n'), ((1924, 2084), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {'cors_allowed_origins': "['https://turfu-festival.ddns.net', 'https://www.esam-c2.fr',\n 'http://htmlfiesta.com', 'https://marionbot.ddns.net']"}), "(app, cors_allowed_origins=['https://turfu-festival.ddns.net',\n 'https://www.esam-c2.fr', 'http://htmlfiesta.com',\n 'https://marionbot.ddns.net'])\n", (1932, 2084), False, 'from flask_socketio import SocketIO, emit, send\n'), ((2089, 2131), 'werkzeug.middleware.proxy_fix.ProxyFix', 'ProxyFix', (['app.wsgi_app'], {'x_for': '(1)', 'x_proto': '(1)'}), '(app.wsgi_app, x_for=1, x_proto=1)\n', (2097, 2131), False, 'from werkzeug.middleware.proxy_fix import ProxyFix\n'), ((2145, 2213), 'flask_cors.cross_origin', 'cross_origin', ([], {'origin': '"""*"""', 'headers': "['Content- Type', 'Authorization']"}), "(origin='*', headers=['Content- Type', 'Authorization'])\n", (2157, 2213), False, 'from flask_cors import CORS, cross_origin\n'), ((1206, 1217), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1215, 1217), False, 'import os\n'), ((4478, 4535), 'flask.render_template', 'render_template', (['"""login.html"""'], {'title': '"""Sign In"""', 'form': 'form'}), "('login.html', title='Sign In', form=form)\n", (4493, 4535), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((4741, 4763), 'flask.request.args.get', 'request.args.get', 
(['"""id"""'], {}), "('id')\n", (4757, 4763), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((4776, 4800), 'flask.request.args.get', 'request.args.get', (['"""last"""'], {}), "('last')\n", (4792, 4800), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((5673, 5705), 'flask.render_template', 'render_template', (['"""uploaded.html"""'], {}), "('uploaded.html')\n", (5688, 5705), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((6696, 6757), 'flask.render_template', 'render_template', (['"""register.html"""'], {'title': '"""register"""', 'form': 'form'}), "('register.html', title='register', form=form)\n", (6711, 6757), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((7338, 7356), 'flask.redirect', 'redirect', (['"""/index"""'], {}), "('/index')\n", (7346, 7356), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((7565, 7587), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (7581, 7587), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((7769, 7799), 'flask.request.form.getlist', 'request.form.getlist', (['"""seen[]"""'], {}), "('seen[]')\n", (7789, 7799), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((7982, 8008), 'flask.request.form.get', 'request.form.get', (['"""chatid"""'], {}), "('chatid')\n", (7998, 8008), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((8278, 8347), 'flask.render_template', 'render_template', (['"""messages.html"""'], {'messages': 'msgs', 'current': 'current_user'}), "('messages.html', messages=msgs, current=current_user)\n", (8293, 8347), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((8422, 8449), 'flask.request.args.get', 'request.args.get', (['"""speaker"""'], {}), "('speaker')\n", (8438, 8449), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((8564, 8662), 'flask.render_template', 'render_template', (['"""speakers.html"""'], {'title': '"""speakers"""', 'speakers': 'speakers', 'activespeaker': 'speakerID'}), "('speakers.html', title='speakers', speakers=speakers,\n activespeaker=speakerID)\n", (8579, 8662), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((8830, 8867), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['date'], {}), '(date)\n', (8861, 8867), False, 'import datetime\n'), ((9911, 9928), 'flask_socketio.emit', 'emit', (['"""heartbeat"""'], {}), "('heartbeat')\n", (9915, 9928), False, 'from flask_socketio import SocketIO, emit, send\n'), ((10378, 10447), 'flask.render_template', 'render_template', (['"""messages.html"""'], {'messages': 'msgs', 'current': 'current_user'}), "('messages.html', messages=msgs, current=current_user)\n", (10393, 10447), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((10474, 10545), 'flask_socketio.emit', 'emit', (['"""initmessages"""', 'htmlmessages'], {'room': "clients[session['user']['id']]"}), "('initmessages', htmlmessages, room=clients[session['user']['id']])\n", (10478, 10545), False, 'from flask_socketio import SocketIO, emit, send\n'), ((10548, 10565), 'flask_socketio.emit', 'emit', (['"""heartbeat"""'], {}), 
"('heartbeat')\n", (10552, 10565), False, 'from flask_socketio import SocketIO, emit, send\n'), ((11490, 11568), 'flask.render_template', 'render_template', (['"""messages.html"""'], {'messages': 'msgs', 'current': "session['user']['id']"}), "('messages.html', messages=msgs, current=session['user']['id'])\n", (11505, 11568), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((11667, 11737), 'flask_socketio.emit', 'emit', (['"""newmessages"""', 'htmlmessages'], {'room': "clients[session['user']['id']]"}), "('newmessages', htmlmessages, room=clients[session['user']['id']])\n", (11671, 11737), False, 'from flask_socketio import SocketIO, emit, send\n'), ((11846, 11868), 'flask_socketio.emit', 'emit', (['"""boterror"""', '(True)'], {}), "('boterror', True)\n", (11850, 11868), False, 'from flask_socketio import SocketIO, emit, send\n'), ((2595, 2613), 'flask.redirect', 'redirect', (['"""/login"""'], {}), "('/login')\n", (2603, 2613), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((2771, 2896), 'flask.render_template', 'render_template', (['"""indexmarion.html"""'], {'title': '"""Marion"""', 'form': 'form', 'speakers': 'speakers', 'to': "session['chatid']", 'browser': 'browser'}), "('indexmarion.html', title='Marion', form=form, speakers=\n speakers, to=session['chatid'], browser=browser)\n", (2786, 2896), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((3136, 3266), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': '"""Marion"""', 'form': 'form', 'to': "session['chatid']", 'browser': 'browser', 'useraccess': "session['access']"}), "('index.html', title='Marion', form=form, to=session[\n 'chatid'], browser=browser, useraccess=session['access'])\n", (3151, 3266), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((4448, 4466), 'flask.redirect', 'redirect', (['"""/index"""'], {}), "('/index')\n", (4456, 4466), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((5126, 5195), 'flask.render_template', 'render_template', (['"""messages.html"""'], {'messages': 'msgs', 'current': 'current_user'}), "('messages.html', messages=msgs, current=current_user)\n", (5141, 5195), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((5315, 5340), 'flask.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (5332, 5340), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((6666, 6684), 'flask.redirect', 'redirect', (['"""/index"""'], {}), "('/index')\n", (6674, 6684), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((6864, 6875), 'time.time', 'time.time', ([], {}), '()\n', (6873, 6875), False, 'import time\n'), ((7540, 7553), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (7548, 7553), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((7957, 7970), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (7965, 7970), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((8487, 8500), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (8495, 8500), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((9348, 9367), 'flask.session.get', 'session.get', 
(['"""room"""'], {}), "('room')\n", (9359, 9367), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((9376, 9391), 'flask_socketio.join_room', 'join_room', (['room'], {}), '(room)\n', (9385, 9391), False, 'from flask_socketio import join_room, leave_room\n'), ((9752, 9830), 'flask.render_template', 'render_template', (['"""messages.html"""'], {'messages': 'msgs', 'current': "session['user']['id']"}), "('messages.html', messages=msgs, current=session['user']['id'])\n", (9767, 9830), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((9838, 9908), 'flask_socketio.emit', 'emit', (['"""newmessages"""', 'htmlmessages'], {'room': "clients[session['user']['id']]"}), "('newmessages', htmlmessages, room=clients[session['user']['id']])\n", (9842, 9908), False, 'from flask_socketio import SocketIO, emit, send\n'), ((4046, 4077), 'flask.flash', 'flash', (['"""Mot de passe incorrect"""'], {}), "('Mot de passe incorrect')\n", (4051, 4077), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((4097, 4154), 'flask.render_template', 'render_template', (['"""login.html"""'], {'title': '"""Sign In"""', 'form': 'form'}), "('login.html', title='Sign In', form=form)\n", (4112, 4154), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((5359, 5387), 'os.path.splitext', 'os.path.splitext', (['f.filename'], {}), '(f.filename)\n', (5375, 5387), False, 'import os\n'), ((5487, 5530), 'os.path.join', 'os.path.join', (['"""app/static/images"""', 'fileName'], {}), "('app/static/images', fileName)\n", (5499, 5530), False, 'import os\n'), ((5961, 6026), 'flask.flash', 'flash', (['"""Ce pseudonyme est déjà pris, merci d’en choisir un autre"""'], {}), "('Ce pseudonyme est déjà pris, merci d’en choisir un autre')\n", (5966, 6026), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((6046, 6107), 'flask.render_template', 'render_template', (['"""register.html"""'], {'title': '"""register"""', 'form': 'form'}), "('register.html', title='register', form=form)\n", (6061, 6107), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((6231, 6291), 'flask.flash', 'flash', (['"""Le mot de passe doit contenir au moins 5 caractères"""'], {}), "('Le mot de passe doit contenir au moins 5 caractères')\n", (6236, 6291), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((6311, 6372), 'flask.render_template', 'render_template', (['"""register.html"""'], {'title': '"""register"""', 'form': 'form'}), "('register.html', title='register', form=form)\n", (6326, 6372), False, 'from flask import render_template, flash, redirect, session, url_for, request, g\n'), ((5449, 5460), 'time.time', 'time.time', ([], {}), '()\n', (5458, 5460), False, 'import time\n')] |
import configparser
from datetime import datetime
from pyrogram import Client, Filters, Emoji
config = configparser.ConfigParser()
config.read("config.ini")
prefixes = list(config["prefixes"].keys())
chatinfo_message = {"id": f"{Emoji.ID_BUTTON} <b>Id</b>: <code>[%id%]</code>",
"type": f"{Emoji.JAPANESE_SYMBOL_FOR_BEGINNER} <b>Type</b>: <code>[%type%]</code>",
"title": f"{Emoji.FLEUR_DE_LIS} <b>Title</b>: <code>[%title%]</code>",
"invite_link": f"{Emoji.LINK} <b>Invite Link</b>: <code>[%invite_link%]</code>",
"first_name": f"{Emoji.BLOND_HAIRED_MAN_LIGHT_SKIN_TONE} <b>Name</b>: <code>[%first_name%]</code>",
"last_name": f"{Emoji.BUST_IN_SILHOUETTE} <b>Last Name</b>: <code>[%last_name%]</code>",
"username": f"{Emoji.LINK} <b>Username</b>: <code>[%username%]</code>",
"dc_id": f"{Emoji.DESKTOP_COMPUTER} <b>Dc</b>: <code>[%dc_id%]</code>",
"status": f"{Emoji.MOBILE_PHONE_WITH_ARROW} <b>Status</b>: <code>[%status%]</code>",
"last_online_date": f"{Emoji.TWELVE_O_CLOCK} <b>Last Online Date</b>: <code>["
f"%last_online_date%]</code>",
"next_offline_date": f"{Emoji.SEVEN_THIRTY} <b>Next Offline Date</b>: <code>["
f"%next_offline_date%]</code>",
"is_bot": f"{Emoji.ROBOT_FACE} <b>Is Bot</b>: <code>[%is_bot%]</code>",
"is_contact": f"{Emoji.TELEPHONE} <b>Is Contact</b>: <code>[%is_contact%]</code>",
"is_mutual_contact": f"{Emoji.MOBILE_PHONE} <b>Is Mutual Contact</b>: <code>["
f"%is_mutual_contact%]</code>",
"is_scam": f"{Emoji.CROSS_MARK} <b>Is scam</b>: <code>[%is_scam%]</code>",
"sticker_set_name": f"{Emoji.DIAMOND_WITH_A_DOT} <b>Sticker Set</b>: <code>["
f"%sticker_set_name%]</code>",
"members_count": f"{Emoji.FAMILY_MAN_WOMAN_GIRL_BOY} <b>Members</b>: "
f"<code>[%members_count%]</code>",
"bio": f"{Emoji.TRIDENT_EMBLEM} <b>Bio</b>: <code>[%bio%]</code>"}
@Client.on_message(Filters.user("self") & Filters.command("chatinfo", prefixes=prefixes))
def chatinfo_command(c, msg):
target = c.get_chat(msg.chat.id)
message = "{} Info {}\n\n".format(Emoji.INFORMATION, Emoji.INFORMATION)
for key in chatinfo_message:
try:
message += chatinfo_message[key].replace(
f"[%{key}%]", str(
(
target[key] if key != "next_offline_date" and key != "last_online_date" else (
datetime.fromtimestamp(int(target[key])).strftime("%H:%M:%S %d/%m/%y")
)
) if target[key] else (
target["raise AttributeError()"])
)
) + "\n"
except AttributeError:
pass
bio = c.get_chat(target.id).description
if bio:
message += chatinfo_message["bio"].replace("[%bio%]", c.get_chat(target.id).description)
message += f"\n\n<a href=\"tg://user?id={target.id}\">Profile Link</a>"
msg.edit_text(message)
print("[MultiUserbot] Loaded \"chatinfo.py\" plugin")
| [
"pyrogram.Filters.user",
"configparser.ConfigParser",
"pyrogram.Filters.command"
] | [((105, 132), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (130, 132), False, 'import configparser\n'), ((2340, 2360), 'pyrogram.Filters.user', 'Filters.user', (['"""self"""'], {}), "('self')\n", (2352, 2360), False, 'from pyrogram import Client, Filters, Emoji\n'), ((2363, 2409), 'pyrogram.Filters.command', 'Filters.command', (['"""chatinfo"""'], {'prefixes': 'prefixes'}), "('chatinfo', prefixes=prefixes)\n", (2378, 2409), False, 'from pyrogram import Client, Filters, Emoji\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
class objImpresoras():
    def __init__(self):
        self.lista = []
        if os.name == 'nt':  # Windows
            import win32print
            # query EnumPrinters with several flag values and collect unique printer names
            for i in range(1, 6):
                for p in win32print.EnumPrinters(i):
                    if p[2] not in self.lista:
                        self.lista.append(p[2])
        else:  # Linux
            # "lpstat -a" lists the printers currently accepting jobs
            p = subprocess.Popen(["lpstat", "-a"], stdout=subprocess.PIPE)
            output, err = p.communicate()
            # print(output)
            output = output.split(b'\n')
            for p in output:
                p = p.split(b' ')
                if len(p[0].strip()) > 0:
                    # decode so the list holds str on both platforms instead of bytes on Linux
                    self.lista.append(p[0].decode())
        print(self.lista)
    def getLista(self):
        return self.lista
if __name__ == "__main__":
    obj = objImpresoras()
    print(obj.getLista())
# 1:49 image file | [
"subprocess.Popen",
"win32print.EnumPrinters"
] | [((345, 403), 'subprocess.Popen', 'subprocess.Popen', (["['lpstat', '-a']"], {'stdout': 'subprocess.PIPE'}), "(['lpstat', '-a'], stdout=subprocess.PIPE)\n", (361, 403), False, 'import subprocess\n'), ((228, 254), 'win32print.EnumPrinters', 'win32print.EnumPrinters', (['i'], {}), '(i)\n', (251, 254), False, 'import win32print\n')] |
import argparse
import numpy as np
from scipy import sparse
from scipy.optimize import linprog
import matplotlib.pyplot as plt
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import TensorDataset, DataLoader
from captum.attr import IntegratedGradients
def temp_make_energy():
""" I tried to formulate the energy problem as a LP formulation, so defining c, A, and b. I am almost entirely
certain that this is in fact possible but I had too many problems and did not want to invest to much time in this.
So I stopped trying. """
time = 8760
# inputs:
c_pv = 1
c_bat = 1
c_buy = 1
demand = np.genfromtxt("data/TS_Demand.csv").reshape((time, 1))
avail_pv = np.genfromtxt("data/TS_PVAvail.csv").reshape((time, 1))
# cost vector
c_buy = np.full(time, c_buy)
c_zeros = np.zeros(time*5) # for the right dimensionality
c = np.concatenate((np.array([c_pv]), np.array([c_bat]), c_buy, c_zeros))
x_dim = c.shape[0]
    # What does x look like? (everything marked "(T)" contains T (= time) elements, one per time step)
# c_ap_pv, c_ap_bat_s, p_buy (T), p_pv (T), p_bat_out (T), p_bat_in (T), p_bat_s (T)
# constraints
    # energy balance (maybe drop the equality and use a single inequality instead: >= demand, or <= -demand only)
a_energy_balance = sparse.lil_matrix((2*time, x_dim))
b_energy_balance = sparse.lil_matrix((2*time, 1))
    for t in range(time):  # this could certainly be written more efficiently; for now, correctness is the priority
a_energy_balance[t * 2, 2 + t] = 1 # p_buy(t)
a_energy_balance[t * 2, 2 + time + t] = 1 # p_pv(t)
a_energy_balance[t * 2, 2 + 2 * time + t] = 1 # p_bat_out(t)
a_energy_balance[t * 2, 2 + 3 * time + t] = -1 # p_bat_in(t)
b_energy_balance[t * 2] = demand[t]
a_energy_balance[t * 2 + 1, 2 + t] = -1 # p_buy(t)
a_energy_balance[t * 2 + 1, 2 + time + t] = -1 # p_pv(t)
a_energy_balance[t * 2 + 1, 2 + 2 * time + t] = -1 # p_bat_out(t)
a_energy_balance[t * 2 + 1, 2 + 3 * time + t] = 1 # p_bat_in(t)
b_energy_balance[t * 2 + 1] = -demand[t]
# battery equation
a_battery_equation = sparse.lil_matrix((2 * (time - 1), x_dim))
b_battery_equation = sparse.lil_matrix((2 * (time - 1), 1)) # just stays zero, so that is fine
for t in range(1, time):
a_battery_equation[(t - 1) * 2, 2 + 4 * time + t] = 1 # p_bat_s (t)
a_battery_equation[(t - 1) * 2, 2 + 4 * time + t - 1] = -1 # p_bat_s (t - 1)
a_battery_equation[(t - 1) * 2, 2 + 3 * time + t] = -1 # p_bat_in (t)
a_battery_equation[(t - 1) * 2, 2 + 2 * time + t] = 1 # p_bat_out (t)
a_battery_equation[(t - 1) * 2 + 1, 2 + 4 * time + t] = -1 # p_bat_s (t)
a_battery_equation[(t - 1) * 2 + 1, 2 + 4 * time + t - 1] = 1 # p_bat_s (t - 1)
a_battery_equation[(t - 1) * 2 + 1, 2 + 3 * time + t] = 1 # p_bat_in (t)
a_battery_equation[(t - 1) * 2 + 1, 2 + 2 * time + t] = -1 # p_bat_out (t)
# pv production limit (0 <= p_pv (t) always given per LP definition (x >= 0))
    # lifetime factor missing (delta t, I think) --> but why not for the battery? (per slides: no, per code: yes)
a_pv_production_limit = sparse.lil_matrix((time, x_dim))
b_pv_production_limit = sparse.lil_matrix((time, 1)) # just stays zero, so that is fine
for t in range(time):
a_pv_production_limit[t, 2 + time + t] = 1
a_pv_production_limit[t, 0] = -avail_pv[t]
# battery charge limit (0 <= p_bat_in (t) always given per LP definition (x >= 0))
a_battery_charge_limit = sparse.lil_matrix((time, x_dim))
b_battery_charge_limit = sparse.lil_matrix((time, 1)) # just stays zero, so that is fine
for t in range(time):
a_battery_charge_limit[t, 2 + 2 * time + t] = 1
a_battery_charge_limit[t, 1] = -1
# battery initial state
a_battery_initial_state = sparse.lil_matrix((2, x_dim))
b_battery_initial_state = sparse.lil_matrix((2, 1)) # just stays zero, so that is fine
    a_battery_initial_state[0, 2 + 4 * time] = 1
    a_battery_initial_state[1, 2 + 4 * time] = -1  # opposite inequality of the pair; maybe not necessary because of x >= 0
# power purchase limit (0 <= p_buy (t) always given per LP definition (x >= 0))
# concatenate for constraint matrix a and vector b
a = sparse.vstack(
(a_energy_balance, a_battery_equation, a_pv_production_limit, a_battery_charge_limit, a_battery_initial_state))
b = sparse.vstack(
(b_energy_balance, b_battery_equation, b_pv_production_limit, b_battery_charge_limit, b_battery_initial_state))
# time with sparse matrices: csr: 92.63453531265259, lil: 1.463003396987915
    # the following linprog solve ran for about two hours without finishing before I stopped it; more linprog
    # options might help, but it is probably better not to use this at all
res = linprog(c, a, b.toarray(), method="highs")
sol = res.x
print("Finished")
def load_energy_data(seed=0, with_new=True):
""" Load data points for the energy system. Currently contains 10000 data points. """
# data has 9 columns: cost_pv, cost_bat, cost_buy, demand, cap_pv, cap_bat, own_gen, totex, capex
orig_data = np.load("data/energy_data.npy")
orig_data_plus = None
if with_new:
# "data_plus.npy" is the combination of "data_values_interval.npy", data_values_around_min.npy", and
# "data_values_around_max.npy", generated by "energy_data.py"
orig_data_plus = np.load("data/data_plus.npy")
orig_data = np.concatenate((orig_data, orig_data_plus))
np.random.seed(seed)
np.random.shuffle(orig_data)
    # this manually set mean is very close to the calculated mean anyway (with infinite data it would be exactly
    # the same), but using the manually set mean means we have data for +1 and -1 (normalized) for all input values
    # when the newly generated data is also used
if with_new:
# orig_data_plus[100] is exactly the mean data with corresponding outputs
data_mean = orig_data_plus[100] # np.mean(orig_data, axis=0)
else:
data_mean = np.mean(orig_data, axis=0)
# normalize data and save variables to be able to reverse that
data = orig_data - data_mean
data_max = np.max(np.abs(data), axis=0)
data = data / data_max
e_input = data[:, :4]
e_output = data[:, 4:]
return e_input, e_output, data_mean, data_max, orig_data
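# Hedged usage sketch (added; the helper name is hypothetical): load_energy_data() normalizes by subtracting
# data_mean and dividing by data_max, so a normalized 9-value row is mapped back to the original scale by
# reversing those two steps.
def _denormalize_row(row, data_mean, data_max):
    """ Undo the normalization applied in load_energy_data for a single data row. """
    return row * data_max + data_mean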
def vis_energy(attributions, values=None, edge_labels=False, vis_real=False):
""" Visualizes attribution for the energy neural network, from inputs to outputs. """
inp_params, output_vals, pred_outputs = None, None, None
if values is not None:
inp_params, output_vals, pred_outputs = values
g = nx.DiGraph()
# index to name (input/output)
itni = {0: "Photovoltaik", 1: "Batteriespeicher", 2: "Stromnetz", 3: "Energiebedarf"}
itno = {0: "Kapazität PV", 1: "Kapazität Batterie", 2: "Eigenerzeugung", 3: "TOTEX", 4: "CAPEX"}
# define nodes
g.add_node("Photovoltaik", pos=(0, 7))
g.add_node("Batteriespeicher", pos=(0, 5))
g.add_node("Stromnetz", pos=(0, 3))
g.add_node("Energiebedarf", pos=(0, 1))
g.add_node("Kapazität PV", pos=(5, 8))
g.add_node("Kapazität Batterie", pos=(5, 6))
g.add_node("Eigenerzeugung", pos=(5, 4))
g.add_node("TOTEX", pos=(5, 2))
g.add_node("CAPEX", pos=(5, 0))
labeldict = {}
if values is not None:
labeldict["Photovoltaik"] = f"Photovoltaik\n{inp_params[0, 0]:.2f}"
labeldict["Batteriespeicher"] = f"Batteriespeicher\n{inp_params[0, 1]:.2f}"
labeldict["Stromnetz"] = f"Stromnetz\n{inp_params[0, 2]:.2f}"
labeldict["Energiebedarf"] = f"Energiebedarf\n{inp_params[0, 3]:.2f}"
if output_vals is None:
labeldict["Kapazität PV"] = f"Kapazität PV\n{pred_outputs[0, 0]:.2f}"
labeldict["Kapazität Batterie"] = f"Kapazität Batterie\n{pred_outputs[0, 1]:.2f}"
labeldict["Eigenerzeugung"] = f"Eigenerzeugung\n{pred_outputs[0, 2]:.2f}"
labeldict["TOTEX"] = f"TOTEX\n{pred_outputs[0, 3]:.2f}"
labeldict["CAPEX"] = f"CAPEX\n{pred_outputs[0, 4]:.2f}"
else:
labeldict["Kapazität PV"] = f"Kapazität PV\n{pred_outputs[0, 0]:.2f} ({output_vals[0, 0]:.2f})"
labeldict["Kapazität Batterie"] = f"Kapazität Batterie\n{pred_outputs[0, 1]:.2f} ({output_vals[0, 1]:.2f})"
labeldict["Eigenerzeugung"] = f"Eigenerzeugung\n{pred_outputs[0, 2]:.2f} ({output_vals[0, 2]:.2f})"
labeldict["TOTEX"] = f"TOTEX\n{pred_outputs[0, 3]:.2f} ({output_vals[0, 3]:.2f})"
labeldict["CAPEX"] = f"CAPEX\n{pred_outputs[0, 4]:.2f} ({output_vals[0, 4]:.2f})"
if vis_real:
_, _, data_mean, data_max, _ = load_energy_data()
str_add = "\n" + r"$\rightarrow$"
input_diffs = inp_params[0, :] * data_max[:4]
real_inputs = input_diffs + data_mean[:4]
labeldict["Photovoltaik"] += str_add + f"{real_inputs[0]:.0f} ({'+' if input_diffs[0] > 0 else ''}{input_diffs[0]:.0f})"
labeldict["Batteriespeicher"] += str_add + f"{real_inputs[1]:.0f} ({'+' if input_diffs[1] > 0 else ''}{input_diffs[1]:.0f})"
labeldict["Stromnetz"] += str_add + f"{real_inputs[2]:.0f} ({'+' if input_diffs[2] > 0 else ''}{input_diffs[2]:.3f})"
labeldict["Energiebedarf"] += str_add + f"{real_inputs[3]:.0f} ({'+' if input_diffs[3] > 0 else ''}{input_diffs[3]:.0f})"
output_diffs = pred_outputs[0, :] * data_max[4:]
real_outputs = output_diffs + data_mean[4:]
labeldict["Kapazität PV"] += str_add + f"{real_outputs[0]:.2f} ({'+' if output_diffs[0] > 0 else ''}{output_diffs[0]:.2f})"
labeldict["Kapazität Batterie"] += str_add + f"{real_outputs[1]:.2f} ({'+' if output_diffs[1] > 0 else ''}{output_diffs[1]:.2f})"
labeldict["Eigenerzeugung"] += str_add + f"{real_outputs[2]:.3f} ({'+' if output_diffs[2] > 0 else ''}{output_diffs[2]:.3f})"
labeldict["TOTEX"] += str_add + f"{real_outputs[3]:.0f} ({'+' if output_diffs[3] > 0 else ''}{output_diffs[3]:.0f})"
labeldict["CAPEX"] += str_add + f"{real_outputs[4]:.0f} ({'+' if output_diffs[4] > 0 else ''}{output_diffs[4]:.0f})"
else:
labeldict["Photovoltaik"] = "Photovoltaik"
labeldict["Batteriespeicher"] = "Batteriespeicher"
labeldict["Stromnetz"] = "Stromnetz"
labeldict["Energiebedarf"] = "Energiebedarf"
labeldict["Kapazität PV"] = "Kapazität PV"
labeldict["Kapazität Batterie"] = "Kapazität Batterie"
labeldict["Eigenerzeugung"] = "Eigenerzeugung"
labeldict["TOTEX"] = "TOTEX"
labeldict["CAPEX"] = "CAPEX"
edge_list = []
edge_attr = []
for i, o in attributions:
edge_list.append((itni[i], itno[o]))
edge_attr.append(attributions[i, o])
color_bounds = np.max(np.abs(edge_attr))
cmap = plt.cm.RdBu
pos = nx.get_node_attributes(g, "pos")
nx.draw(g, pos, labels=labeldict, with_labels=True, node_color="w")
nx.draw_networkx_edges(g, pos, edgelist=edge_list, edge_color=edge_attr, edge_cmap=cmap, edge_vmin=-color_bounds,
edge_vmax=color_bounds)
if edge_labels:
e_labels = {(itni[key[0]], itno[key[1]]): f"{attributions[key]:.4f}" for key in attributions.keys()}
nx.draw_networkx_edge_labels(g, pos=pos, edge_labels=e_labels, label_pos=0.5)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=-color_bounds, vmax=color_bounds))
sm.set_array([])
plt.colorbar(sm)
plt.show()
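# Illustrative sketch (added; the attribution values are made up): "attributions" maps (input_index, output_index)
# pairs to signed attribution scores, so the call below draws two edges, Photovoltaik -> TOTEX and
# Energiebedarf -> TOTEX.
def _vis_energy_demo():
    demo_attributions = {(0, 3): 0.12, (3, 3): -0.4}
    vis_energy(demo_attributions, edge_labels=True)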
def visualize_loss(train_loss, test_loss):
""" Plot the train and test loss of a neural network after learning. One graph shows the loss progress over all
iterations, another one for only the last 10 iterations (can see whether it is still improving)."""
    nr_epochs = list(range(1, len(train_loss) + 1))
print(train_loss)
print(test_loss)
ax1 = plt.subplot(2, 1, 1)
ax1.plot(nr_epochs, train_loss, label="Train")
ax1.plot(nr_epochs, test_loss, label="Test")
ax1.set_ylabel("Loss")
ax1.set_xlabel("Epoch")
ax1.set_xticks(nr_epochs)
ax1.set_xticklabels(str(epoch) for epoch in nr_epochs)
ax1.set_title("Loss over all epochs")
ax1.grid(True)
ax1.legend()
ax2 = plt.subplot(2, 1, 2)
ax2.plot(nr_epochs[-10:], train_loss[-10:], label="Train")
ax2.plot(nr_epochs[-10:], test_loss[-10:], label="Test")
ax2.set_ylabel("Loss")
ax2.set_xlabel("Epoch")
ax2.set_xticks(nr_epochs[-10:])
ax2.set_xticklabels(str(epoch) for epoch in nr_epochs[-10:])
ax2.set_title("Loss over all epochs")
ax2.set_title("Loss over the last 10 epochs")
ax2.grid(True)
ax2.legend()
plt.subplots_adjust(hspace=0.6)
plt.show()
class EnergyNet(nn.Module):
""" Neural network learning the relationship of the input and output values as loaded by load_energy_data. """
def __init__(self, dim_input, dim_output):
""" Initialize the neural network. """
super(EnergyNet, self).__init__()
factor = 80 * 2 * 2 * 2 * 2
self.fc1 = nn.Linear(dim_input, factor * 2)
self.fc2 = nn.Linear(factor * 2, factor * 2)
self.fc2b = nn.Linear(factor * 2, factor * 2)
self.fc3 = nn.Linear(factor * 2, factor)
self.fc4 = nn.Linear(factor, dim_output)
def forward(self, param):
""" Forward pass. """
h = self.fc1(param)
h = func.relu(h)
h = self.fc2(h)
h = func.relu(h)
h = self.fc2b(h)
h = func.relu(h)
h = self.fc3(h)
h = func.relu(h)
output = self.fc4(h)
return output
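# Smoke-test sketch (added): with the data from load_energy_data, the network maps 4 normalized inputs to
# 5 outputs, so a single forward pass on a dummy batch should return a (1, 5) tensor.
def _energy_net_smoke_test():
    net = EnergyNet(4, 5)
    with torch.no_grad():
        out = net(torch.zeros(1, 4))
    return out.shape  # expected: torch.Size([1, 5])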
def train(args, model, device, train_loader, optimizer, epoch):
""" Train the model. """
model.train()
# mean squared error loss for output
criterion = torch.nn.MSELoss()
for batch_idx, (e_input, e_output) in enumerate(train_loader):
e_input, e_output = e_input.to(device), e_output.to(device)
optimizer.zero_grad()
prediction = model(e_input)
loss = criterion(prediction, e_output)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(epoch, batch_idx * len(e_input),
len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
""" Test the model. """
model.eval()
test_loss = 0
# mean squared error loss for output
criterion = torch.nn.MSELoss(reduction="sum")
with torch.no_grad():
for (e_input, e_output) in test_loader:
e_input, e_output = e_input.to(device), e_output.to(device)
prediction = model(e_input)
test_loss += criterion(prediction, e_output).item()
test_loss /= len(test_loader.batch_sampler)
print("\nTest set: Average loss: {:.4f}\n".format(test_loss))
return test_loss
def train_model(args):
""" Get model parameters, data and train a model. """
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
torch.manual_seed(args.seed)
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1,
"pin_memory": True,
"shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
e_input, e_output, _, _, _ = load_energy_data(args.seed)
# parameter
input_train = e_input[:e_input.shape[0] // 2]
input_test = e_input[e_input.shape[0] // 2:]
# use half of the data for training and testing each
output_train = e_output[:e_output.shape[0] // 2]
output_test = e_output[e_output.shape[0] // 2:]
tensor_input_train = torch.Tensor(input_train)
tensor_input_test = torch.Tensor(input_test)
tensor_output_train = torch.Tensor(output_train)
tensor_output_test = torch.Tensor(output_test)
dataset_train = TensorDataset(tensor_input_train, tensor_output_train)
dataset_test = TensorDataset(tensor_input_test, tensor_output_test)
dataloader_train = DataLoader(dataset_train, **train_kwargs)
dataloader_train_for_test = DataLoader(dataset_train, **test_kwargs)
dataloader_test = DataLoader(dataset_test, **test_kwargs)
model = EnergyNet(e_input.shape[1], e_output.shape[1]).to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
train_loss = []
test_loss = []
for epoch in range(1, args.epochs + 1):
train(args, model, device, dataloader_train, optimizer, epoch)
train_loss.append(test(model, device, dataloader_train_for_test))
test_loss.append(test(model, device, dataloader_test))
scheduler.step()
visualize_loss(train_loss, test_loss)
print(train_loss)
print(test_loss)
if args.save_model:
# save the model
save_path = f"models/Energy_{args.save_name}.pt"
torch.save(model.state_dict(), save_path)
return model
def prepare_model(args):
""" Define the model and load the state in the specified path. """
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
e_input, e_output, _, _, _ = load_energy_data(args.seed)
model = EnergyNet(e_input.shape[1], e_output.shape[1]).to(device)
# load the model state
save_path = f"models/Energy_{args.save_name}.pt"
model.load_state_dict(torch.load(save_path))
return model
def apply_visualization(model, args):
""" Set all necessary parameters and call the right visualization method. """
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
e_input, e_output, _, _, _ = load_energy_data(args.seed)
input_test = e_input[e_input.shape[0] // 2:]
output_test = e_output[e_output.shape[0] // 2:]
start_index = args.num_vis * args.vis_next
if start_index + args.num_vis > input_test.shape[0]:
raise ValueError(f"There are not enough test instances to visualize with respect to \"args.num_vis\": "
f"{args.num_vis} and \"args.vis_next\": {args.vis_next}")
input_test = input_test[start_index:start_index + args.num_vis]
tensor_input_vis = torch.Tensor(input_test).to(device)
output_test = output_test[start_index:start_index + args.num_vis]
attributions = {}
for i in range(args.num_vis):
# custom input
if args.vis_input:
vis_input_str = input("Enter input values ([-1, 1], 4 values, whitespace separated):")
for index, value in enumerate(vis_input_str.split()):
tensor_input_vis[i, index] = float(value)
for output_index in range(5):
ig = IntegratedGradients(model)
            # open question: use the commented-out per-input-baseline code below or the block after it? Both seem very similar
# for input_index in range(4):
# if args.vis_only_input != -1 and args.vis_only_input != input_index:
# continue
# # baseline
# bl = tensor_input_vis[i:i + 1].detach().clone()
# bl[0, input_index] = 0
# attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
# attr = attr.detach().cpu().numpy()[0]
# if (input_index, output_index) in attributions:
# attributions[(input_index, output_index)] += attr[input_index]
# else:
# attributions[(input_index, output_index)] = attr[input_index]
# which baseline to use
choose_baseline = args.baseline
# baseline for all smallest and all largest inputs
if choose_baseline == "edges":
bl = tensor_input_vis[i:i + 1].detach().clone()
bl[0, :] = -1
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
bl[0, :] = 1
attr2 = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr += attr2.detach().cpu().numpy()[0]
# random: multiple baselines, uniformly distributed within [-1, 1], average for final attribution
elif choose_baseline == "random":
all_attr = None
for bls in range(10):
bl = ((torch.rand(1, 4) * 2) - 1).to(device)
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
if bls == 0:
all_attr = attr
else:
all_attr += attr
attr = all_attr / 10
# gaussian: multiple baselines, gaussian distributed around 0, average for final attribution
elif choose_baseline == "gaussian":
all_attr = None
for bls in range(10):
std = 0.25 # pretty close to the underlying data std
bl = torch.normal(torch.tensor([[0.0, 0.0, 0.0, 0.0]]), std).to(device)
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
if bls == 0:
all_attr = attr
else:
all_attr += attr
attr = all_attr / 10
# baseline as specified in args
else:
bl = tensor_input_vis[i:i + 1].detach().clone()
bl[0, :] = float(choose_baseline)
attr = ig.attribute(tensor_input_vis[i:i + 1], baselines=bl, target=output_index)
attr = attr.detach().cpu().numpy()[0]
for input_index in range(4):
if args.vis_only_input == -1 or args.vis_only_input == input_index:
attributions[(input_index, output_index)] = attr[input_index]
if not args.vis_agg:
pred = model(tensor_input_vis[i:i + 1]).detach().cpu().numpy()
if args.vis_input:
out_label = None
else:
out_label = output_test[i:i + 1]
vis_energy(attributions, values=(tensor_input_vis[i:i + 1].detach().cpu().numpy(), out_label, pred),
edge_labels=args.vis_only_input != -1, vis_real=args.vis_real_values)
attributions = {}
if args.vis_agg:
# pred = model(tensor_input_vis).detach().cpu().numpy()
vis_energy(attributions, edge_labels=args.vis_only_input != -1, vis_real=args.vis_real_values)
return
def prepare_arguments():
""" Define and return arguments. """
parser = argparse.ArgumentParser(description="PyTorch Energy Experiment v0_1")
# model training
parser.add_argument("--batch-size", type=int, default=64, metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument("--test-batch-size", type=int, default=1000, metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument("--epochs", type=int, default=5, metavar="N", help="number of epochs to train (default: 5)")
parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)")
parser.add_argument("--gamma", type=float, default=0.7, metavar="M", help="Learning rate step gamma (default: 0.7)")
parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training")
parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass")
parser.add_argument("--seed", type=int, default=0, metavar="S", help="random seed (default: 0)")
parser.add_argument("--log-interval", type=int, default=10, metavar="N",
help="how many batches to wait before logging training status")
# model saving / loading
parser.add_argument("--save-model", action="store_true", default=False, help="save the current model")
parser.add_argument("--load-model", action="store_true", default=False, help="load a model")
parser.add_argument("--save-name", type=str, default="0", metavar="NAME",
help="name with which the model will be saved or loaded")
# visualization
parser.add_argument("--vis", action="store_true", default=False, help="visualize model performance and attribution")
parser.add_argument("--num-vis", type=int, default=10, metavar="N", help="number of instanced to be visualized")
parser.add_argument("--vis-agg", action="store_true", default=False,
help="aggregate the attribution of all \"num-vis\" instances before the visualization)")
parser.add_argument("--vis-next", type=int, default=0, metavar="N",
help="skips the first vis_next * num_vis instances, can visualize other instances that way")
parser.add_argument("--vis-save", action="store_true", default=False,
help="save the visualization, otherwise simply show it")
parser.add_argument("--vis-input", action="store_true", default=False,
help="enter own inputs for the visualization")
parser.add_argument("--baseline", type=str, default="0", metavar="NAME OR NUMBER",
help="which baseline to use (\"edges\", \"random\", or a number as the baseline)")
parser.add_argument("--vis-real-values", action="store_true", default=False,
help="also show the unnormalized values on the visualization")
parser.add_argument("--vis-only-input", type=int, default=-1, metavar="N", help="only visualize for specific input")
args = parser.parse_args()
return args
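# Example invocations (added; the script filename "energy.py" is an assumption, use whatever this file is saved as):
#   python energy.py --epochs 5 --save-model --save-name 0
#   python energy.py --load-model --save-name 0 --vis --num-vis 10 --baseline edges --vis-real-values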
def main():
""" Run the neural network with the specified arguments. """
# get arguments
args = prepare_arguments()
# get the model
if not args.load_model:
# train the model
model = train_model(args)
else:
# load the model
model = prepare_model(args)
# obtain and visualize attributions
if args.vis:
apply_visualization(model, args)
if __name__ == "__main__":
main()
| [
"networkx.draw_networkx_edge_labels",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"numpy.genfromtxt",
"networkx.draw_networkx_edges",
"numpy.mean",
"scipy.sparse.lil_matrix",
"argparse.ArgumentParser",
"matplotlib.pyplot.Normalize",
"networkx.DiGraph",
"captum.attr.Integrated... | [((924, 944), 'numpy.full', 'np.full', (['time', 'c_buy'], {}), '(time, c_buy)\n', (931, 944), True, 'import numpy as np\n'), ((959, 977), 'numpy.zeros', 'np.zeros', (['(time * 5)'], {}), '(time * 5)\n', (967, 977), True, 'import numpy as np\n'), ((1453, 1489), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * time, x_dim)'], {}), '((2 * time, x_dim))\n', (1470, 1489), False, 'from scipy import sparse\n'), ((1511, 1543), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * time, 1)'], {}), '((2 * time, 1))\n', (1528, 1543), False, 'from scipy import sparse\n'), ((2331, 2373), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * (time - 1), x_dim)'], {}), '((2 * (time - 1), x_dim))\n', (2348, 2373), False, 'from scipy import sparse\n'), ((2399, 2437), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2 * (time - 1), 1)'], {}), '((2 * (time - 1), 1))\n', (2416, 2437), False, 'from scipy import sparse\n'), ((3375, 3407), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, x_dim)'], {}), '((time, x_dim))\n', (3392, 3407), False, 'from scipy import sparse\n'), ((3436, 3464), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, 1)'], {}), '((time, 1))\n', (3453, 3464), False, 'from scipy import sparse\n'), ((3746, 3778), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, x_dim)'], {}), '((time, x_dim))\n', (3763, 3778), False, 'from scipy import sparse\n'), ((3808, 3836), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(time, 1)'], {}), '((time, 1))\n', (3825, 3836), False, 'from scipy import sparse\n'), ((4056, 4085), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2, x_dim)'], {}), '((2, x_dim))\n', (4073, 4085), False, 'from scipy import sparse\n'), ((4116, 4141), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['(2, 1)'], {}), '((2, 1))\n', (4133, 4141), False, 'from scipy import sparse\n'), ((4467, 4596), 'scipy.sparse.vstack', 'sparse.vstack', (['(a_energy_balance, a_battery_equation, a_pv_production_limit,\n a_battery_charge_limit, a_battery_initial_state)'], {}), '((a_energy_balance, a_battery_equation, a_pv_production_limit,\n a_battery_charge_limit, a_battery_initial_state))\n', (4480, 4596), False, 'from scipy import sparse\n'), ((4610, 4739), 'scipy.sparse.vstack', 'sparse.vstack', (['(b_energy_balance, b_battery_equation, b_pv_production_limit,\n b_battery_charge_limit, b_battery_initial_state)'], {}), '((b_energy_balance, b_battery_equation, b_pv_production_limit,\n b_battery_charge_limit, b_battery_initial_state))\n', (4623, 4739), False, 'from scipy import sparse\n'), ((5370, 5401), 'numpy.load', 'np.load', (['"""data/energy_data.npy"""'], {}), "('data/energy_data.npy')\n", (5377, 5401), True, 'import numpy as np\n'), ((5749, 5769), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5763, 5769), True, 'import numpy as np\n'), ((5774, 5802), 'numpy.random.shuffle', 'np.random.shuffle', (['orig_data'], {}), '(orig_data)\n', (5791, 5802), True, 'import numpy as np\n'), ((6922, 6934), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (6932, 6934), True, 'import networkx as nx\n'), ((11159, 11191), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['g', '"""pos"""'], {}), "(g, 'pos')\n", (11181, 11191), True, 'import networkx as nx\n'), ((11197, 11264), 'networkx.draw', 'nx.draw', (['g', 'pos'], {'labels': 'labeldict', 'with_labels': '(True)', 'node_color': '"""w"""'}), "(g, pos, labels=labeldict, with_labels=True, node_color='w')\n", (11204, 11264), True, 'import networkx as nx\n'), 
((11270, 11411), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['g', 'pos'], {'edgelist': 'edge_list', 'edge_color': 'edge_attr', 'edge_cmap': 'cmap', 'edge_vmin': '(-color_bounds)', 'edge_vmax': 'color_bounds'}), '(g, pos, edgelist=edge_list, edge_color=edge_attr,\n edge_cmap=cmap, edge_vmin=-color_bounds, edge_vmax=color_bounds)\n', (11292, 11411), True, 'import networkx as nx\n'), ((11778, 11794), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {}), '(sm)\n', (11790, 11794), True, 'import matplotlib.pyplot as plt\n'), ((11800, 11810), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11808, 11810), True, 'import matplotlib.pyplot as plt\n'), ((12181, 12201), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (12192, 12201), True, 'import matplotlib.pyplot as plt\n'), ((12535, 12555), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (12546, 12555), True, 'import matplotlib.pyplot as plt\n'), ((12969, 13000), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.6)'}), '(hspace=0.6)\n', (12988, 13000), True, 'import matplotlib.pyplot as plt\n'), ((13006, 13016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13014, 13016), True, 'import matplotlib.pyplot as plt\n'), ((14076, 14094), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (14092, 14094), False, 'import torch\n'), ((15062, 15095), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (15078, 15095), False, 'import torch\n'), ((15641, 15684), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (15653, 15684), False, 'import torch\n'), ((15690, 15718), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15707, 15718), False, 'import torch\n'), ((16415, 16440), 'torch.Tensor', 'torch.Tensor', (['input_train'], {}), '(input_train)\n', (16427, 16440), False, 'import torch\n'), ((16465, 16489), 'torch.Tensor', 'torch.Tensor', (['input_test'], {}), '(input_test)\n', (16477, 16489), False, 'import torch\n'), ((16517, 16543), 'torch.Tensor', 'torch.Tensor', (['output_train'], {}), '(output_train)\n', (16529, 16543), False, 'import torch\n'), ((16569, 16594), 'torch.Tensor', 'torch.Tensor', (['output_test'], {}), '(output_test)\n', (16581, 16594), False, 'import torch\n'), ((16616, 16670), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tensor_input_train', 'tensor_output_train'], {}), '(tensor_input_train, tensor_output_train)\n', (16629, 16670), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16690, 16742), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tensor_input_test', 'tensor_output_test'], {}), '(tensor_input_test, tensor_output_test)\n', (16703, 16742), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16767, 16808), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {}), '(dataset_train, **train_kwargs)\n', (16777, 16808), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16841, 16881), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {}), '(dataset_train, **test_kwargs)\n', (16851, 16881), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((16904, 16943), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_test'], {}), '(dataset_test, **test_kwargs)\n', (16914, 16943), False, 'from torch.utils.data import TensorDataset, 
DataLoader\n'), ((17096, 17144), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': 'args.gamma'}), '(optimizer, step_size=1, gamma=args.gamma)\n', (17102, 17144), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((17894, 17937), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (17906, 17937), False, 'import torch\n'), ((18414, 18457), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (18426, 18457), False, 'import torch\n'), ((23621, 23690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Energy Experiment v0_1"""'}), "(description='PyTorch Energy Experiment v0_1')\n", (23644, 23690), False, 'import argparse\n'), ((5650, 5679), 'numpy.load', 'np.load', (['"""data/data_plus.npy"""'], {}), "('data/data_plus.npy')\n", (5657, 5679), True, 'import numpy as np\n'), ((5700, 5743), 'numpy.concatenate', 'np.concatenate', (['(orig_data, orig_data_plus)'], {}), '((orig_data, orig_data_plus))\n', (5714, 5743), True, 'import numpy as np\n'), ((6287, 6313), 'numpy.mean', 'np.mean', (['orig_data'], {'axis': '(0)'}), '(orig_data, axis=0)\n', (6294, 6313), True, 'import numpy as np\n'), ((6436, 6448), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (6442, 6448), True, 'import numpy as np\n'), ((11106, 11123), 'numpy.abs', 'np.abs', (['edge_attr'], {}), '(edge_attr)\n', (11112, 11123), True, 'import numpy as np\n'), ((11573, 11650), 'networkx.draw_networkx_edge_labels', 'nx.draw_networkx_edge_labels', (['g'], {'pos': 'pos', 'edge_labels': 'e_labels', 'label_pos': '(0.5)'}), '(g, pos=pos, edge_labels=e_labels, label_pos=0.5)\n', (11601, 11650), True, 'import networkx as nx\n'), ((13354, 13386), 'torch.nn.Linear', 'nn.Linear', (['dim_input', '(factor * 2)'], {}), '(dim_input, factor * 2)\n', (13363, 13386), True, 'import torch.nn as nn\n'), ((13406, 13439), 'torch.nn.Linear', 'nn.Linear', (['(factor * 2)', '(factor * 2)'], {}), '(factor * 2, factor * 2)\n', (13415, 13439), True, 'import torch.nn as nn\n'), ((13460, 13493), 'torch.nn.Linear', 'nn.Linear', (['(factor * 2)', '(factor * 2)'], {}), '(factor * 2, factor * 2)\n', (13469, 13493), True, 'import torch.nn as nn\n'), ((13513, 13542), 'torch.nn.Linear', 'nn.Linear', (['(factor * 2)', 'factor'], {}), '(factor * 2, factor)\n', (13522, 13542), True, 'import torch.nn as nn\n'), ((13562, 13591), 'torch.nn.Linear', 'nn.Linear', (['factor', 'dim_output'], {}), '(factor, dim_output)\n', (13571, 13591), True, 'import torch.nn as nn\n'), ((13693, 13705), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13702, 13705), True, 'import torch.nn.functional as func\n'), ((13742, 13754), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13751, 13754), True, 'import torch.nn.functional as func\n'), ((13792, 13804), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13801, 13804), True, 'import torch.nn.functional as func\n'), ((13841, 13853), 'torch.nn.functional.relu', 'func.relu', (['h'], {}), '(h)\n', (13850, 13853), True, 'import torch.nn.functional as func\n'), ((15105, 15120), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15118, 15120), False, 'import torch\n'), ((15602, 15627), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15625, 15627), False, 'import torch\n'), ((17855, 17880), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17878, 17880), False, 'import 
torch\n'), ((18177, 18198), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (18187, 18198), False, 'import torch\n'), ((18375, 18400), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18398, 18400), False, 'import torch\n'), ((767, 802), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/TS_Demand.csv"""'], {}), "('data/TS_Demand.csv')\n", (780, 802), True, 'import numpy as np\n'), ((837, 873), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/TS_PVAvail.csv"""'], {}), "('data/TS_PVAvail.csv')\n", (850, 873), True, 'import numpy as np\n'), ((1032, 1048), 'numpy.array', 'np.array', (['[c_pv]'], {}), '([c_pv])\n', (1040, 1048), True, 'import numpy as np\n'), ((1050, 1067), 'numpy.array', 'np.array', (['[c_bat]'], {}), '([c_bat])\n', (1058, 1067), True, 'import numpy as np\n'), ((11699, 11751), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': '(-color_bounds)', 'vmax': 'color_bounds'}), '(vmin=-color_bounds, vmax=color_bounds)\n', (11712, 11751), True, 'import matplotlib.pyplot as plt\n'), ((19014, 19038), 'torch.Tensor', 'torch.Tensor', (['input_test'], {}), '(input_test)\n', (19026, 19038), False, 'import torch\n'), ((19507, 19533), 'captum.attr.IntegratedGradients', 'IntegratedGradients', (['model'], {}), '(model)\n', (19526, 19533), False, 'from captum.attr import IntegratedGradients\n'), ((21232, 21248), 'torch.rand', 'torch.rand', (['(1)', '(4)'], {}), '(1, 4)\n', (21242, 21248), False, 'import torch\n'), ((21942, 21978), 'torch.tensor', 'torch.tensor', (['[[0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0]])\n', (21954, 21978), False, 'import torch\n')] |