code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import os
import io
from setuptools import setup
def get_version(filename):
here = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(here, filename))
version_match = f.read()
f.close()
if version_match:
return version_match
raise RuntimeError("Unable to find version string.")
setup(name='pytest-progress',
version=get_version('version.txt'),
description='pytest plugin for instant test progress status',
long_description=io.open('README.rst', encoding='utf-8', errors='ignore').read(),
author='santosh',
author_email=u'<EMAIL>',
url=u'https://github.com/ssrikanta/pytest-progress',
license = 'MIT',
license_file = 'LICENSE',
py_modules=['pytest_progress'],
entry_points={'pytest11': ['progress = pytest_progress']},
install_requires=['pytest>=2.7'],
keywords='py.test pytest report',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Pytest',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
)
|
[
"os.path.abspath",
"os.path.join",
"io.open"
] |
[((107, 132), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (122, 132), False, 'import os\n'), ((147, 175), 'os.path.join', 'os.path.join', (['here', 'filename'], {}), '(here, filename)\n', (159, 175), False, 'import os\n'), ((495, 551), 'io.open', 'io.open', (['"""README.rst"""'], {'encoding': '"""utf-8"""', 'errors': '"""ignore"""'}), "('README.rst', encoding='utf-8', errors='ignore')\n", (502, 551), False, 'import io\n')]
|
import os
from pathlib import Path
class WorkingDirFactory:
def create(self):
return Path(os.getcwd())
|
[
"os.getcwd"
] |
[((104, 115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (113, 115), False, 'import os\n')]
|
import os
from unittest import TestCase
from unittest.mock import patch
from ui_automation_core.utilities.log_utils import LogUtils
class LoggingTest(TestCase):
def test_LogUtils_is_instantiate(self):
log_dir = '../logs/'
log_util = LogUtils(log_dir)
self.assertEqual('An instance of LogUtils is created and `logs` folder is created.', str(log_util))
@patch('builtins.print')
def test_create_log_directory(self, mock_print):
"""
Tests whether `Logs` directory gets created if it does not exist or skips execution if the directory exists.
:param mock_print:
:return:
"""
is_log_folder_exists = os.path.isdir('../logs/')
log_dir = '../logs/'
LogUtils(log_dir).create_log_directory()
if is_log_folder_exists:
mock_print.assert_called_with('Directory ../logs/ already exists')
else:
mock_print.assert_called_with('Directory ../logs/ Created')
|
[
"unittest.mock.patch",
"ui_automation_core.utilities.log_utils.LogUtils",
"os.path.isdir"
] |
[((389, 412), 'unittest.mock.patch', 'patch', (['"""builtins.print"""'], {}), "('builtins.print')\n", (394, 412), False, 'from unittest.mock import patch\n'), ((257, 274), 'ui_automation_core.utilities.log_utils.LogUtils', 'LogUtils', (['log_dir'], {}), '(log_dir)\n', (265, 274), False, 'from ui_automation_core.utilities.log_utils import LogUtils\n'), ((682, 707), 'os.path.isdir', 'os.path.isdir', (['"""../logs/"""'], {}), "('../logs/')\n", (695, 707), False, 'import os\n'), ((745, 762), 'ui_automation_core.utilities.log_utils.LogUtils', 'LogUtils', (['log_dir'], {}), '(log_dir)\n', (753, 762), False, 'from ui_automation_core.utilities.log_utils import LogUtils\n')]
|
# -*- coding: UTF-8 -*-
"""
训练神经网络,将参数(Weight)存入 HDF5 文件
"""
import numpy as np
import tensorflow as tf
from utils import *
from network import *
"""
==== 一些术语的概念 ====
# Batch size : 批次(样本)数目。一次迭代(Forword 运算(用于得到损失函数)以及 BackPropagation 运算(用于更新神经网络参数))所用的样本数目。Batch size 越大,所需的内存就越大
# Iteration : 迭代。每一次迭代更新一次权重(网络参数),每一次权重更新需要 Batch size 个数据进行 Forward 运算,再进行 BP 运算
# Epoch : 纪元/时代。所有的训练样本完成一次迭代
# 假如 : 训练集有 1000 个样本,Batch_size=10
# 那么 : 训练完整个样本集需要: 100 次 Iteration,1 个 Epoch
# 但一般我们都不止训练一个 Epoch
"""
# 训练神经网络
def train():
notes = get_notes()
# 得到所有不重复(因为用了 set)的音调数目
num_pitch = len(set(notes))
network_input, network_output = prepare_sequences(notes, num_pitch)
model = network_model(network_input, num_pitch)
filepath = "weights-{epoch:02d}-{loss:.4f}.hdf5"
# 用 Checkpoint(检查点)文件在每一个 Epoch 结束时保存模型的参数(Weights)
# 不怕训练过程中丢失模型参数。可以在我们对 Loss(损失)满意了的时候随时停止训练
checkpoint = tf.keras.callbacks.ModelCheckpoint(
filepath, # 保存的文件路径
monitor='loss', # 监控的对象是 损失(loss)
verbose=0,
save_best_only=True, # 不替换最近的数值最佳的监控对象的文件
mode='min' # 取损失最小的
)
callbacks_list = [checkpoint]
# 用 fit 方法来训练模型
model.fit(network_input, network_output, epochs=100, batch_size=64, callbacks=callbacks_list)
def prepare_sequences(notes, num_pitch):
"""
为神经网络准备好供训练的序列
"""
sequence_length = 100 # 序列长度
# 得到所有音调的名字
pitch_names = sorted(set(item for item in notes))
# 创建一个字典,用于映射 音调 和 整数
pitch_to_int = dict((pitch, num) for num, pitch in enumerate(pitch_names))
# 创建神经网络的输入序列和输出序列
network_input = []
network_output = []
for i in range(0, len(notes) - sequence_length, 1):
sequence_in = notes[i: i + sequence_length]
sequence_out = notes[i + sequence_length]
network_input.append([pitch_to_int[char] for char in sequence_in])
network_output.append(pitch_to_int[sequence_out])
n_patterns = len(network_input)
# 将输入的形状转换成神经网络模型可以接受的
network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))
# 将 输入 标准化 / 归一化
# 归一话可以让之后的优化器(optimizer)更快更好地找到误差最小值
network_input = network_input / float(num_pitch)
# 将期望输出转换成 {0, 1} 组成的布尔矩阵,为了配合 categorical_crossentropy 误差算法使用
network_output = tf.keras.utils.to_categorical(network_output)
return network_input, network_output
if __name__ == '__main__':
train()
|
[
"tensorflow.keras.utils.to_categorical",
"numpy.reshape",
"tensorflow.keras.callbacks.ModelCheckpoint"
] |
[((917, 1025), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=0,\n save_best_only=True, mode='min')\n", (951, 1025), True, 'import tensorflow as tf\n'), ((2025, 2084), 'numpy.reshape', 'np.reshape', (['network_input', '(n_patterns, sequence_length, 1)'], {}), '(network_input, (n_patterns, sequence_length, 1))\n', (2035, 2084), True, 'import numpy as np\n'), ((2291, 2336), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['network_output'], {}), '(network_output)\n', (2320, 2336), True, 'import tensorflow as tf\n')]
|
from uuid import UUID
import pytest
from katka import models
@pytest.mark.django_db
class TestProjectViewSet:
def test_list(self, client, logged_in_user, my_team, my_project):
response = client.get("/projects/")
assert response.status_code == 200
parsed = response.json()
assert len(parsed) == 1
assert parsed[0]["name"] == "Project D"
assert parsed[0]["slug"] == "PRJD"
parsed_team = parsed[0]["team"]
assert UUID(parsed_team) == my_team.public_identifier
def test_filtered_list(self, client, logged_in_user, my_team, my_project, my_other_team, my_other_project):
response = client.get("/projects/?team=" + str(my_other_team.public_identifier))
assert response.status_code == 200
parsed = response.json()
assert len(parsed) == 1
assert parsed[0]["name"] == "Project 2"
assert parsed[0]["slug"] == "PRJ2"
parsed_team = parsed[0]["team"]
assert UUID(parsed_team) == my_other_team.public_identifier
def test_filtered_list_non_existing_team(
self, client, logged_in_user, my_team, my_project, my_other_team, my_other_project
):
response = client.get("/applications/?project=12345678-1234-5678-1234-567812345678")
assert response.status_code == 200
parsed = response.json()
assert len(parsed) == 0
def test_list_excludes_inactive(self, client, logged_in_user, my_team, deactivated_project):
response = client.get("/projects/")
assert response.status_code == 200
parsed = response.json()
assert len(parsed) == 0
def test_get(self, client, logged_in_user, my_team, my_project):
response = client.get(f"/projects/{my_project.public_identifier}/")
assert response.status_code == 200
parsed = response.json()
assert parsed["name"] == "Project D"
assert parsed["slug"] == "PRJD"
assert UUID(parsed["team"]) == my_team.public_identifier
def test_get_excludes_inactive(self, client, logged_in_user, my_team, deactivated_project):
response = client.get(f"/projects/{deactivated_project.public_identifier}/")
assert response.status_code == 404
def test_delete(self, client, logged_in_user, my_team, my_project):
response = client.delete(f"/projects/{my_project.public_identifier}/")
assert response.status_code == 204
p = models.Project.objects.get(pk=my_project.public_identifier)
assert p.deleted is True
def test_update(self, client, logged_in_user, my_team, my_project):
url = f"/projects/{my_project.public_identifier}/"
data = {"name": "Project X", "slug": "PRJX", "team": my_team.public_identifier}
response = client.put(url, data, content_type="application/json")
assert response.status_code == 200
p = models.Project.objects.get(pk=my_project.public_identifier)
assert p.name == "Project X"
def test_update_deactivated_team(self, client, logged_in_user, deactivated_team, my_project):
url = f"/projects/{my_project.public_identifier}/"
data = {"name": "Project X", "slug": "PRJX", "team": deactivated_team.public_identifier}
response = client.put(url, data, content_type="application/json")
assert response.status_code == 403
def test_update_nonexistent_team(self, client, logged_in_user, my_project):
url = f"/projects/{my_project.public_identifier}/"
data = {"name": "Project X", "slug": "PRJX", "team": "00000000-0000-0000-0000-000000000000"}
response = client.put(url, data, content_type="application/json")
assert response.status_code == 403
def test_partial_update(self, client, logged_in_user, my_team, my_project):
url = f"/projects/{my_project.public_identifier}/"
data = {"name": "Project X"}
response = client.patch(url, data, content_type="application/json")
assert response.status_code == 200
p = models.Project.objects.get(pk=my_project.public_identifier)
assert p.name == "Project X"
def test_create(self, client, logged_in_user, my_team, my_project):
before = models.Project.objects.count()
url = f"/projects/"
data = {"name": "Project X", "slug": "PRJX", "team": my_team.public_identifier}
response = client.post(url, data=data, content_type="application/json")
assert response.status_code == 201
p = models.Project.objects.get(name="Project X")
assert p.name == "Project X"
assert models.Project.objects.count() == before + 1
|
[
"katka.models.Project.objects.get",
"uuid.UUID",
"katka.models.Project.objects.count"
] |
[((2437, 2496), 'katka.models.Project.objects.get', 'models.Project.objects.get', ([], {'pk': 'my_project.public_identifier'}), '(pk=my_project.public_identifier)\n', (2463, 2496), False, 'from katka import models\n'), ((2879, 2938), 'katka.models.Project.objects.get', 'models.Project.objects.get', ([], {'pk': 'my_project.public_identifier'}), '(pk=my_project.public_identifier)\n', (2905, 2938), False, 'from katka import models\n'), ((4014, 4073), 'katka.models.Project.objects.get', 'models.Project.objects.get', ([], {'pk': 'my_project.public_identifier'}), '(pk=my_project.public_identifier)\n', (4040, 4073), False, 'from katka import models\n'), ((4201, 4231), 'katka.models.Project.objects.count', 'models.Project.objects.count', ([], {}), '()\n', (4229, 4231), False, 'from katka import models\n'), ((4483, 4527), 'katka.models.Project.objects.get', 'models.Project.objects.get', ([], {'name': '"""Project X"""'}), "(name='Project X')\n", (4509, 4527), False, 'from katka import models\n'), ((481, 498), 'uuid.UUID', 'UUID', (['parsed_team'], {}), '(parsed_team)\n', (485, 498), False, 'from uuid import UUID\n'), ((984, 1001), 'uuid.UUID', 'UUID', (['parsed_team'], {}), '(parsed_team)\n', (988, 1001), False, 'from uuid import UUID\n'), ((1955, 1975), 'uuid.UUID', 'UUID', (["parsed['team']"], {}), "(parsed['team'])\n", (1959, 1975), False, 'from uuid import UUID\n'), ((4580, 4610), 'katka.models.Project.objects.count', 'models.Project.objects.count', ([], {}), '()\n', (4608, 4610), False, 'from katka import models\n')]
|
# -*- coding: utf-8 -*-
from luckydonaldUtils.logger import logging
from pytgbot.bot import Bot
from pytgbot.exceptions import TgApiServerException, TgApiParseException
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)
class Webhook(Bot):
"""
Subclass of Bot, will be returned of a sucessful webhook setting.
Differs with the normal Bot class, as the sending function stores the result to send,
so you can actually get that and return the data on your incomming message.
"""
stored_request = None
def _prepare_request(self, command, query):
"""
:param command: The Url command parameter
:type command: str
:param query: will get json encoded.
:type query: dict
:return:
"""
from luckydonaldUtils.encoding import to_native as n
from pytgbot.api_types.sendable import Sendable
from pytgbot.api_types import as_array
from DictObject import DictObject
import json
params = {}
for key in query.keys():
element = query[key]
if element is not None:
if isinstance(element, Sendable):
params[key] = json.dumps(as_array(element))
else:
params[key] = element
url = self._base_url.format(api_key=n(self.api_key), command=n(command))
return DictObject(url=url, params=params)
# end def
def _do_request(self, url, params=None, files=None, use_long_polling=None, request_timeout=None):
"""
:param url: The complete url to send to
:type url: str
:keyword params: Parameter for that connection
:keyword files: Optional files parameters
:keyword use_long_polling: if it should use long polling.
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:keyword request_timeout: When the request should time out.
:type request_timeout: int
:return: json data received
:rtype: DictObject.DictObject
"""
import requests
r = requests.post(url, params=params, files=files, stream=use_long_polling,
verify=True, timeout=request_timeout)
# No self signed certificates. Telegram should be trustworthy anyway...
from DictObject import DictObject
try:
logger.debug("Response: {}".format(r.json()))
json_data = DictObject.objectify(r.json())
except Exception:
logger.exception("Parsing answer failed.\nRequest: {r!s}\nContent: {r.content}".format(r=r))
raise
# end if
json_data["response"] = r # TODO: does this failes on json lists? Does TG does that?
return json_data
# end def
def _process_response(self, json_data):
# TG should always return an dict, with at least a status or something.
if self.return_python_objects:
if json_data.ok != True:
raise TgApiServerException(
error_code=json_data.error_code if "error_code" in json_data else None,
response=json_data.response if "response" in json_data else None,
description=json_data.description if "description" in json_data else None,
request=r.request
)
# end if not ok
if "result" not in json_data:
raise TgApiParseException('Key "result" is missing.')
# end if no result
return json_data.result
# end if return_python_objects
return json_data
# end def
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
"""
Send a request to the api.
If the bot is set to return the json objects, it will look like this:
```json
{
"ok": bool,
"result": {...},
# optionally present:
"description": "human-readable description of the result",
"error_code": int
}
```
:param command: The Url command parameter
:type command: str
:keyword request_timeout: When the request should time out.
:type request_timeout: int
:keyword files: if it needs to send files.
:keyword use_long_polling: if it should use long polling.
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:param query: will get json encoded.
:return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
:rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
"""
params = self._prepare_request(command, query)
r = self._do_request(
params.url, params=params.params,
files=files, stream=use_long_polling, timeout=request_timeout
)
return self._process_response(r)
# end def do
|
[
"DictObject.DictObject",
"luckydonaldUtils.encoding.to_native",
"pytgbot.api_types.as_array",
"pytgbot.exceptions.TgApiServerException",
"luckydonaldUtils.logger.logging.getLogger",
"requests.post",
"pytgbot.exceptions.TgApiParseException"
] |
[((207, 234), 'luckydonaldUtils.logger.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (224, 234), False, 'from luckydonaldUtils.logger import logging\n'), ((1405, 1439), 'DictObject.DictObject', 'DictObject', ([], {'url': 'url', 'params': 'params'}), '(url=url, params=params)\n', (1415, 1439), False, 'from DictObject import DictObject\n'), ((2202, 2315), 'requests.post', 'requests.post', (['url'], {'params': 'params', 'files': 'files', 'stream': 'use_long_polling', 'verify': '(True)', 'timeout': 'request_timeout'}), '(url, params=params, files=files, stream=use_long_polling,\n verify=True, timeout=request_timeout)\n', (2215, 2315), False, 'import requests\n'), ((1353, 1368), 'luckydonaldUtils.encoding.to_native', 'n', (['self.api_key'], {}), '(self.api_key)\n', (1354, 1368), True, 'from luckydonaldUtils.encoding import to_native as n\n'), ((1378, 1388), 'luckydonaldUtils.encoding.to_native', 'n', (['command'], {}), '(command)\n', (1379, 1388), True, 'from luckydonaldUtils.encoding import to_native as n\n'), ((3108, 3372), 'pytgbot.exceptions.TgApiServerException', 'TgApiServerException', ([], {'error_code': "(json_data.error_code if 'error_code' in json_data else None)", 'response': "(json_data.response if 'response' in json_data else None)", 'description': "(json_data.description if 'description' in json_data else None)", 'request': 'r.request'}), "(error_code=json_data.error_code if 'error_code' in\n json_data else None, response=json_data.response if 'response' in\n json_data else None, description=json_data.description if 'description' in\n json_data else None, request=r.request)\n", (3128, 3372), False, 'from pytgbot.exceptions import TgApiServerException, TgApiParseException\n'), ((3551, 3598), 'pytgbot.exceptions.TgApiParseException', 'TgApiParseException', (['"""Key "result" is missing."""'], {}), '(\'Key "result" is missing.\')\n', (3570, 3598), False, 'from pytgbot.exceptions import TgApiServerException, TgApiParseException\n'), ((1226, 1243), 'pytgbot.api_types.as_array', 'as_array', (['element'], {}), '(element)\n', (1234, 1243), False, 'from pytgbot.api_types import as_array\n')]
|
from solution.recollection.get_tweets import get_tweets
from solution.ml.sentiment import AddSentimentAnalysis
from solution.ml.clustering import cluster
from solution.viz.wordclouds import GenWordcloud
from solution.viz.ngrams import ngram
from solution.viz.visualization import scatter
import pandas as pd
import os.path
def process(pais, prioridad):
file = f'data/{pais}/{prioridad}/embs.pkl'
if not os.path.exists(file):
df = cluster(
file = f'data/{pais}/{prioridad}/tweets.json',
verbose=True
)
df.to_pickle(file)
df = AddSentimentAnalysis(file)
df.to_pickle(f'data/{pais}/{prioridad}/sentiment.pkl')
def main():
# Uncomment if data not available
# get_tweets(n = 5000)
paises = ['mexico']
prioridades = ['excelencia_operativa']
# Uncomment to run all the 20 dataset (countries and priorities)
# paises = ['argentina', 'colombia', 'mexico', 'peru', 'spain']
# prioridades = ['crecimiento', 'excelencia_operativa','futuro_sostenible', 'salud_financiera']
for pais in paises:
for prioridad in prioridades:
process(pais, prioridad)
path = f'data/{pais}/{prioridad}/'
print('HEY')
df = pd.read_pickle(path + 'embs.pkl')
GenWordcloud(df, path + 'wordcloud.png')
ngram(df, 2, path + 'ngram.html')
scatter(df, path + 'scatter.html')
df = pd.read_pickle(path + 'sentiment.pkl')
if __name__ == '__main__':
print('HEY')
main()
|
[
"solution.ml.sentiment.AddSentimentAnalysis",
"solution.ml.clustering.cluster",
"solution.viz.visualization.scatter",
"solution.viz.ngrams.ngram",
"pandas.read_pickle",
"solution.viz.wordclouds.GenWordcloud"
] |
[((452, 518), 'solution.ml.clustering.cluster', 'cluster', ([], {'file': 'f"""data/{pais}/{prioridad}/tweets.json"""', 'verbose': '(True)'}), "(file=f'data/{pais}/{prioridad}/tweets.json', verbose=True)\n", (459, 518), False, 'from solution.ml.clustering import cluster\n'), ((596, 622), 'solution.ml.sentiment.AddSentimentAnalysis', 'AddSentimentAnalysis', (['file'], {}), '(file)\n', (616, 622), False, 'from solution.ml.sentiment import AddSentimentAnalysis\n'), ((1259, 1292), 'pandas.read_pickle', 'pd.read_pickle', (["(path + 'embs.pkl')"], {}), "(path + 'embs.pkl')\n", (1273, 1292), True, 'import pandas as pd\n'), ((1306, 1346), 'solution.viz.wordclouds.GenWordcloud', 'GenWordcloud', (['df', "(path + 'wordcloud.png')"], {}), "(df, path + 'wordcloud.png')\n", (1318, 1346), False, 'from solution.viz.wordclouds import GenWordcloud\n'), ((1360, 1393), 'solution.viz.ngrams.ngram', 'ngram', (['df', '(2)', "(path + 'ngram.html')"], {}), "(df, 2, path + 'ngram.html')\n", (1365, 1393), False, 'from solution.viz.ngrams import ngram\n'), ((1407, 1441), 'solution.viz.visualization.scatter', 'scatter', (['df', "(path + 'scatter.html')"], {}), "(df, path + 'scatter.html')\n", (1414, 1441), False, 'from solution.viz.visualization import scatter\n'), ((1460, 1498), 'pandas.read_pickle', 'pd.read_pickle', (["(path + 'sentiment.pkl')"], {}), "(path + 'sentiment.pkl')\n", (1474, 1498), True, 'import pandas as pd\n')]
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
from tkinter import filedialog
import tkinter.messagebox as tkmsg
import os
import time
import sys
import subprocess
############add APP info here###############
APP_NAME = "marueditor"
APP_NAME_TITLE = "Maruediter"
APP_LUA_JP = [""]
APP_LUA_EN = [""]
APP_TYPE = "32" # 64 , 32 , select
APP_ICON = "./marueditor.png"
APP_VER = "1.0"
APP_COMPANY = "Marusoftare"
############################################
lang = 0
step_v = 0
def bye():
next_step()
root.destroy()
exit()
def next_step():
global step_v, step
step_v = step_v + 1
step.set(step_v)
root = tkinter.Tk(className=APP_NAME+" Installer")
step = tkinter.IntVar()
if os.path.exists(APP_ICON):
root.iconphoto(True, tkinter.PhotoImage(file=APP_ICON))
step.set(0)
lang_v = tkinter.IntVar()
root.title("Language")
root.protocol("WM_DELETE_WINDOW", bye)
l = ttk.Label(root, text="Please select language when use install.")
l.pack(side="top")
b1 = ttk.Button(root, text='NEXT', command=next_step)
b1.pack(side="bottom")
rb1 = ttk.Radiobutton(text = '日本語', variable = lang_v, value = 0)
rb1.pack()
rb2 = ttk.Radiobutton(text = 'English', variable = lang_v, value = 1)
rb2.pack()
root.wait_variable(step)
root.geometry("500x300")
lang = lang_v.get()
lua_v = tkinter.IntVar()
l2 = tkinter.Listbox(root)
l2.pack()
if lang == 0:
root.title(APP_NAME_TITLE + " インストーラ")
rb1.configure(text="許諾", variable = lua_v)
rb2.configure(text="拒否", variable = lua_v)
l.configure(text="下記の利用規約をお読みください。")
for i in range(len(APP_LUA_JP)):
l2.insert("end", APP_LUA_JP[i])
else:
root.title(APP_NAME_TITLE + " Installer")
rb1.configure(text="accept", variable = lua_v)
rb2.configure(text="ignore", variable = lua_v)
l.configure(text="Please read LUA.")
for i in range(len(APP_LUA_EN)):
l2.insert("end", APP_LUA_EN[i])
root.wait_variable(step)
if lua_v.get() == 1:
exit()
l2.destroy()
if APP_TYPE == "select":
type_v = tkinter.IntVar()
if lang == 0:
l.configure(text="バージョンを選択してください。")
else:
l.configure(text="Please select version.")
rb1.configure(text="64bit", variable=type_v)
rb2.configure(text="32bit", variable=type_v)
root.wait_variable(step)
if type_v.get() == 0:
APP_TYPE = "64"
else:
APP_TYPE = "32"
else:
pass
rb1.destroy()
rb2.destroy()
e = ttk.Entry(root)
e.pack()
if os.name == 'nt':
if APP_TYPE == "64":
def_dir = os.environ["ProgramW6432"]
else:
def_dir = os.environ["ProgramFiles(x86)"]
else:
def_dir = "/"
e.insert("0",def_dir)
def select():
e.delete(0,tkinter.END)
e.insert("0",filedialog.askdirectory(initialdir=def_dir))
if e.get() == "":
e.insert("end",def_dir)
if lang == 0:
b2 = ttk.Button(root, text="選択", command=select)
l.configure(text="インストール先を選択してください。")
b1.configure(text="インストール")
else:
b2 = ttk.Button(root, text="Select", command=select)
l.configure(text="Please select install directory.")
b1.configure(text="Install")
b2.pack()
root.wait_variable(step)
i_dir = e.get()
if not os.path.exists(i_dir):
i_dir = def_dir
i_dir = os.path.join(i_dir,APP_NAME)
print(i_dir)
e.destroy()
b2.destroy()
p = ttk.Progressbar(root, orient="h", length=200, mode='determinate', maximum=100, value=0)
p.pack()
if lang == 0:
l.configure(text="インストール中です。")
else:
l.configure(text="Installing.")
b1.config(state="disable")
try:
if not os.path.exists(i_dir):
os.mkdir(i_dir)
p.configure(value=10)
p.update()
except PermissionError:
try:
os.chmod(os.path.dirname(i_dir),777)
os.mkdir(i_dir)
p.configure(value=10)
p.update()
except PermissionError:
if os.name == 'nt':
if lang == 0:
tkmsg.showerror("エラー","権限エラー:\n管理者権限で実行してください。")
exit()
else:
tkmsg.showerror("Error","PermissionError:\nPlease run in administer.")
exit()
else:
if lang == 0:
tkmsg.showerror("エラー","権限エラー:\nroot権限で('sudo'を付けて)再実行してください。")
exit()
else:
tkmsg.showerror("Error","PermissionError:\nPlease run in root.(add 'sudo')")
exit()
############add install command here#########
root.wait_variable(step)
#############################################
p.configure(value=100)
p.update()
if lang == 0:
l.configure(text="完了しました。")
b1.configure(text="終了")
else:
l.configure(text="Done.")
b1.configure(text="Exit")
b1.config(state="active")
root.wait_variable(step)
exit()
root.mainloop()
|
[
"tkinter.ttk.Label",
"tkinter.PhotoImage",
"os.mkdir",
"tkinter.ttk.Entry",
"tkinter.ttk.Radiobutton",
"tkinter.Listbox",
"os.path.exists",
"os.path.dirname",
"tkinter.ttk.Progressbar",
"tkinter.messagebox.showerror",
"tkinter.filedialog.askdirectory",
"tkinter.ttk.Button",
"tkinter.IntVar",
"os.path.join",
"tkinter.Tk"
] |
[((664, 709), 'tkinter.Tk', 'tkinter.Tk', ([], {'className': "(APP_NAME + ' Installer')"}), "(className=APP_NAME + ' Installer')\n", (674, 709), False, 'import tkinter\n'), ((715, 731), 'tkinter.IntVar', 'tkinter.IntVar', ([], {}), '()\n', (729, 731), False, 'import tkinter\n'), ((735, 759), 'os.path.exists', 'os.path.exists', (['APP_ICON'], {}), '(APP_ICON)\n', (749, 759), False, 'import os\n'), ((843, 859), 'tkinter.IntVar', 'tkinter.IntVar', ([], {}), '()\n', (857, 859), False, 'import tkinter\n'), ((926, 990), 'tkinter.ttk.Label', 'ttk.Label', (['root'], {'text': '"""Please select language when use install."""'}), "(root, text='Please select language when use install.')\n", (935, 990), False, 'from tkinter import ttk\n'), ((1015, 1063), 'tkinter.ttk.Button', 'ttk.Button', (['root'], {'text': '"""NEXT"""', 'command': 'next_step'}), "(root, text='NEXT', command=next_step)\n", (1025, 1063), False, 'from tkinter import ttk\n'), ((1093, 1146), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', ([], {'text': '"""日本語"""', 'variable': 'lang_v', 'value': '(0)'}), "(text='日本語', variable=lang_v, value=0)\n", (1108, 1146), False, 'from tkinter import ttk\n'), ((1170, 1227), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', ([], {'text': '"""English"""', 'variable': 'lang_v', 'value': '(1)'}), "(text='English', variable=lang_v, value=1)\n", (1185, 1227), False, 'from tkinter import ttk\n'), ((1323, 1339), 'tkinter.IntVar', 'tkinter.IntVar', ([], {}), '()\n', (1337, 1339), False, 'import tkinter\n'), ((1345, 1366), 'tkinter.Listbox', 'tkinter.Listbox', (['root'], {}), '(root)\n', (1360, 1366), False, 'import tkinter\n'), ((2424, 2439), 'tkinter.ttk.Entry', 'ttk.Entry', (['root'], {}), '(root)\n', (2433, 2439), False, 'from tkinter import ttk\n'), ((3206, 3235), 'os.path.join', 'os.path.join', (['i_dir', 'APP_NAME'], {}), '(i_dir, APP_NAME)\n', (3218, 3235), False, 'import os\n'), ((3277, 3369), 'tkinter.ttk.Progressbar', 'ttk.Progressbar', (['root'], {'orient': '"""h"""', 'length': '(200)', 'mode': '"""determinate"""', 'maximum': '(100)', 'value': '(0)'}), "(root, orient='h', length=200, mode='determinate', maximum=\n 100, value=0)\n", (3292, 3369), False, 'from tkinter import ttk\n'), ((2026, 2042), 'tkinter.IntVar', 'tkinter.IntVar', ([], {}), '()\n', (2040, 2042), False, 'import tkinter\n'), ((2826, 2869), 'tkinter.ttk.Button', 'ttk.Button', (['root'], {'text': '"""選択"""', 'command': 'select'}), "(root, text='選択', command=select)\n", (2836, 2869), False, 'from tkinter import ttk\n'), ((2959, 3006), 'tkinter.ttk.Button', 'ttk.Button', (['root'], {'text': '"""Select"""', 'command': 'select'}), "(root, text='Select', command=select)\n", (2969, 3006), False, 'from tkinter import ttk\n'), ((3155, 3176), 'os.path.exists', 'os.path.exists', (['i_dir'], {}), '(i_dir)\n', (3169, 3176), False, 'import os\n'), ((786, 819), 'tkinter.PhotoImage', 'tkinter.PhotoImage', ([], {'file': 'APP_ICON'}), '(file=APP_ICON)\n', (804, 819), False, 'import tkinter\n'), ((2704, 2747), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'initialdir': 'def_dir'}), '(initialdir=def_dir)\n', (2727, 2747), False, 'from tkinter import filedialog\n'), ((3508, 3529), 'os.path.exists', 'os.path.exists', (['i_dir'], {}), '(i_dir)\n', (3522, 3529), False, 'import os\n'), ((3539, 3554), 'os.mkdir', 'os.mkdir', (['i_dir'], {}), '(i_dir)\n', (3547, 3554), False, 'import os\n'), ((3690, 3705), 'os.mkdir', 'os.mkdir', (['i_dir'], {}), '(i_dir)\n', (3698, 3705), False, 'import os\n'), ((3654, 3676), 'os.path.dirname', 'os.path.dirname', (['i_dir'], {}), '(i_dir)\n', (3669, 3676), False, 'import os\n'), ((3853, 3905), 'tkinter.messagebox.showerror', 'tkmsg.showerror', (['"""エラー"""', '"""権限エラー:\n管理者権限で実行してください。"""'], {}), '(\'エラー\', """権限エラー:\n管理者権限で実行してください。""")\n', (3868, 3905), True, 'import tkinter.messagebox as tkmsg\n'), ((3959, 4033), 'tkinter.messagebox.showerror', 'tkmsg.showerror', (['"""Error"""', '"""PermissionError:\nPlease run in administer."""'], {}), '(\'Error\', """PermissionError:\nPlease run in administer.""")\n', (3974, 4033), True, 'import tkinter.messagebox as tkmsg\n'), ((4109, 4175), 'tkinter.messagebox.showerror', 'tkmsg.showerror', (['"""エラー"""', '"""権限エラー:\nroot権限で(\'sudo\'を付けて)再実行してください。"""'], {}), '(\'エラー\', """権限エラー:\nroot権限で(\'sudo\'を付けて)再実行してください。""")\n', (4124, 4175), True, 'import tkinter.messagebox as tkmsg\n'), ((4229, 4314), 'tkinter.messagebox.showerror', 'tkmsg.showerror', (['"""Error"""', '"""PermissionError:\nPlease run in root.(add \'sudo\')"""'], {}), '(\'Error\', """PermissionError:\nPlease run in root.(add \'sudo\')"""\n )\n', (4244, 4314), True, 'import tkinter.messagebox as tkmsg\n')]
|
# Auto generated configuration file
# using:
# Revision: 1.381.2.28
# Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: step4 --data --conditions auto:com10 --scenario pp -s ALCAHARVEST:SiStripGains --filein file:PromptCalibProdSiStripGains.root -n -1 --no_exec
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing("analysis")
options.register("globalTag", "auto:run3_data_express", VarParsing.multiplicity.singleton, VarParsing.varType.string, "Global tag (express, to check the homogeneity of the calibration range)")
options.register("outputDbFile", "sqlite_file:promptCalibConditions.db", VarParsing.multiplicity.singleton, VarParsing.varType.string, "Connection string of output database")
options.register("fitMethod", "Legacy", VarParsing.multiplicity.singleton, VarParsing.varType.string, "Fit strategy (Legacy, DDRng, DDRngAllConv, or DDRngConvExceptTOBL5L6")
options.register("DQMOutput", False, VarParsing.multiplicity.singleton, VarParsing.varType.bool, "Produce DQM output")
options.parseArguments()
process = cms.Process('ALCAHARVEST')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.AlCaHarvesting_cff')
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring(options.inputFiles),
processingMode = cms.untracked.string('RunsAndLumis')
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('FULLMERGE')
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.381.2.28 $'),
annotation = cms.untracked.string('step4 nevts:-1'),
name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
process.load("Configuration.StandardSequences.DQMSaverAtJobEnd_cff") ## multi-run
## temporary workaround
process.load("FWCore.Services.InitRootHandlers_cfi")
process.InitRootHandlers.ResetRootErrHandler = cms.untracked.bool(False)
# Additional output definition
# Other statements
process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiStripGains_dbOutput)
process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiStripGains_metadata)
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
process.PoolDBOutputService.connect = cms.string(options.outputDbFile)
# Path and EndPath definitions
process.ALCAHARVESTDQMSaveAndMetadataWriter = cms.Path(process.dqmSaver+process.pclMetadataWriter)
process.SiStripGains = cms.Path(process.ALCAHARVESTSiStripGains)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(options.DQMOutput)
if options.outputFile:
process.alcaSiStripGainsHarvester.StoreGainsTree = cms.untracked.bool(True)
process.TFileService = cms.Service("TFileService", fileName = cms.string(options.outputFile))
process.alcaSiStripGainsHarvester.GoodFracForTagProd = cms.untracked.double(.95)
process.alcaSiStripGainsHarvester.NClustersForTagProd = cms.untracked.double(2.e8)
if options.fitMethod == "Legacy":
process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(False)
process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(False)
process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(False)
elif options.fitMethod == "DDRng":
process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(True)
process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(False)
process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(False)
elif options.fitMethod == "DDRngAllConv":
process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(True)
process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(True)
process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(True)
elif options.fitMethod == "DDRngConvExceptTOBL5L6":
process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(True)
process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(True)
process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(False)
else:
raise RuntimeError("Unknown fit method: {0}".format(options.fitMethod))
# Schedule definition
process.schedule = cms.Schedule(process.SiStripGains,
process.ALCAHARVESTDQMSaveAndMetadataWriter)
#process.alcaSiStripGainsHarvester.calibrationMode = cms.untracked.string("IsoBunch")
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.untracked.double",
"FWCore.ParameterSet.Config.untracked.vstring",
"Configuration.AlCa.GlobalTag.GlobalTag",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.VarParsing.VarParsing",
"FWCore.ParameterSet.Config.Schedule",
"FWCore.ParameterSet.Config.Path"
] |
[((442, 464), 'FWCore.ParameterSet.VarParsing.VarParsing', 'VarParsing', (['"""analysis"""'], {}), "('analysis')\n", (452, 464), False, 'from FWCore.ParameterSet.VarParsing import VarParsing\n'), ((1162, 1188), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""ALCAHARVEST"""'], {}), "('ALCAHARVEST')\n", (1173, 1188), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2660, 2685), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (2678, 2685), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2980, 3031), 'Configuration.AlCa.GlobalTag.GlobalTag', 'GlobalTag', (['process.GlobalTag', 'options.globalTag', '""""""'], {}), "(process.GlobalTag, options.globalTag, '')\n", (2989, 3031), False, 'from Configuration.AlCa.GlobalTag import GlobalTag\n'), ((3071, 3103), 'FWCore.ParameterSet.Config.string', 'cms.string', (['options.outputDbFile'], {}), '(options.outputDbFile)\n', (3081, 3103), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3182, 3236), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(process.dqmSaver + process.pclMetadataWriter)'], {}), '(process.dqmSaver + process.pclMetadataWriter)\n', (3190, 3236), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3258, 3299), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['process.ALCAHARVESTSiStripGains'], {}), '(process.ALCAHARVESTSiStripGains)\n', (3266, 3299), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3333, 3370), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['options.DQMOutput'], {}), '(options.DQMOutput)\n', (3351, 3370), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3629, 3655), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(0.95)'], {}), '(0.95)\n', (3649, 3655), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3711, 3744), 'FWCore.ParameterSet.Config.untracked.double', 'cms.untracked.double', (['(200000000.0)'], {}), '(200000000.0)\n', (3731, 3744), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5099, 5178), 'FWCore.ParameterSet.Config.Schedule', 'cms.Schedule', (['process.SiStripGains', 'process.ALCAHARVESTDQMSaveAndMetadataWriter'], {}), '(process.SiStripGains, process.ALCAHARVESTDQMSaveAndMetadataWriter)\n', (5111, 5178), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3450, 3474), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (3468, 3474), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3831, 3856), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (3849, 3856), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3920, 3945), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (3938, 3945), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4016, 4041), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (4034, 4041), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1784, 1807), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(-1)'], {}), '(-1)\n', (1803, 1807), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1893, 1916), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (1914, 1916), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1934, 1975), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['options.inputFiles'], {}), '(options.inputFiles)\n', (1955, 1975), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1998, 2034), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""RunsAndLumis"""'], {}), "('RunsAndLumis')\n", (2018, 2034), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2091, 2131), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""ProductNotFound"""'], {}), "('ProductNotFound')\n", (2112, 2131), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2148, 2181), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""FULLMERGE"""'], {}), "('FULLMERGE')\n", (2168, 2181), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2269, 2316), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""$Revision: 1.381.2.28 $"""'], {}), "('$Revision: 1.381.2.28 $')\n", (2289, 2316), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2335, 2373), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""step4 nevts:-1"""'], {}), "('step4 nevts:-1')\n", (2355, 2373), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2386, 2429), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""PyReleaseValidation"""'], {}), "('PyReleaseValidation')\n", (2406, 2429), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4136, 4160), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (4154, 4160), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4224, 4249), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (4242, 4249), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4320, 4345), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (4338, 4345), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3541, 3571), 'FWCore.ParameterSet.Config.string', 'cms.string', (['options.outputFile'], {}), '(options.outputFile)\n', (3551, 3571), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4447, 4471), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (4465, 4471), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4535, 4559), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (4553, 4559), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4630, 4654), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (4648, 4654), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4766, 4790), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (4784, 4790), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4854, 4878), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (4872, 4878), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4949, 4974), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (4967, 4974), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, \
Dimension
import unittest
class TestCategory(unittest.TestCase):
def test_properties(self):
id = 'ID0'
index = 4
label = 'label with text'
category = Category(id, index, label)
self.assertEqual(category.id, id)
self.assertEqual(category.index, index)
self.assertEqual(category.label, label)
class TestBaseItem(unittest.TestCase):
def test_properties(self):
id = 'ID0'
index = 4
label = 'label with text'
category = BaseItem(id, index, label)
self.assertEqual(category.id, id)
self.assertEqual(category.index, index)
self.assertEqual(category.label, label)
def test_item_list_assignation(self):
item_list = ItemList()
self.assertRaises(ValueError, item_list.__setitem__, 0, 'd')
def test_item_list_count(self):
item_list = ItemList()
self.assertEqual(len(item_list), 0)
self.assertEqual(item_list.count, 0)
category1 = BaseItem('id', 0, 'label')
category2 = BaseItem('id', 1, 'label')
item_list.append(category1)
item_list.append(category2)
self.assertEqual(len(item_list), 2)
self.assertEqual(item_list.count, 2)
class TestDimension(unittest.TestCase):
def test_add_category(self):
id = 'ID0'
index = 4
label = 'label with text'
size = 2
dimension = Dimension(id, index, label, size)
category = Category(id, index, label)
self.assertEqual(dimension.categories.count, 0)
dimension.add_category(category)
self.assertEqual(dimension.categories.count, 1)
def test_create_from_json(self):
json = {
'label': "time",
'category': {
'index': {
'2010': '1',
'2011': '0'
},
'label': {
'2010': '2010',
'2011': 'test'
},
}
}
id = 'ID0'
index = 4
size = 5
label = 'time'
dimension = Dimension.create_from_json(id, index, size, json)
self.assertEqual(dimension.categories.count, 2)
self.assertEqual(dimension.label, label)
self.assertEqual(dimension.size, size)
self.assertEqual(dimension.categories[0].label, 'test')
|
[
"eurostatapiclient.models.dimension.Category",
"eurostatapiclient.models.dimension.Dimension",
"eurostatapiclient.models.dimension.Dimension.create_from_json",
"eurostatapiclient.models.dimension.BaseItem",
"eurostatapiclient.models.dimension.ItemList"
] |
[((273, 299), 'eurostatapiclient.models.dimension.Category', 'Category', (['id', 'index', 'label'], {}), '(id, index, label)\n', (281, 299), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((603, 629), 'eurostatapiclient.models.dimension.BaseItem', 'BaseItem', (['id', 'index', 'label'], {}), '(id, index, label)\n', (611, 629), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((832, 842), 'eurostatapiclient.models.dimension.ItemList', 'ItemList', ([], {}), '()\n', (840, 842), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((969, 979), 'eurostatapiclient.models.dimension.ItemList', 'ItemList', ([], {}), '()\n', (977, 979), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((1089, 1115), 'eurostatapiclient.models.dimension.BaseItem', 'BaseItem', (['"""id"""', '(0)', '"""label"""'], {}), "('id', 0, 'label')\n", (1097, 1115), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((1137, 1163), 'eurostatapiclient.models.dimension.BaseItem', 'BaseItem', (['"""id"""', '(1)', '"""label"""'], {}), "('id', 1, 'label')\n", (1145, 1163), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((1510, 1543), 'eurostatapiclient.models.dimension.Dimension', 'Dimension', (['id', 'index', 'label', 'size'], {}), '(id, index, label, size)\n', (1519, 1543), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((1563, 1589), 'eurostatapiclient.models.dimension.Category', 'Category', (['id', 'index', 'label'], {}), '(id, index, label)\n', (1571, 1589), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n'), ((2204, 2253), 'eurostatapiclient.models.dimension.Dimension.create_from_json', 'Dimension.create_from_json', (['id', 'index', 'size', 'json'], {}), '(id, index, size, json)\n', (2230, 2253), False, 'from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, Dimension\n')]
|
import os
import json
import requests
from elasticsearch import Elasticsearch, RequestsHttpConnection, helpers, exceptions
HOST = os.environ.get('ES_ENDPOINT')
INDEX = os.environ.get('ES_INDEX')
QUOTES_DUMP = os.environ.get('QUOTES_DUMP')
headers = {"Content-Type": "application/json"}
def main():
try:
es = Elasticsearch(
hosts=[{
"host": HOST.split('//')[1],
"port": 443
}],
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection,
)
with open(QUOTES_DUMP) as file:
quotes = json.load(file)
print(f"Ingesting {len(quotes)} quotes into {INDEX} index.")
for quote in quotes:
response = es.index(index=INDEX,
doc_type='_doc',
id=quote['id'],
body=quote,
request_timeout=60)
print("ElasticSearchService: Index creation response: ", response)
except Exception as exception:
# print some context about this error
print(exception)
raise exception
if __name__ == "__main__":
main()
|
[
"os.environ.get",
"json.load"
] |
[((131, 160), 'os.environ.get', 'os.environ.get', (['"""ES_ENDPOINT"""'], {}), "('ES_ENDPOINT')\n", (145, 160), False, 'import os\n'), ((169, 195), 'os.environ.get', 'os.environ.get', (['"""ES_INDEX"""'], {}), "('ES_INDEX')\n", (183, 195), False, 'import os\n'), ((210, 239), 'os.environ.get', 'os.environ.get', (['"""QUOTES_DUMP"""'], {}), "('QUOTES_DUMP')\n", (224, 239), False, 'import os\n'), ((630, 645), 'json.load', 'json.load', (['file'], {}), '(file)\n', (639, 645), False, 'import json\n')]
|
# Generated by Django 3.0.3 on 2020-03-17 02:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('LibreBadge', '0013_auto_20200317_0236'),
]
operations = [
migrations.AlterField(
model_name='badgetemplate',
name='badge',
field=models.FileField(upload_to=''),
),
]
|
[
"django.db.models.FileField"
] |
[((345, 375), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '""""""'}), "(upload_to='')\n", (361, 375), False, 'from django.db import migrations, models\n')]
|
#<NAME>
import socket, threading, sys, time, os
import tkinter as tk
client_list = []
connections = {}
check_buttons = {}
window = tk.Tk()
text_header = tk.StringVar()
text_message_wid = tk.Text(window, height=5, width=40, font=("Calibri"))
outer_frame = tk.Frame(window)
cb_canvas = tk.Canvas(outer_frame, bd=0, height=180, width=300)
cb_frame = tk.Frame(cb_canvas)
check_button_all_var = tk.BooleanVar()
def gui(start_server_thread = False):
if start_server_thread:
#create server thread
server_thread = threading.Thread(target=start_server)
server_thread.start()
#static tkinter widgets
window.title('LAN-Notifier')
window.resizable(width=False, height=False)
lbl_text = "SERVER",HOST,"(",PORT,")"
lbl = tk.Label(window, text=lbl_text, font=("Arial Bold", 12))
lbl.grid(column=0, row=0)
text_header_wid = tk.Entry(window, width=40, textvariable=text_header, font=("Calibri"))
text_header_wid.grid(column=0, row=3, pady=5)
text_message_wid.grid(column=0, row=4, padx=10, pady=5)
clear_btn = tk.Button(window, text=" NEW ", command= lambda: clear_it())
clear_btn.grid(column=0, row=5, sticky="nw", padx=10, pady=5)
send_btn = tk.Button(window, text=" SEND ", command= lambda: send_it())
send_btn.grid(column=0, row=5, sticky="ne", padx=10, pady=5)
ysb = tk.Scrollbar(outer_frame, orient="vertical", command=cb_canvas.yview)
ysb.grid(column=1, row=0, sticky="ns")
cb_canvas.configure(yscrollcommand=ysb.set)
cb_canvas.grid(column=0, row=0, padx=2)
outer_frame.grid(column=0, row=2)
def on_closing():
from tkinter import messagebox
if messagebox.askokcancel("Quit", "Do you want to quit?"):
window.destroy()
#sys.exit()
os._exit(0)
def set_cb_values(key):
if key['var'].get() == True:
key['checked'] = True
else:
key['checked'] = False
check_button_all.deselect()
if all_checked():
check_button_all.select()
def manage_msg(txt_msg):
msg_lines = txt_msg.split("\n")
count = 0
for i, line in enumerate(msg_lines):
if len(line) > 40:
count += 1
tmp_line = line[:40]
msg_lines[i] = line[:tmp_line.rfind(" ")]+"\n"+line[tmp_line.rfind(" ")+1:40]+line[40:]
txt_msg = "\n".join(msg_lines)
return manage_msg(txt_msg)
if count == 0:
return txt_msg
def send_it():
already_sent = True
text_message_wid.config(state=tk.DISABLED)
txt_head = text_header.get()
txt_msg = text_message_wid.get("1.0", "end-1c")
txt_msg = manage_msg(txt_msg)
if txt_head == "" or txt_msg == "":
from tkinter import messagebox
messagebox.showwarning('Send Message','Write a header and a message.\n Select clients.\n Then press SEND button.')
return False
send_time = time.strftime("%H:%M:%S", time.localtime())
msg = send_time + "|" + txt_head + "|" + txt_msg
txt_msg = txt_msg.replace('\n', ' ')
log = "["+get_time()+"] MESSAGE: "+txt_head+"|"+txt_msg+" sent to user(s):\n"
write_log_file(log)
for addr in check_buttons:
if check_buttons[addr]['checked'] == True and check_buttons[addr]['color'] != "green":
log = str(addr)+", "
write_log_file(log)
already_sent = False
check_buttons[addr]['sock'].send(msg.encode())
time.sleep(1)
if check_buttons[addr]['color'] == "":
check_buttons[addr]['color'] = "red"
check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
fill=check_buttons[addr]['color'])
write_log_file("\n")
if already_sent == True:
from tkinter import messagebox
messagebox.showwarning('Send Message','This message is already sent to all selected clients.\n Press NEW button to send a new message.')
return False
def clear_it():
text_message_wid.config(state=tk.NORMAL)
#text_header_wid.delete(0, tk.END)
text_message_wid.delete("1.0", "end-1c")
for addr in check_buttons:
check_buttons[addr]['color'] = ""
check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
fill=check_buttons[addr]['color'])
def check_all():
if check_button_all_var.get() == True:
for addr in check_buttons:
check_buttons[addr]['checked'] = True
check_buttons[addr]['widget'].select()
def all_checked():
for addr in check_buttons:
if check_buttons[addr]['checked'] == True:
continue
else:
return False
return True
def on_mousewheel(event):
cb_canvas.yview_scroll(-1*(event.delta//120), "units")
#destroy previous checkbutton widgets
for child in cb_frame.winfo_children():
child.destroy()
#remove checkbuttons
cb_del = check_buttons.keys() - connections.keys()
if cb_del:
del check_buttons[cb_del.pop()]
#check_all checkbutton
check_button_all = tk.Checkbutton(window, text="All",
onvalue=True, offvalue=False,
var=check_button_all_var,
command=lambda: check_all())
check_button_all.grid(column=0, row=1)
#show checkbuttons and indicators
for row, addr in enumerate(connections.keys()):
checkbutton_text = addr[0]
#check client_list for address' name
for client in client_list:
if client[0] == addr[0]:
client_name = client[1]
checkbutton_text = addr[0] + " [" + client_name + "]"
row += 1 #row 0: check_all checkbutton
if addr in check_buttons: #old checkbuttons
check_buttons[addr]['widget'] = tk.Checkbutton(cb_frame, text=checkbutton_text,
onvalue=True, offvalue=False,
var=check_buttons[addr]['var'],
command=lambda key=check_buttons[addr]: set_cb_values(key))
if check_buttons[addr]['checked'] == True:
check_buttons[addr]['widget'].select()
check_buttons[addr]['canvas'] = tk.Canvas(cb_frame, width=20, height=28)
check_buttons[addr]['indicator'] = check_buttons[addr]['canvas'].create_oval(10, 10, 20, 20,
fill=check_buttons[addr]['color'])
else: #new checkbuttons
check_buttons[addr] = {}
check_buttons[addr]['sock'] = connections[addr]
check_buttons[addr]['var'] = tk.BooleanVar()
check_buttons[addr]['widget'] = tk.Checkbutton(cb_frame, text=checkbutton_text,
onvalue=True, offvalue=False,
var=check_buttons[addr]['var'],
command=lambda key=check_buttons[addr]: set_cb_values(key))
if check_button_all_var.get() == True:
check_buttons[addr]['checked'] = True
check_buttons[addr]['widget'].select()
else:
check_buttons[addr]['checked'] = False
check_buttons[addr]['color'] = ""
check_buttons[addr]['canvas'] = tk.Canvas(cb_frame, width=20, height=28)
check_buttons[addr]['indicator'] = check_buttons[addr]['canvas'].create_oval(10, 10, 20, 20,
fill=check_buttons[addr]['color'])
check_buttons[addr]['widget'].grid(row=row, column=0)
check_buttons[addr]['canvas'].grid(row=row, column=1)
#manage checkbutton canvas
cb_frame.update()
cb_canvas.configure(scrollregion=(1,1,0,cb_frame.winfo_height()))
cb_canvas.bind_all("<MouseWheel>", on_mousewheel)
cb_canvas.create_window(outer_frame.winfo_width()//2, 0, window=cb_frame, anchor='n')
window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
def handle_client(conn, addr):
# display client address
connections[addr] = conn
log = "["+get_time()+"] NEW CONNECTION: "+str(addr[0])+"\n"
write_log_file(log)
while True:
try:
# receave message from client
sig = conn.recv(64).decode()
if sig == "k":
check_buttons[addr]['color'] = "yellow"
check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
fill=check_buttons[addr]['color'])
log = "["+get_time()+"] "+str(addr)+": got the message\n"
write_log_file(log)
elif sig == "ROGER":
check_buttons[addr]['color'] = "green"
check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
fill=check_buttons[addr]['color'])
log = "["+get_time()+"] "+str(addr)+": read the message\n"
write_log_file(log)
except Exception as e:
# disconnect the server
conn.close()
del connections[addr]
log = "["+get_time()+"] "+str(addr)+": "+str(e)+"\n"
write_log_file(log)
window.after(0, gui)
# kill thread
sys.exit()
def start_server():
log = "\n=============== STARTING SERVER: "+HOST+" "+str(PORT)+" ["+get_time()+"] ===============\n"
write_log_file(log)
# allow maximum 3 connections to the socket
s.listen(10)
while True:
# wait till a client accept connection
conn, addr = s.accept()
# create a thread to handle each connection
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
window.after(0, gui)
def get_time():
t = time.strftime("%d-%m-%Y %H:%M:%S", time.localtime())
return t
def write_log_file(text):
with open("log.txt", "a") as lf:
lf.write(text)
def read_client_list():
global PORT
try:
with open("client_list.txt", "r") as f:
if len(f.readline()) <= 6:
f.seek(0)
PORT = int(next(f))
else:
f.seek(0)
PORT = 5050
for client in f.readlines():
client_list.append(client.strip().split(";"))
except FileNotFoundError:
PORT = 5050
if __name__ == "__main__":
read_client_list()
# take the server name and port name
HOST = socket.gethostbyname(socket.gethostname())
#PORT = 5050
# create a socket at server side using TCP / IP protocol
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket with server and port number
s.bind((HOST, PORT))
gui(True)
|
[
"tkinter.StringVar",
"tkinter.Text",
"threading.Thread",
"tkinter.Canvas",
"socket.socket",
"tkinter.Entry",
"tkinter.messagebox.showwarning",
"tkinter.Scrollbar",
"time.sleep",
"socket.gethostname",
"os._exit",
"tkinter.BooleanVar",
"tkinter.messagebox.askokcancel",
"tkinter.Frame",
"sys.exit",
"tkinter.Label",
"tkinter.Tk",
"time.localtime"
] |
[((141, 148), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (146, 148), True, 'import tkinter as tk\n'), ((164, 178), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (176, 178), True, 'import tkinter as tk\n'), ((199, 250), 'tkinter.Text', 'tk.Text', (['window'], {'height': '(5)', 'width': '(40)', 'font': '"""Calibri"""'}), "(window, height=5, width=40, font='Calibri')\n", (206, 250), True, 'import tkinter as tk\n'), ((268, 284), 'tkinter.Frame', 'tk.Frame', (['window'], {}), '(window)\n', (276, 284), True, 'import tkinter as tk\n'), ((298, 349), 'tkinter.Canvas', 'tk.Canvas', (['outer_frame'], {'bd': '(0)', 'height': '(180)', 'width': '(300)'}), '(outer_frame, bd=0, height=180, width=300)\n', (307, 349), True, 'import tkinter as tk\n'), ((362, 381), 'tkinter.Frame', 'tk.Frame', (['cb_canvas'], {}), '(cb_canvas)\n', (370, 381), True, 'import tkinter as tk\n'), ((406, 421), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (419, 421), True, 'import tkinter as tk\n'), ((11458, 11507), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (11471, 11507), False, 'import socket, threading, sys, time, os\n'), ((550, 587), 'threading.Thread', 'threading.Thread', ([], {'target': 'start_server'}), '(target=start_server)\n', (566, 587), False, 'import socket, threading, sys, time, os\n'), ((805, 861), 'tkinter.Label', 'tk.Label', (['window'], {'text': 'lbl_text', 'font': "('Arial Bold', 12)"}), "(window, text=lbl_text, font=('Arial Bold', 12))\n", (813, 861), True, 'import tkinter as tk\n'), ((924, 992), 'tkinter.Entry', 'tk.Entry', (['window'], {'width': '(40)', 'textvariable': 'text_header', 'font': '"""Calibri"""'}), "(window, width=40, textvariable=text_header, font='Calibri')\n", (932, 992), True, 'import tkinter as tk\n'), ((1447, 1516), 'tkinter.Scrollbar', 'tk.Scrollbar', (['outer_frame'], {'orient': '"""vertical"""', 'command': 'cb_canvas.yview'}), "(outer_frame, orient='vertical', command=cb_canvas.yview)\n", (1459, 1516), True, 'import tkinter as tk\n'), ((1787, 1841), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""Quit"""', '"""Do you want to quit?"""'], {}), "('Quit', 'Do you want to quit?')\n", (1809, 1841), False, 'from tkinter import messagebox\n'), ((10465, 10522), 'threading.Thread', 'threading.Thread', ([], {'target': 'handle_client', 'args': '(conn, addr)'}), '(target=handle_client, args=(conn, addr))\n', (10481, 10522), False, 'import socket, threading, sys, time, os\n'), ((10650, 10666), 'time.localtime', 'time.localtime', ([], {}), '()\n', (10664, 10666), False, 'import socket, threading, sys, time, os\n'), ((11346, 11366), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (11364, 11366), False, 'import socket, threading, sys, time, os\n'), ((1911, 1922), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (1919, 1922), False, 'import socket, threading, sys, time, os\n'), ((3017, 3143), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Send Message"""', '"""Write a header and a message.\n Select clients.\n Then press SEND button."""'], {}), '(\'Send Message\',\n """Write a header and a message.\n Select clients.\n Then press SEND button."""\n )\n', (3039, 3143), False, 'from tkinter import messagebox\n'), ((3205, 3221), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3219, 3221), False, 'import socket, threading, sys, time, os\n'), ((4215, 4364), 'tkinter.messagebox.showwarning', 'messagebox.showwarning', (['"""Send Message"""', '"""This message is already sent to all selected clients.\n Press NEW button to send a new message."""'], {}), '(\'Send Message\',\n """This message is already sent to all selected clients.\n Press NEW button to send a new message."""\n )\n', (4237, 4364), False, 'from tkinter import messagebox\n'), ((6863, 6903), 'tkinter.Canvas', 'tk.Canvas', (['cb_frame'], {'width': '(20)', 'height': '(28)'}), '(cb_frame, width=20, height=28)\n', (6872, 6903), True, 'import tkinter as tk\n'), ((7267, 7282), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (7280, 7282), True, 'import tkinter as tk\n'), ((7964, 8004), 'tkinter.Canvas', 'tk.Canvas', (['cb_frame'], {'width': '(20)', 'height': '(28)'}), '(cb_frame, width=20, height=28)\n', (7973, 8004), True, 'import tkinter as tk\n'), ((3773, 3786), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3783, 3786), False, 'import socket, threading, sys, time, os\n'), ((10061, 10071), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10069, 10071), False, 'import socket, threading, sys, time, os\n')]
|
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
# Return list of binary hex ids as list of UTF strings
def pretty_ids(ids):
pretty_list = []
for id in ids:
pretty_list.append(hex_to_utf8(id))
return pretty_list
# Return binary hex as UTF string
def hex_to_utf8(binary):
return binascii.hexlify(binary).decode("UTF-8")
def is_valid_hex_str(hex_str):
"""
Function to check given string is valid hex string or not
Parameter
- hex_str is string
Returns True if valid hex string otherwise False
"""
try:
int(hex_str, 16)
return True
except ValueError:
return False
def byte_array_to_hex_str(in_byte_array):
'''
Converts tuple of bytes to hex string
'''
return ''.join(format(i, '02x') for i in in_byte_array)
|
[
"binascii.hexlify"
] |
[((852, 876), 'binascii.hexlify', 'binascii.hexlify', (['binary'], {}), '(binary)\n', (868, 876), False, 'import binascii\n')]
|
import os,copy
from collections import OrderedDict
from pypospack.task.lammps import LammpsSimulation
class LammpsStructuralMinimization(LammpsSimulation):
""" Class for LAMMPS structural minimization
This data class defines additional attributes and methods necessary to
interact with the Workflow manager.
Args:
task_name(str): unique id for the task name being define
task_directory(str): the directory where this task will create
input and output files for LAMMPS
Attributes:
config
config_map
"""
def __init__(self,
task_name,
task_directory,
structure_filename,
restart=False,
fullauto=False):
_task_type = 'lmps_min_all'
LammpsSimulation.__init__(self,
task_name=task_name,
task_directory=task_directory,
task_type=_task_type,
structure_filename=structure_filename,
restart=restart,
fullauto=fullauto)
def postprocess(self):
LammpsSimulation.postprocess(self)
def lammps_input_file_to_string(self):
str_out = "".join([\
self._lammps_input_initialization_section(),
self._lammps_input_create_atoms(),
self._lammps_input_define_potential(),
self._lammps_input_run_minimization(),
self._lammps_input_out_section()])
return(str_out)
def on_post(self,configuration=None):
self.__get_results_from_lammps_outputfile()
LammpsSimulation.on_post(self,configuration=configuration)
def __get_results_from_lammps_outputfile(self):
_filename = os.path.join(
self.task_directory,
'lammps.out')
with open(_filename,'r') as f:
lines = f.readlines()
_variables = [
'tot_energy',
'num_atoms',
'xx','yy','zz','xy','xz','yz',
'tot_press',
'pxx', 'pyy', 'pzz', 'pxy', 'pxz', 'pyz',
]
_results = OrderedDict()
for i,line in enumerate(lines):
for name in _variables:
if line.startswith('{} = '.format(name)):
_results[name] = float(line.split('=')[1].strip())
if line.startswith('ERROR:'):
print('name:{}'.format(name))
print('line:{}'.format(line.strip))
raise NotImplementedError
_task_name = self.task_name
self.results = OrderedDict()
self.results['{}.{}'.format(_task_name,'toten')] = _results['tot_energy']
self.results['{}.{}'.format(_task_name,'natoms')] = _results['num_atoms']
# this only works for orthogonal cells
self.results['{}.{}'.format(_task_name,'a11')] = _results['xx']
self.results['{}.{}'.format(_task_name,'a12')] = 0
self.results['{}.{}'.format(_task_name,'a13')] = 0
self.results['{}.{}'.format(_task_name,'a21')] = 0
self.results['{}.{}'.format(_task_name,'a22')] = _results['yy']
self.results['{}.{}'.format(_task_name,'a23')] = 0
self.results['{}.{}'.format(_task_name,'a31')] = 0
self.results['{}.{}'.format(_task_name,'a32')] = 0
self.results['{}.{}'.format(_task_name,'a33')] = _results['zz']
self.results['{}.{}'.format(_task_name,'totpress')] = _results['tot_press']
self.results['{}.{}'.format(_task_name,'p11')] = _results['pxx']
self.results['{}.{}'.format(_task_name,'p12')] = _results['pxy']
self.results['{}.{}'.format(_task_name,'p13')] = _results['pxz']
self.results['{}.{}'.format(_task_name,'p21')] = _results['pxy']
self.results['{}.{}'.format(_task_name,'p22')] = _results['pyy']
self.results['{}.{}'.format(_task_name,'p23')] = _results['pyz'] #pyz=pzy
self.results['{}.{}'.format(_task_name,'p31')] = _results['pxz'] #pxz=pzx
self.results['{}.{}'.format(_task_name,'p32')] = _results['pyz']
self.results['{}.{}'.format(_task_name,'p33')] = _results['pzz']
def _lammps_input_run_minimization(self):
str_out = (
'# ---- define settings\n'
'compute eng all pe/atom\n'
'compute eatoms all reduce sum c_eng\n'
'# ---- run minimization\n'
'reset_timestep 0\n'
'fix 1 all box/relax iso 0.0 vmax 0.001\n'
'thermo 10\n'
'thermo_style custom step pe lx ly lz xy xz yz press pxx pyy pzz pxy pxz pyz c_eatoms\n'
# 'thermo_style custom step pe lx ly lz press pxx pyy pzz c_eatoms\n'
'min_style cg\n'
'minimize 1e-25 1e-25 5000 10000\n'
)
return str_out
|
[
"pypospack.task.lammps.LammpsSimulation.on_post",
"pypospack.task.lammps.LammpsSimulation.__init__",
"os.path.join",
"collections.OrderedDict",
"pypospack.task.lammps.LammpsSimulation.postprocess"
] |
[((782, 972), 'pypospack.task.lammps.LammpsSimulation.__init__', 'LammpsSimulation.__init__', (['self'], {'task_name': 'task_name', 'task_directory': 'task_directory', 'task_type': '_task_type', 'structure_filename': 'structure_filename', 'restart': 'restart', 'fullauto': 'fullauto'}), '(self, task_name=task_name, task_directory=\n task_directory, task_type=_task_type, structure_filename=\n structure_filename, restart=restart, fullauto=fullauto)\n', (807, 972), False, 'from pypospack.task.lammps import LammpsSimulation\n'), ((1095, 1129), 'pypospack.task.lammps.LammpsSimulation.postprocess', 'LammpsSimulation.postprocess', (['self'], {}), '(self)\n', (1123, 1129), False, 'from pypospack.task.lammps import LammpsSimulation\n'), ((1603, 1662), 'pypospack.task.lammps.LammpsSimulation.on_post', 'LammpsSimulation.on_post', (['self'], {'configuration': 'configuration'}), '(self, configuration=configuration)\n', (1627, 1662), False, 'from pypospack.task.lammps import LammpsSimulation\n'), ((1739, 1786), 'os.path.join', 'os.path.join', (['self.task_directory', '"""lammps.out"""'], {}), "(self.task_directory, 'lammps.out')\n", (1751, 1786), False, 'import os, copy\n'), ((2155, 2168), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2166, 2168), False, 'from collections import OrderedDict\n'), ((2648, 2661), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2659, 2661), False, 'from collections import OrderedDict\n')]
|
import json
import requests
def remove_repetidos(lista):
l = []
for i in lista:
if i not in l:
l.append(i)
l.sort()
return l
def busca_musicas(id_api):
r = requests.get(id_api+'index.js')
if r.status_code == 200:
reddit_data = json.loads(r.content)
musicas = reddit_data['artist']['lyrics']['item']
for id_music in musicas:
r2 = requests.get('https://api.vagalume.com.br/search.php?musid='+id_music['id'])
if r2.status_code == 200:
reddit_data = json.loads(r2.content)
limpa_string = reddit_data['mus'][0]['text'].replace('.', '') # Retira ponto final
limpa_string = limpa_string.replace(',', '') # Retira virgula
limpa_string = limpa_string.replace('?', '') # Retira ponto de interrogação
limpa_string = limpa_string.replace('!', '') # Retira ponto de exclamação
limpa_string = limpa_string.replace('(', '') # Retira abre parênteses
limpa_string = limpa_string.replace(')', '') # Retira fecha parênteses
limpa_string = limpa_string.replace('[', '') # Retira abre colchetes
limpa_string = limpa_string.replace(']', '') # Retira fecha colchetes
limpa_string = limpa_string.replace('{', '') # Retira abre chaves
limpa_string = limpa_string.replace('}', '') # Retira fecha chaves
limpa_string = limpa_string.replace('/', '') # Retira barra
limpa_string = limpa_string.replace('"', '') # Retira aspas duplas
lista = remove_repetidos(limpa_string.split())
for d in lista:
btl = '\"'+ d+ '\", '
arquivo = open('strings.txt','a')
arquivo.write(btl)
arquivo.close()
print('Salvo com Sucesso!')
###########################################################################################################################
link_cantor = input('Informe o link do cantor no Vagalume.com: ')
busca_musicas(link_cantor)
|
[
"json.loads",
"requests.get"
] |
[((199, 232), 'requests.get', 'requests.get', (["(id_api + 'index.js')"], {}), "(id_api + 'index.js')\n", (211, 232), False, 'import requests\n'), ((282, 303), 'json.loads', 'json.loads', (['r.content'], {}), '(r.content)\n', (292, 303), False, 'import json\n'), ((412, 490), 'requests.get', 'requests.get', (["('https://api.vagalume.com.br/search.php?musid=' + id_music['id'])"], {}), "('https://api.vagalume.com.br/search.php?musid=' + id_music['id'])\n", (424, 490), False, 'import requests\n'), ((557, 579), 'json.loads', 'json.loads', (['r2.content'], {}), '(r2.content)\n', (567, 579), False, 'import json\n')]
|
import logging
from typing import Optional
import requests
import datetime
from dataclasses import dataclass
logger = logging.getLogger(__name__)
@dataclass
class Holiday:
title: str
date: str
day_of_week: str
day_of_week_text: str
def fetch_public_holiday(token: str, target_date: datetime.date) -> Optional[Holiday]:
response = requests.get(
url="https://api.kenall.jp/v1/holidays",
headers={"Authorization": f"Token {token}"},
params={"year": target_date.year},
)
target_date_str = str(target_date)
response_body = response.json()
logger.debug(response_body)
for holiday in response_body.get("data"):
if holiday.get("date") == target_date_str:
return Holiday(**holiday)
return None
def fetch_next_public_holiday(
token: str, target_date: datetime.date
) -> Optional[Holiday]:
response = requests.get(
url="https://api.kenall.jp/v1/holidays",
headers={"Authorization": f"Token {token}"},
params={"year": target_date.year},
)
response_body = response.json()
logger.debug(response_body)
for holiday in response_body.get("data"):
if datetime.date.fromisoformat(holiday.get("date")) >= target_date:
return Holiday(**holiday)
next_new_year_day = datetime.date(target_date.year + 1, 1, 1)
return fetch_next_public_holiday(next_new_year_day)
|
[
"requests.get",
"datetime.date",
"logging.getLogger"
] |
[((121, 148), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'import logging\n'), ((357, 495), 'requests.get', 'requests.get', ([], {'url': '"""https://api.kenall.jp/v1/holidays"""', 'headers': "{'Authorization': f'Token {token}'}", 'params': "{'year': target_date.year}"}), "(url='https://api.kenall.jp/v1/holidays', headers={\n 'Authorization': f'Token {token}'}, params={'year': target_date.year})\n", (369, 495), False, 'import requests\n'), ((895, 1033), 'requests.get', 'requests.get', ([], {'url': '"""https://api.kenall.jp/v1/holidays"""', 'headers': "{'Authorization': f'Token {token}'}", 'params': "{'year': target_date.year}"}), "(url='https://api.kenall.jp/v1/holidays', headers={\n 'Authorization': f'Token {token}'}, params={'year': target_date.year})\n", (907, 1033), False, 'import requests\n'), ((1312, 1353), 'datetime.date', 'datetime.date', (['(target_date.year + 1)', '(1)', '(1)'], {}), '(target_date.year + 1, 1, 1)\n', (1325, 1353), False, 'import datetime\n')]
|
from django import forms
from .models import UserProfile
class ProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['name', 'photo']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'}),
'photo': forms.FileInput(attrs={'class': 'form-control'}),
}
|
[
"django.forms.TextInput",
"django.forms.FileInput"
] |
[((213, 261), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (228, 261), False, 'from django import forms\n'), ((284, 332), 'django.forms.FileInput', 'forms.FileInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (299, 332), False, 'from django import forms\n')]
|
import operator
s1="Marlin"
s2="beard"
print("The Concatenated string is :",end="")
print(operator.concat(s1,s2))
#using contains() to check if s1 contains s2
if(operator.contains(s1, s2)):
print("Marlin Contains Beard")
else:
print("Marlin Does not contains Beard")
|
[
"operator.concat",
"operator.contains"
] |
[((162, 187), 'operator.contains', 'operator.contains', (['s1', 's2'], {}), '(s1, s2)\n', (179, 187), False, 'import operator\n'), ((90, 113), 'operator.concat', 'operator.concat', (['s1', 's2'], {}), '(s1, s2)\n', (105, 113), False, 'import operator\n')]
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def runif_check():
fr = h2o.H2OFrame([[r] for r in range(1,1001)])
runif1 = fr[0].runif(1234)
runif2 = fr[0].runif(1234)
runif3 = fr[0].runif(42)
assert (runif1 == runif2).all(), "Expected runif with the same seeds to return the same values."
assert not (runif1 == runif3).all(), "Expected runif with different seeds to return different values."
if __name__ == "__main__":
pyunit_utils.standalone_test(runif_check)
else:
runif_check()
|
[
"tests.pyunit_utils.standalone_test",
"sys.path.insert"
] |
[((49, 77), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../"""'], {}), "(1, '../../')\n", (64, 77), False, 'import sys\n'), ((510, 551), 'tests.pyunit_utils.standalone_test', 'pyunit_utils.standalone_test', (['runif_check'], {}), '(runif_check)\n', (538, 551), False, 'from tests import pyunit_utils\n')]
|
import arcpy
import pythonaddins
class ExplosionButtonClass(object):
"""Implementation for ExplosionAddin_addin.explosionbutton (Button)"""
def __init__(self):
self.enabled = True
self.checked = False
def onClick(self):
# Print message to confirm initialisation
#pythonaddins.MessageBox('Have you applied a definition query to all necessary layers?', 'Query check', 4)
pythonaddins.MessageBox("I am working", "Are you working?")
pythonaddins.GPToolDialog("E:/MSc/Advanced-Programming/GitHub/GEOG_5790/Practical2-Scripts/Explosion Toolbox (v2).tbx", "Explosion")
|
[
"pythonaddins.GPToolDialog",
"pythonaddins.MessageBox"
] |
[((427, 486), 'pythonaddins.MessageBox', 'pythonaddins.MessageBox', (['"""I am working"""', '"""Are you working?"""'], {}), "('I am working', 'Are you working?')\n", (450, 486), False, 'import pythonaddins\n'), ((496, 638), 'pythonaddins.GPToolDialog', 'pythonaddins.GPToolDialog', (['"""E:/MSc/Advanced-Programming/GitHub/GEOG_5790/Practical2-Scripts/Explosion Toolbox (v2).tbx"""', '"""Explosion"""'], {}), "(\n 'E:/MSc/Advanced-Programming/GitHub/GEOG_5790/Practical2-Scripts/Explosion Toolbox (v2).tbx'\n , 'Explosion')\n", (521, 638), False, 'import pythonaddins\n')]
|
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
from wtscenario import make_scenarios, filter_scenarios
# test_rollback_to_stable25.py
# Check various scenarios relating to RLE cells in column-store.
#
# We write at three different timestamps:
# 10 - aaaaaa or none
# 20 - bbbbbb or delete or none
# 30 - cccccc or delete or none
#
# and we evict to push things to disk after any of these,
# and we roll back to either 15 or 25.
#
# The writes can be either uniform, heterogeneous, first key, middle key, or last key.
#
# We do this with a group of 5 keys 2..6. Keys 1 and 6 are written with zzzzzz at
# timestamp 5 and evicted to ensure that the group of keys we're using is isolated
# from other unused keys.
#
# This generates a lot of cases, but we filter pointless combinations and they run fast.
# Put these bits outside the class definition so they can be referred to both in class
# instances and in the scenario setup logic, which doesn't have a class instance yet.
my_rle_size = 5
def keys_of_write(write):
if write == 'u' or write == 'h':
return range(2, 2 + my_rle_size)
elif write == 'f':
return [2]
elif write == 'm':
return [2 + my_rle_size // 2]
else:
return [2 + my_rle_size - 1]
class test_rollback_to_stable25(wttest.WiredTigerTestCase):
conn_config = 'in_memory=false'
write_10_values = [
('10u', dict(write_10='u')),
('10h', dict(write_10='h')),
('10f', dict(write_10='f')),
('10m', dict(write_10='m')),
('10l', dict(write_10='l')),
]
type_10_values = [
('nil', dict(type_10=None)),
('upd', dict(type_10='upd')),
]
write_20_values = [
('20u', dict(write_20='u')),
('20h', dict(write_20='h')),
('20f', dict(write_20='f')),
('20m', dict(write_20='m')),
('20l', dict(write_20='l')),
]
type_20_values = [
('nil', dict(type_20=None)),
('upd', dict(type_20='upd')),
('del', dict(type_20='del')),
]
write_30_values = [
('30u', dict(write_30='u')),
('30h', dict(write_30='h')),
('30f', dict(write_30='f')),
('30m', dict(write_30='m')),
('30l', dict(write_30='l')),
]
type_30_values = [
('nil', dict(type_30=None)),
('upd', dict(type_30='upd')),
('del', dict(type_30='del')),
]
evict_time_values = [
('chk10', dict(evict_time=10)),
('chk20', dict(evict_time=20)),
('chk30', dict(evict_time=30)),
]
rollback_time_values = [
('roll15', dict(rollback_time=15)),
('roll25', dict(rollback_time=25)),
]
def is_meaningful(name, vals):
# The last write at evict time should be uniform, to get an RLE cell.
if vals['evict_time'] == 10 and vals['write_10'] != 'u':
return False
if vals['evict_time'] == 20 and vals['write_20'] != 'u':
return False
if vals['evict_time'] == 30 and vals['write_30'] != 'u':
return False
# If the type is nil, the value must be uniform.
if vals['type_10'] is None and vals['write_10'] != 'u':
return False
if vals['type_20'] is None and vals['write_20'] != 'u':
return False
if vals['type_30'] is None and vals['write_30'] != 'u':
return False
# Similarly, delete and heterogeneous doesn't make sense.
if vals['type_10'] == 'del' and vals['write_10'] == 'h':
return False
if vals['type_20'] == 'del' and vals['write_20'] == 'h':
return False
if vals['type_20'] == 'del' and vals['write_30'] == 'h':
return False
# Both 10 and 20 shouldn't be nil. That's equivalent to 10 and 30 being nil.
if vals['type_10'] is None and vals['type_20'] is None:
return False
# Avoid cases that delete nonexistent values.
def deletes_nonexistent():
present = {}
for k in range(2, 2 + my_rle_size):
present[k] = False
def adjust(ty, write):
if ty is None:
return
for k in keys_of_write(write):
if ty == 'upd':
present[k] = True
elif ty == 'del':
if present[k]:
present[k] = False
else:
raise KeyError
adjust(vals['type_10'], vals['write_10'])
adjust(vals['type_20'], vals['write_20'])
adjust(vals['type_30'], vals['write_30'])
try:
deletes_nonexistent()
except KeyError:
return False
return True
scenarios = filter_scenarios(make_scenarios(write_10_values, type_10_values,
write_20_values, type_20_values,
write_30_values, type_30_values,
evict_time_values,
rollback_time_values),
is_meaningful)
value_z = "zzzzz" * 10
def writes(self, uri, s, expected, ty, write, value, ts):
if ty is None:
# do nothing at all
return
cursor = s.open_cursor(uri)
s.begin_transaction()
for k in keys_of_write(write):
if ty == 'upd':
myval = value + str(k) if write == 'h' else value
cursor[k] = myval
expected[k] = myval
else:
cursor.set_key(k)
cursor.remove()
del expected[k]
s.commit_transaction('commit_timestamp=' + self.timestamp_str(ts))
cursor.close()
def evict(self, uri, s):
# Evict the page to force reconciliation.
evict_cursor = s.open_cursor(uri, None, "debug=(release_evict)")
s.begin_transaction()
# Search the key to evict it. Use both bookends.
v = evict_cursor[1]
self.assertEqual(v, self. value_z)
v = evict_cursor[2 + my_rle_size]
self.assertEqual(v, self. value_z)
self.assertEqual(evict_cursor.reset(), 0)
s.rollback_transaction()
evict_cursor.close()
def check(self, uri, s, ts, expected):
cursor = s.open_cursor(uri)
s.begin_transaction('read_timestamp=' + self.timestamp_str(ts))
# endpoints should still be in place
self.assertEqual(cursor[1], self.value_z)
self.assertEqual(cursor[2 + my_rle_size], self.value_z)
for k in range(2, 2 + my_rle_size):
if k in expected:
self.assertEqual(cursor[k], expected[k])
else:
cursor.set_key(k)
r = cursor.search()
self.assertEqual(r, wiredtiger.WT_NOTFOUND)
s.rollback_transaction()
cursor.close()
def test_rollback_to_stable25(self):
# Create a table without logging.
uri = "table:rollback_to_stable25"
self.session.create(uri, 'key_format=r,value_format=S')
# Pin oldest timestamp to 2.
self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(2))
# Start stable timestamp at 2.
self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(2))
value_a = "aaaaa" * 10
value_b = "bbbbb" * 10
value_c = "ccccc" * 10
s = self.conn.open_session()
# Write the endpoints at time 5.
cursor = s.open_cursor(uri)
s.begin_transaction()
cursor[1] = self.value_z
cursor[2 + my_rle_size] = self.value_z
s.commit_transaction('commit_timestamp=' + self.timestamp_str(5))
self.evict(uri, s)
cursor.close()
# Do writes at time 10.
expected = {}
self.writes(uri, s, expected, self.type_10, self.write_10, value_a, 10)
expected10 = expected.copy()
# Evict at time 10 if requested.
if self.evict_time == 10:
self.evict(uri, s)
# Do more writes at time 20.
self.writes(uri, s, expected, self.type_20, self.write_20, value_b, 20)
expected20 = expected.copy()
# Evict at time 20 if requested.
if self.evict_time == 20:
self.evict(uri, s)
# Do still more writes at time 30.
self.writes(uri, s, expected, self.type_30, self.write_30, value_c, 30)
expected30 = expected.copy()
# Evict at time 30 if requested.
if self.evict_time == 30:
self.evict(uri, s)
# Now roll back.
self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(self.rollback_time))
self.conn.rollback_to_stable()
if self.rollback_time < 20:
expected20 = expected10
expected30 = expected10
elif self.rollback_time < 30:
expected30 = expected20
# Now make sure we see what we expect.
self.check(uri, s, 10, expected10)
self.check(uri, s, 20, expected20)
self.check(uri, s, 30, expected30)
|
[
"wtscenario.make_scenarios"
] |
[((6094, 6256), 'wtscenario.make_scenarios', 'make_scenarios', (['write_10_values', 'type_10_values', 'write_20_values', 'type_20_values', 'write_30_values', 'type_30_values', 'evict_time_values', 'rollback_time_values'], {}), '(write_10_values, type_10_values, write_20_values,\n type_20_values, write_30_values, type_30_values, evict_time_values,\n rollback_time_values)\n', (6108, 6256), False, 'from wtscenario import make_scenarios, filter_scenarios\n')]
|
import os
import click
def SetVars(api_url, api_key):
'''
Takes API key and URL, sets them to environment variables
Parameters
----------
api_url : str
The SimScale API URL to call.
api_key : str
The SimScale API Key to use when calling, this is equivilent to
the users password for the API, it should never be printed.
'''
try:
os.environ["SIMSCALE_API_URL"] = str(api_url)
os.environ['SIMSCALE_API_KEY'] = str(api_key)
print("Your API key has ben set to the environment")
except:
raise Exception("Could not set environment variables")
@click.command("set-api-variables")
@click.argument(
'api-url',
type=str
)
@click.argument(
'api-key',
type=str
)
def set_variables(api_url, api_key):
SetVars(api_url, api_key)
|
[
"click.argument",
"click.command"
] |
[((638, 672), 'click.command', 'click.command', (['"""set-api-variables"""'], {}), "('set-api-variables')\n", (651, 672), False, 'import click\n'), ((674, 709), 'click.argument', 'click.argument', (['"""api-url"""'], {'type': 'str'}), "('api-url', type=str)\n", (688, 709), False, 'import click\n'), ((721, 756), 'click.argument', 'click.argument', (['"""api-key"""'], {'type': 'str'}), "('api-key', type=str)\n", (735, 756), False, 'import click\n')]
|
import os
from django import template
from django.utils.safestring import mark_safe
from repository.models import FileExt
register = template.Library()
generic = '<path fill-rule="evenodd" d="M4 0h8a2 2 0 0 1 2 2v12a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2zm0 1a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H4z"/>'
@register.simple_tag()
def get_file_icon(name):
filename, file_extension = os.path.splitext(name)
# remove the dot from extension
file_extension = file_extension[1:]
if file_extension is None or file_extension == '':
return (mark_safe(generic))
else:
try:
ext = FileExt.objects.get(name=file_extension.lower())
except FileExt.DoesNotExist:
return (mark_safe(generic))
return mark_safe(ext.type.svg_path)
|
[
"django.template.Library",
"os.path.splitext",
"django.utils.safestring.mark_safe"
] |
[((136, 154), 'django.template.Library', 'template.Library', ([], {}), '()\n', (152, 154), False, 'from django import template\n'), ((422, 444), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (438, 444), False, 'import os\n'), ((792, 820), 'django.utils.safestring.mark_safe', 'mark_safe', (['ext.type.svg_path'], {}), '(ext.type.svg_path)\n', (801, 820), False, 'from django.utils.safestring import mark_safe\n'), ((593, 611), 'django.utils.safestring.mark_safe', 'mark_safe', (['generic'], {}), '(generic)\n', (602, 611), False, 'from django.utils.safestring import mark_safe\n'), ((760, 778), 'django.utils.safestring.mark_safe', 'mark_safe', (['generic'], {}), '(generic)\n', (769, 778), False, 'from django.utils.safestring import mark_safe\n')]
|
import numpy as np
import re
import pandas as pd
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def load_data_sarc(input_file, training, sample_percent=1.0):
reddit = pd.read_csv(input_file)
sample_index = int(len(reddit) * sample_percent)
labels = reddit['label'].values
labels = labels[:sample_index]
labels = [[0, 1] if l == 1 else [1, 0] for l in labels]
split_index = int(len(labels) * 0.7)
train_labels, test_labels = labels[:split_index], labels[split_index:]
sarcastic = 0
for label in test_labels:
if label == [0, 1]: sarcastic += 1
# Process data
text = reddit['comment'].values
text = [str(x) for x in text]
text = text[:sample_index]
train_text, test_text = text[:split_index], text[split_index:]
return [train_text, np.array(train_labels)] if training else [test_text, np.array(test_labels)]
def load_data_ghosh(input_file):
with open(input_file) as f:
twitter = f.readlines()
twitter = [x.strip() for x in twitter]
twitter = pd.DataFrame(twitter)
new = twitter[0].str.split("\t", n = 2, expand = True)
twitter_labels = new[1]
twitter_text = new[2]
twitter_text = [tweet for tweet in twitter_text]
twitter_labels = [[0, 1] if l is '1' else [1, 0] for l in twitter_labels]
sarcastic = 0
for label in twitter_labels:
if label == [0, 1]: sarcastic += 1
#print("Sarcastic Count: %d" % sarcastic)
#print("Not Sarcastic Count: %d" % (len(twitter_labels)-sarcastic))
twitter_labels = np.array(twitter_labels)
return [twitter_text, twitter_labels]
def load_data_and_labels(positive_data_file, negative_data_file):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def batch_iter_one_epoch(data, batch_size, shuffle=True):
data = np.array(data)
data_size = len(data)
num_batches = int((len(data)-1)/batch_size) + 1
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
print("Epoch: %d" % epoch)
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
#load_data_sarc('data/train-balanced-sarcasm.csv', True)
#load_data_ghosh('data/ghosh/train.txt')
|
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"re.sub",
"numpy.concatenate"
] |
[((260, 306), 're.sub', 're.sub', (['"""[^A-Za-z0-9(),!?\\\\\'\\\\`]"""', '""" """', 'string'], {}), '("[^A-Za-z0-9(),!?\\\\\'\\\\`]", \' \', string)\n', (266, 306), False, 'import re\n'), ((319, 348), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (325, 348), False, 'import re\n'), ((363, 394), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (369, 394), False, 'import re\n'), ((409, 440), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (415, 440), False, 'import re\n'), ((455, 486), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (461, 486), False, 'import re\n'), ((501, 530), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (507, 530), False, 'import re\n'), ((545, 576), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', (551, 576), False, 'import re\n'), ((591, 617), 're.sub', 're.sub', (['""","""', '""" , """', 'string'], {}), "(',', ' , ', string)\n", (597, 617), False, 'import re\n'), ((632, 658), 're.sub', 're.sub', (['"""!"""', '""" ! """', 'string'], {}), "('!', ' ! ', string)\n", (638, 658), False, 'import re\n'), ((673, 703), 're.sub', 're.sub', (['"""\\\\("""', '""" \\\\( """', 'string'], {}), "('\\\\(', ' \\\\( ', string)\n", (679, 703), False, 'import re\n'), ((716, 746), 're.sub', 're.sub', (['"""\\\\)"""', '""" \\\\) """', 'string'], {}), "('\\\\)', ' \\\\) ', string)\n", (722, 746), False, 'import re\n'), ((759, 789), 're.sub', 're.sub', (['"""\\\\?"""', '""" \\\\? """', 'string'], {}), "('\\\\?', ' \\\\? ', string)\n", (765, 789), False, 'import re\n'), ((802, 832), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (808, 832), False, 'import re\n'), ((943, 966), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (954, 966), True, 'import pandas as pd\n'), ((1814, 1835), 'pandas.DataFrame', 'pd.DataFrame', (['twitter'], {}), '(twitter)\n', (1826, 1835), True, 'import pandas as pd\n'), ((2319, 2343), 'numpy.array', 'np.array', (['twitter_labels'], {}), '(twitter_labels)\n', (2327, 2343), True, 'import numpy as np\n'), ((3199, 3252), 'numpy.concatenate', 'np.concatenate', (['[positive_labels, negative_labels]', '(0)'], {}), '([positive_labels, negative_labels], 0)\n', (3213, 3252), True, 'import numpy as np\n'), ((3346, 3360), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3354, 3360), True, 'import numpy as np\n'), ((3957, 3971), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3965, 3971), True, 'import numpy as np\n'), ((1582, 1604), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (1590, 1604), True, 'import numpy as np\n'), ((1635, 1656), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (1643, 1656), True, 'import numpy as np\n'), ((3508, 3528), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (3517, 3528), True, 'import numpy as np\n'), ((4244, 4264), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (4253, 4264), True, 'import numpy as np\n')]
|
import sys
class Memory:
def __init__(self):
self.memory = {}
#single byte
def get_address(self, address_str):
if address_str in self.memory:
return self.memory[addresss_str]
else:
print("memory not assigned, returning zero")
return '0'*8
#single byte
def set_address(self, address_str, value):
# print(f"setting memory address {address_str} to value {value}")
self.memory[address_str] = value
def get_byte(self, address_str):
return self.memory[address_str]
def get_halfword(self, address_str):
return "".join([self.memory[str(int(address_str)+i)] for i in range(2)])
def get_word(self, address_str):
# print(f"address to access: {address_str}")
return "".join([self.memory[str(int(address_str)+i)] for i in range(4)])
def get_doubleword(self, address_str):
# print(f"get double_word address_str: {address_str}")
return "".join([self.memory[str(int(address_str)+i)] for i in range(8)])
def store_byte(self, address_str, value):
if len(value) == 8:
self.memory[address_str]= value
else:
print(f"store_byte takes only 8 bit values, but got {len(value)}")
sys.exit(0)
def store_halfword(self, address_str, value):
if len(value) == 16:
for i in range(2):
self.memory[str(int(address_str) + i)] = value[i*8: 8 + i*8]
else:
print(f"store_halfword takes only 16 bit values, but got {len(value)}")
sys.exit(0)
def store_word(self, address_str, value):
if len(value) == 32:
for i in range(4):
self.memory[str(int(address_str) + i)] = value[i*8:8 + i*8]
else:
print(f"store_word takes only 32 bit values, but got {len(value)}")
sys.exit(0)
def store_doubleword(self, address_str, value):
if len(value) == 64:
for i in range(8):
self.memory[str(int(address_str) + i)] = value[i*8:8 + i*8]
else:
print(f"store_halfword takes only 64 bit values, but got {len(value)}")
sys.exit(0)
def set_n_bytes(self, address_str, value, n):
if len(value) // 8 == n:
for i in range(n):
self.store_byte(
str(int(address_str) + i*8),
value[i*8:8 + i*8]
)
else:
raise ValueError("length {len(value)} not a multiple of 8")
def get_string(self, address):
address = str(int(address, 2))
if address in self.memory:
string=""
while self.memory[address]!='0'*8:
char = chr(int(self.memory[address], 2))
string = string + char
address = str(int(address) + 1)
return string
else:
raise RuntimeError("get_string: address not in memory")
|
[
"sys.exit"
] |
[((1278, 1289), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1286, 1289), False, 'import sys\n'), ((1588, 1599), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1596, 1599), False, 'import sys\n'), ((1889, 1900), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1897, 1900), False, 'import sys\n'), ((2200, 2211), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2208, 2211), False, 'import sys\n')]
|
"""
Modified From https://github.com/OpenNMT/OpenNMT-tf/blob/r1/examples/library/minimal_transformer_training.py
MIT License
Copyright (c) 2017-present The OpenNMT Authors.
This example demonstrates how to train a standard Transformer model using
OpenNMT-tf as a library in about 200 lines of code. While relatively short,
this example contains some advanced concepts such as dataset bucketing and
prefetching, token-based batching, gradients accumulation, beam search, etc.
Currently, the beam search part is not easily customizable. This is expected to
be improved for TensorFlow 2.0 which is eager first.
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Use opennmt-tf-1.25.1
import argparse
import copy
from datetime import datetime
import numpy as np
import os
import sys
import tensorflow as tf
import opennmt as onmt
from opennmt import constants
from opennmt.utils import misc
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding
from examples.tensorflow.decoding.utils.bleu_score import bleu_score
from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
NUM_HEADS = 8
NUM_LAYERS = 6
HIDDEN_UNITS = 512
SIZE_PER_HEAD = 64
FFN_INNER_DIM = 2048
encoder = onmt.encoders.SelfAttentionEncoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
decoder = onmt.decoders.SelfAttentionDecoder(
num_layers=NUM_LAYERS,
num_units=HIDDEN_UNITS,
num_heads=NUM_HEADS,
ffn_inner_dim=FFN_INNER_DIM,
dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1)
def translate(args_dict):
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
model_dir = args_dict["model_dir"]
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
sampling_topk = args_dict['sampling_topk']
sampling_topp = args_dict['sampling_topp']
tf_datatype = tf.float32
max_ite = args_dict['max_iteration']
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
# Define the "base" Transformer model.
source_inputter = onmt.inputters.WordEmbedder("source_vocabulary", embedding_size=512, dtype=tf_datatype)
target_inputter = onmt.inputters.WordEmbedder("target_vocabulary", embedding_size=512, dtype=tf_datatype)
inputter = onmt.inputters.ExampleInputter(source_inputter, target_inputter)
inputter.initialize({
"source_vocabulary": args_dict["source_vocabulary"],
"target_vocabulary": args_dict["target_vocabulary"]
})
mode = tf.estimator.ModeKeys.PREDICT
np.random.seed(1)
tf.set_random_seed(1)
# Create the inference dataset.
dataset = inputter.make_inference_dataset(source_file, batch_size)
iterator = dataset.make_initializable_iterator()
source = iterator.get_next()
encoder_args = TransformerArgument(beam_width=1,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
remove_padding=True,
allow_gemm_test=False)
# Encode the source.
with tf.variable_scope("transformer/encoder"):
source_embedding = source_inputter.make_inputs(source)
source_embedding = tf.cast(source_embedding, tf_datatype)
# Using onmt fp16 for encoder.encode leads to significant accuracy drop
# So, we rewrite the encoder
# memory, _, _ = encoder.encode(source_embedding, source["length"], mode=mode)
memory = tf_encoder_opennmt(source_embedding, encoder_args, source["length"])
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = tf.cast(v, tf_datatype)
ft_encoder_result = ft_encoder_opennmt(inputs=source_embedding,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=source["length"])
# Generate the target.
with tf.variable_scope("transformer/decoder", reuse=tf.AUTO_REUSE):
target_inputter.build()
batch_size = tf.shape(memory)[0]
start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
end_token = constants.END_OF_SENTENCE_ID
target_embedding = tf.cast(target_inputter.embedding, tf_datatype)
target_ids, _, target_length, _ = decoder.dynamic_decode_and_search(
target_embedding,
start_tokens,
end_token,
vocab_size=target_inputter.vocabulary_size,
beam_width=beam_size,
memory=memory,
memory_sequence_length=source["length"],
maximum_iterations=max_seq_len)
target_vocab_rev = target_inputter.vocabulary_lookup_reverse()
target_tokens = target_vocab_rev.lookup(tf.cast(target_ids, tf.int64))
decoder_args = TransformerArgument(beam_width=beam_size,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
kernel_init_range=0.00,
bias_init_range=0.00)
decoder_args_2 = copy.deepcopy(decoder_args) # for beam search
decoder_args_2.__dict__ = copy.deepcopy(decoder_args.__dict__)
decoder_args_2.beam_width = 1 # for sampling
ft_decoder_beamsearch_args = DecodingBeamsearchArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args,
beam_search_diversity_rate)
ft_decoder_sampling_args = DecodingSamplingArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args_2,
sampling_topk,
sampling_topp)
decoding_beamsearch_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
beam_search_diversity_rate,
0,
0.0,
decoder_args)
decoding_sampling_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
0.0,
sampling_topk,
sampling_topp,
decoder_args_2)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ft_target_ids, ft_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_beamsearch_args)
ft_target_tokens = target_vocab_rev.lookup(tf.cast(ft_target_ids, tf.int64))
ft_sampling_target_ids, ft_sampling_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_sampling_args)
ft_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_sampling_target_ids, tf.int64))
# ### TF Sampling Decoding ###
tf_sampling_target_ids, tf_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=0)
# tf_sampling_target_tokens: [batch_size, seq_len]
tf_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(tf_sampling_target_ids, tf.int64))
# ### end of TF BeamSearch Decoding ###
### OP BeamSearch Decoder ###
ft_decoder_beamsearch_target_ids, ft_decoder_beamsearch_target_length, _, _, _ = tf_beamsearch_decoding(memory,
source["length"],
target_embedding,
ft_decoder_beamsearch_args,
decoder_type=1)
# ft_decoder_beamsearch_target_tokens: [batch_size, beam_width, seq_len]
ft_decoder_beamsearch_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_beamsearch_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
### OP Sampling Decoder ###
ft_decoder_sampling_target_ids, ft_decoder_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=1)
ft_decoder_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_sampling_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
class TranslationResult(object):
def __init__(self, token_op, length_op, name):
self.token_op = token_op
self.length_op = length_op
self.name = name
self.file_name = name + ".txt"
self.token_list = []
self.length_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.bleu_score = None
translation_result_list = []
if time_args != "":
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling-for-warmup"))
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult(
target_tokens, target_length, "tf-decoding-beamsearch"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_beamsearch_target_tokens, ft_decoder_beamsearch_target_length, "ft-decoder-beamsearch"))
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult(
ft_target_tokens, ft_target_length, "ft-decoding-beamsearch"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling"))
if time_args.find("4") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_sampling_target_tokens, ft_decoder_sampling_target_length, "ft-decoder-sampling"))
if time_args.find("5") != -1:
translation_result_list.append(TranslationResult(
ft_sampling_target_tokens, ft_sampling_target_length, "ft-decoding-sampling"))
# Iterates on the dataset.
float_checkpoint_path = tf.train.latest_checkpoint(model_dir)
half_checkpoint_path = tf.train.latest_checkpoint(model_dir + "_fp16")
float_var_list = []
half_var_list = []
for var in tf.global_variables():
if var.dtype.base_dtype == tf.float32:
float_var_list.append(var)
elif var.dtype.base_dtype == tf.float16:
half_var_list.append(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
for i in range(len(translation_result_list)):
with tf.Session(config=config) as sess:
if(len(float_var_list) > 0):
float_saver = tf.train.Saver(float_var_list)
float_saver.restore(sess, float_checkpoint_path)
if(len(half_var_list) > 0):
half_saver = tf.train.Saver(half_var_list)
half_saver.restore(sess, half_checkpoint_path)
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
t1 = datetime.now()
while True:
try:
batch_tokens, batch_length = sess.run([translation_result_list[i].token_op,
translation_result_list[i].length_op])
for tokens, length in zip(batch_tokens, batch_length):
# misc.print_bytes(b" ".join(tokens[0][:length[0] - 1]))
if translation_result_list[i].name.find("beamsearch") != -1:
translation_result_list[i].token_list.append(
b" ".join(tokens[0][:length[0] - 1]).decode("UTF-8"))
else:
translation_result_list[i].token_list.append(b" ".join(tokens[:length - 1]).decode("UTF-8"))
translation_result_list[i].batch_num += 1
if translation_result_list[i].name == "tf-decoding-sampling-for-warmup" and translation_result_list[i].batch_num > 20:
break
if translation_result_list[i].batch_num >= max_ite:
break
except tf.errors.OutOfRangeError:
break
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
translation_result_list[i].execution_time = time_sum
with open(translation_result_list[i].file_name, "w") as file_b:
for s in translation_result_list[i].token_list:
file_b.write(s)
file_b.write("\n")
ref_file_path = "./.ref_file.txt"
os.system("head -n %d %s > %s" % (len(translation_result_list[i].token_list), tgt_file, ref_file_path))
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].file_name, ref_file_path)
os.system("rm {}".format(ref_file_path))
for t in translation_result_list:
if t.name == "tf-decoding-sampling-for-warmup":
continue
print("[INFO] {} translates {} batches taking {:.2f} sec to translate {} tokens, BLEU score: {:.2f}, {:.0f} tokens/sec.".format(
t.name, t.batch_num, t.execution_time, t.bleu_score.sys_len, t.bleu_score.score, t.bleu_score.sys_len / t.execution_time))
return translation_result_list
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
help='batch size (default: 1)')
parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
help='beam width (default: 4)')
parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
help='max sequence length (default: 200)')
parser.add_argument("--source", default="../examples/tensorflow/decoding/utils/translation/test.en",
help="Path to the source file.")
parser.add_argument("--target", default="../examples/tensorflow/decoding/utils/translation/test.de",
help="Path to the target file.")
parser.add_argument("--source_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the source vocabulary.")
parser.add_argument("--target_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
help="Path to the target vocabulary.")
parser.add_argument("--model_dir", default="../translation/ckpt",
help="Directory where checkpoint are written.")
parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
help='''
Test the time of which one (default: '' (not test anyone) );
'': not test anyone
'0': test tf_decoding_beamsearch
'1': test op_decoder_beamsearch
'2': test op_decoding_beamsearch
'3': test tf_decoding_sampling
'4': test op_decoder_sampling
'5': test op_decoding_sampling
'e.g., if you want to test op_decoder_beamsearch and op_decoding_sampling,
then you need to use -time '15' ''')
parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
help='deviersity rate of beam search. default is 0. When diversity rate = 0, it is equivalent to the naive beams earch.')
parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
help='Candidate (k) value of top k sampling in decoding. Default is 1.')
parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
help='data type (default: fp32)', choices=['fp32', 'fp16'])
parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
help='Maximum iteraiton for translation, default is 100000 (as large as possible to run all test set).')
args = parser.parse_args()
translate(vars(args))
# example script
# python ../examples/tensorflow/decoding/translate_example.py --source ../examples/tensorflow/decoding/utils/translation/test.en --target ../examples/tensorflow/decoding/utils/translation/test.de --source_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --target_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --model_dir ../translation/ckpt/ -time 02
if __name__ == "__main__":
main()
|
[
"opennmt.encoders.SelfAttentionEncoder",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.get_collection",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"tensorflow.train.latest_checkpoint",
"examples.tensorflow.decoder.utils.common.DecodingArgumentNew",
"tensorflow.tables_initializer",
"examples.tensorflow.decoding.utils.bleu_score.bleu_score",
"examples.tensorflow.decoder.utils.decoding.tf_beamsearch_decoding",
"sys.path.append",
"tensorflow.variable_scope",
"tensorflow.set_random_seed",
"tensorflow.cast",
"datetime.datetime.now",
"copy.deepcopy",
"examples.tensorflow.encoder.utils.encoder.ft_encoder_opennmt",
"tensorflow.train.Saver",
"os.path.realpath",
"opennmt.inputters.WordEmbedder",
"tensorflow.Session",
"examples.tensorflow.decoder.utils.common.DecodingBeamsearchArgument",
"opennmt.inputters.ExampleInputter",
"examples.tensorflow.decoder.utils.decoding.tf_sampling_decoding",
"examples.tensorflow.encoder.utils.encoder.tf_encoder_opennmt",
"examples.tensorflow.decoder.utils.common.TransformerArgument",
"tensorflow.fill",
"tensorflow.shape",
"examples.tensorflow.decoding.utils.ft_decoding.ft_decoding",
"examples.tensorflow.decoder.utils.common.DecodingSamplingArgument",
"opennmt.decoders.SelfAttentionDecoder"
] |
[((1522, 1561), 'sys.path.append', 'sys.path.append', (["(dir_path + '/../../..')"], {}), "(dir_path + '/../../..')\n", (1537, 1561), False, 'import sys\n'), ((2406, 2601), 'opennmt.encoders.SelfAttentionEncoder', 'onmt.encoders.SelfAttentionEncoder', ([], {'num_layers': 'NUM_LAYERS', 'num_units': 'HIDDEN_UNITS', 'num_heads': 'NUM_HEADS', 'ffn_inner_dim': 'FFN_INNER_DIM', 'dropout': '(0.1)', 'attention_dropout': '(0.1)', 'relu_dropout': '(0.1)'}), '(num_layers=NUM_LAYERS, num_units=\n HIDDEN_UNITS, num_heads=NUM_HEADS, ffn_inner_dim=FFN_INNER_DIM, dropout\n =0.1, attention_dropout=0.1, relu_dropout=0.1)\n', (2440, 2601), True, 'import opennmt as onmt\n'), ((2631, 2826), 'opennmt.decoders.SelfAttentionDecoder', 'onmt.decoders.SelfAttentionDecoder', ([], {'num_layers': 'NUM_LAYERS', 'num_units': 'HIDDEN_UNITS', 'num_heads': 'NUM_HEADS', 'ffn_inner_dim': 'FFN_INNER_DIM', 'dropout': '(0.1)', 'attention_dropout': '(0.1)', 'relu_dropout': '(0.1)'}), '(num_layers=NUM_LAYERS, num_units=\n HIDDEN_UNITS, num_heads=NUM_HEADS, ffn_inner_dim=FFN_INNER_DIM, dropout\n =0.1, attention_dropout=0.1, relu_dropout=0.1)\n', (2665, 2826), True, 'import opennmt as onmt\n'), ((1494, 1520), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1510, 1520), False, 'import os\n'), ((3719, 3811), 'opennmt.inputters.WordEmbedder', 'onmt.inputters.WordEmbedder', (['"""source_vocabulary"""'], {'embedding_size': '(512)', 'dtype': 'tf_datatype'}), "('source_vocabulary', embedding_size=512, dtype=\n tf_datatype)\n", (3746, 3811), True, 'import opennmt as onmt\n'), ((3829, 3921), 'opennmt.inputters.WordEmbedder', 'onmt.inputters.WordEmbedder', (['"""target_vocabulary"""'], {'embedding_size': '(512)', 'dtype': 'tf_datatype'}), "('target_vocabulary', embedding_size=512, dtype=\n tf_datatype)\n", (3856, 3921), True, 'import opennmt as onmt\n'), ((3933, 3997), 'opennmt.inputters.ExampleInputter', 'onmt.inputters.ExampleInputter', (['source_inputter', 'target_inputter'], {}), '(source_inputter, target_inputter)\n', (3963, 3997), True, 'import opennmt as onmt\n'), ((4199, 4216), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (4213, 4216), True, 'import numpy as np\n'), ((4221, 4242), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (4239, 4242), True, 'import tensorflow as tf\n'), ((4457, 4676), 'examples.tensorflow.decoder.utils.common.TransformerArgument', 'TransformerArgument', ([], {'beam_width': '(1)', 'head_num': 'NUM_HEADS', 'size_per_head': 'SIZE_PER_HEAD', 'inter_size': '(NUM_HEADS * SIZE_PER_HEAD * 4)', 'num_layer': 'NUM_LAYERS', 'dtype': 'tf_datatype', 'remove_padding': '(True)', 'allow_gemm_test': '(False)'}), '(beam_width=1, head_num=NUM_HEADS, size_per_head=\n SIZE_PER_HEAD, inter_size=NUM_HEADS * SIZE_PER_HEAD * 4, num_layer=\n NUM_LAYERS, dtype=tf_datatype, remove_padding=True, allow_gemm_test=False)\n', (4476, 4676), False, 'from examples.tensorflow.decoder.utils.common import TransformerArgument\n'), ((11134, 11244), 'examples.tensorflow.decoder.utils.decoding.tf_sampling_decoding', 'tf_sampling_decoding', (['memory', "source['length']", 'target_embedding', 'ft_decoder_sampling_args'], {'decoder_type': '(0)'}), "(memory, source['length'], target_embedding,\n ft_decoder_sampling_args, decoder_type=0)\n", (11154, 11244), False, 'from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding\n'), ((11868, 11982), 'examples.tensorflow.decoder.utils.decoding.tf_beamsearch_decoding', 'tf_beamsearch_decoding', (['memory', "source['length']", 'target_embedding', 'ft_decoder_beamsearch_args'], {'decoder_type': '(1)'}), "(memory, source['length'], target_embedding,\n ft_decoder_beamsearch_args, decoder_type=1)\n", (11890, 11982), False, 'from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding\n'), ((12754, 12864), 'examples.tensorflow.decoder.utils.decoding.tf_sampling_decoding', 'tf_sampling_decoding', (['memory', "source['length']", 'target_embedding', 'ft_decoder_sampling_args'], {'decoder_type': '(1)'}), "(memory, source['length'], target_embedding,\n ft_decoder_sampling_args, decoder_type=1)\n", (12774, 12864), False, 'from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding\n'), ((15223, 15260), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), '(model_dir)\n', (15249, 15260), True, 'import tensorflow as tf\n'), ((15288, 15335), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["(model_dir + '_fp16')"], {}), "(model_dir + '_fp16')\n", (15314, 15335), True, 'import tensorflow as tf\n'), ((15399, 15420), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (15418, 15420), True, 'import tensorflow as tf\n'), ((15609, 15625), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (15623, 15625), True, 'import tensorflow as tf\n'), ((18566, 18645), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (18589, 18645), False, 'import argparse\n'), ((4971, 5011), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transformer/encoder"""'], {}), "('transformer/encoder')\n", (4988, 5011), True, 'import tensorflow as tf\n'), ((5103, 5141), 'tensorflow.cast', 'tf.cast', (['source_embedding', 'tf_datatype'], {}), '(source_embedding, tf_datatype)\n', (5110, 5141), True, 'import tensorflow as tf\n'), ((5364, 5432), 'examples.tensorflow.encoder.utils.encoder.tf_encoder_opennmt', 'tf_encoder_opennmt', (['source_embedding', 'encoder_args', "source['length']"], {}), "(source_embedding, encoder_args, source['length'])\n", (5382, 5432), False, 'from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt\n'), ((5457, 5505), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (5474, 5505), True, 'import tensorflow as tf\n'), ((5670, 5820), 'examples.tensorflow.encoder.utils.encoder.ft_encoder_opennmt', 'ft_encoder_opennmt', ([], {'inputs': 'source_embedding', 'encoder_args': 'encoder_args', 'encoder_vars_dict': 'encoder_variables_dict', 'sequence_length': "source['length']"}), "(inputs=source_embedding, encoder_args=encoder_args,\n encoder_vars_dict=encoder_variables_dict, sequence_length=source['length'])\n", (5688, 5820), False, 'from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt\n'), ((5995, 6056), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transformer/decoder"""'], {'reuse': 'tf.AUTO_REUSE'}), "('transformer/decoder', reuse=tf.AUTO_REUSE)\n", (6012, 6056), True, 'import tensorflow as tf\n'), ((6154, 6207), 'tensorflow.fill', 'tf.fill', (['[batch_size]', 'constants.START_OF_SENTENCE_ID'], {}), '([batch_size], constants.START_OF_SENTENCE_ID)\n', (6161, 6207), True, 'import tensorflow as tf\n'), ((6284, 6331), 'tensorflow.cast', 'tf.cast', (['target_inputter.embedding', 'tf_datatype'], {}), '(target_inputter.embedding, tf_datatype)\n', (6291, 6331), True, 'import tensorflow as tf\n'), ((6876, 7103), 'examples.tensorflow.decoder.utils.common.TransformerArgument', 'TransformerArgument', ([], {'beam_width': 'beam_size', 'head_num': 'NUM_HEADS', 'size_per_head': 'SIZE_PER_HEAD', 'inter_size': '(NUM_HEADS * SIZE_PER_HEAD * 4)', 'num_layer': 'NUM_LAYERS', 'dtype': 'tf_datatype', 'kernel_init_range': '(0.0)', 'bias_init_range': '(0.0)'}), '(beam_width=beam_size, head_num=NUM_HEADS, size_per_head\n =SIZE_PER_HEAD, inter_size=NUM_HEADS * SIZE_PER_HEAD * 4, num_layer=\n NUM_LAYERS, dtype=tf_datatype, kernel_init_range=0.0, bias_init_range=0.0)\n', (6895, 7103), False, 'from examples.tensorflow.decoder.utils.common import TransformerArgument\n'), ((7419, 7446), 'copy.deepcopy', 'copy.deepcopy', (['decoder_args'], {}), '(decoder_args)\n', (7432, 7446), False, 'import copy\n'), ((7500, 7536), 'copy.deepcopy', 'copy.deepcopy', (['decoder_args.__dict__'], {}), '(decoder_args.__dict__)\n', (7513, 7536), False, 'import copy\n'), ((7629, 7814), 'examples.tensorflow.decoder.utils.common.DecodingBeamsearchArgument', 'DecodingBeamsearchArgument', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', 'decoder_args', 'beam_search_diversity_rate'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len,\n decoder_args, beam_search_diversity_rate)\n', (7655, 7814), False, 'from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument\n'), ((8162, 8349), 'examples.tensorflow.decoder.utils.common.DecodingSamplingArgument', 'DecodingSamplingArgument', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', 'decoder_args_2', 'sampling_topk', 'sampling_topp'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len,\n decoder_args_2, sampling_topk, sampling_topp)\n', (8186, 8349), False, 'from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument\n'), ((8737, 8923), 'examples.tensorflow.decoder.utils.common.DecodingArgumentNew', 'DecodingArgumentNew', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', 'beam_search_diversity_rate', '(0)', '(0.0)', 'decoder_args'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len,\n beam_search_diversity_rate, 0, 0.0, decoder_args)\n', (8756, 8923), False, 'from examples.tensorflow.decoder.utils.common import DecodingArgumentNew\n'), ((9334, 9521), 'examples.tensorflow.decoder.utils.common.DecodingArgumentNew', 'DecodingArgumentNew', (['target_inputter.vocabulary_size', 'constants.START_OF_SENTENCE_ID', 'constants.END_OF_SENTENCE_ID', 'max_seq_len', '(0.0)', 'sampling_topk', 'sampling_topp', 'decoder_args_2'], {}), '(target_inputter.vocabulary_size, constants.\n START_OF_SENTENCE_ID, constants.END_OF_SENTENCE_ID, max_seq_len, 0.0,\n sampling_topk, sampling_topp, decoder_args_2)\n', (9353, 9521), False, 'from examples.tensorflow.decoder.utils.common import DecodingArgumentNew\n'), ((9904, 9952), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (9921, 9952), True, 'import tensorflow as tf\n'), ((10004, 10110), 'examples.tensorflow.decoding.utils.ft_decoding.ft_decoding', 'ft_decoding', (['ft_encoder_result', "source['length']", 'target_embedding', 'all_vars', 'decoding_beamsearch_args'], {}), "(ft_encoder_result, source['length'], target_embedding, all_vars,\n decoding_beamsearch_args)\n", (10015, 10110), False, 'from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding\n'), ((10514, 10618), 'examples.tensorflow.decoding.utils.ft_decoding.ft_decoding', 'ft_decoding', (['ft_encoder_result', "source['length']", 'target_embedding', 'all_vars', 'decoding_sampling_args'], {}), "(ft_encoder_result, source['length'], target_embedding, all_vars,\n decoding_sampling_args)\n", (10525, 10618), False, 'from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding\n'), ((11661, 11702), 'tensorflow.cast', 'tf.cast', (['tf_sampling_target_ids', 'tf.int64'], {}), '(tf_sampling_target_ids, tf.int64)\n', (11668, 11702), True, 'import tensorflow as tf\n'), ((12555, 12606), 'tensorflow.cast', 'tf.cast', (['ft_decoder_beamsearch_target_ids', 'tf.int64'], {}), '(ft_decoder_beamsearch_target_ids, tf.int64)\n', (12562, 12606), True, 'import tensorflow as tf\n'), ((13298, 13347), 'tensorflow.cast', 'tf.cast', (['ft_decoder_sampling_target_ids', 'tf.int64'], {}), '(ft_decoder_sampling_target_ids, tf.int64)\n', (13305, 13347), True, 'import tensorflow as tf\n'), ((5618, 5641), 'tensorflow.cast', 'tf.cast', (['v', 'tf_datatype'], {}), '(v, tf_datatype)\n', (5625, 5641), True, 'import tensorflow as tf\n'), ((6111, 6127), 'tensorflow.shape', 'tf.shape', (['memory'], {}), '(memory)\n', (6119, 6127), True, 'import tensorflow as tf\n'), ((6821, 6850), 'tensorflow.cast', 'tf.cast', (['target_ids', 'tf.int64'], {}), '(target_ids, tf.int64)\n', (6828, 6850), True, 'import tensorflow as tf\n'), ((10410, 10442), 'tensorflow.cast', 'tf.cast', (['ft_target_ids', 'tf.int64'], {}), '(ft_target_ids, tf.int64)\n', (10417, 10442), True, 'import tensorflow as tf\n'), ((10999, 11040), 'tensorflow.cast', 'tf.cast', (['ft_sampling_target_ids', 'tf.int64'], {}), '(ft_sampling_target_ids, tf.int64)\n', (11006, 11040), True, 'import tensorflow as tf\n'), ((15732, 15757), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (15742, 15757), True, 'import tensorflow as tf\n'), ((16204, 16218), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16216, 16218), False, 'from datetime import datetime\n'), ((17439, 17453), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17451, 17453), False, 'from datetime import datetime\n'), ((17999, 18062), 'examples.tensorflow.decoding.utils.bleu_score.bleu_score', 'bleu_score', (['translation_result_list[i].file_name', 'ref_file_path'], {}), '(translation_result_list[i].file_name, ref_file_path)\n', (18009, 18062), False, 'from examples.tensorflow.decoding.utils.bleu_score import bleu_score\n'), ((15838, 15868), 'tensorflow.train.Saver', 'tf.train.Saver', (['float_var_list'], {}), '(float_var_list)\n', (15852, 15868), True, 'import tensorflow as tf\n'), ((16003, 16032), 'tensorflow.train.Saver', 'tf.train.Saver', (['half_var_list'], {}), '(half_var_list)\n', (16017, 16032), True, 'import tensorflow as tf\n'), ((16118, 16141), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (16139, 16141), True, 'import tensorflow as tf\n')]
|
import numpy as np
from Kuru import QuadratureRule, FunctionSpace , Mesh
from Kuru.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from Kuru.VariationalPrinciple._GeometricStiffness_ import GeometricStiffnessIntegrand as GetGeomStiffness
from .DisplacementApproachIndices import FillGeometricB
#from ._MassIntegrand_ import __MassIntegrand__, __ConstantMassIntegrand__
__all__ = ["VariationalPrinciple"]
class VariationalPrinciple(object):
energy_dissipation = []
internal_energy = []
kinetic_energy = []
external_energy = []
power_dissipation = []
internal_power = []
kinetic_power = []
external_power = []
def __init__(self, mesh, variables_order=(1,0),
analysis_type='static', analysis_nature='nonlinear', fields='mechanics',
quadrature_rules=None, median=None, quadrature_type=None,
function_spaces=None, compute_post_quadrature=True):
self.variables_order = variables_order
self.nvar = None
self.ndim = mesh.points.shape[1]
if isinstance(self.variables_order,int):
self.variables_order = tuple(self.variables_order)
self.quadrature_rules = quadrature_rules
self.quadrature_type = quadrature_type
self.function_spaces = function_spaces
self.median = median
self.analysis_type = analysis_type
self.analysis_nature = analysis_nature
self.fields = fields
self.compute_post_quadrature = compute_post_quadrature
# GET NUMBER OF VARIABLES
self.GetNumberOfVariables()
def GetQuadratureOrder(self, C, element_type, quadrature_degree=None):
"""Finds quadrature degree/strength for a given polynomial order C=p-1 [where p is polynomial degree]"""
if quadrature_degree is None:
if element_type == "tri" or element_type == "tet":
norder = 2*C if C > 0 else 1
norder_post = 2*(C+1)
else:
norder = C+2
# ACTUAL
# norder_post = 2*(C+2)
# ALTHOUGH THIS INTEGRATES EXACTLY
norder_post = C+2
else:
norder = quadrature_degree
if element_type == "tri" or element_type == "tet":
norder_post = 2*quadrature_degree
else:
norder_post = quadrature_degree
return norder, norder_post
def GetQuadraturesAndFunctionSpaces(self, mesh, variables_order=(1,),
quadrature_rules=None, quadrature_type=None, function_spaces=None, compute_post_quadrature=True,
equally_spaced_bases=False, quadrature_degree=None):
""""The default function for computing quadrature rules and function spaces for equall order single
and multi-physics/fields problems"""
C = mesh.InferPolynomialDegree() - 1
mesh.InferBoundaryElementType()
if quadrature_rules == None and self.quadrature_rules == None:
# OPTION FOR QUADRATURE TECHNIQUE FOR TRIS AND TETS
optimal_quadrature = 3
if mesh.element_type == "quad" or mesh.element_type == "hex":
if quadrature_type == "wv":
optimal_quadrature = 4
norder, norder_post = self.GetQuadratureOrder(C, mesh.element_type, quadrature_degree=quadrature_degree)
# GET QUADRATURE
quadrature = QuadratureRule(optimal=optimal_quadrature, norder=norder, mesh_type=mesh.element_type)
if self.compute_post_quadrature:
# COMPUTE INTERPOLATION FUNCTIONS AT ALL INTEGRATION POINTS FOR POST-PROCESSING
post_quadrature = QuadratureRule(optimal=optimal_quadrature, norder=norder_post, mesh_type=mesh.element_type)
else:
post_quadrature = None
# BOUNDARY QUADRATURE
bquadrature = QuadratureRule(optimal=optimal_quadrature, norder=C+2, mesh_type=mesh.boundary_element_type)
self.quadrature_rules = (quadrature,post_quadrature,bquadrature)
else:
self.quadrature_rules = quadrature_rules
if function_spaces == None and self.function_spaces == None:
# CREATE FUNCTIONAL SPACES
function_space = FunctionSpace(mesh, self.quadrature_rules[0], p=C+1, equally_spaced=equally_spaced_bases)
if self.compute_post_quadrature:
post_function_space = FunctionSpace(mesh, self.quadrature_rules[1], p=C+1, equally_spaced=equally_spaced_bases)
else:
post_function_space = None
# CREATE BOUNDARY FUNCTIONAL SPACES
bfunction_space = FunctionSpace(mesh.CreateDummyLowerDimensionalMesh(),
self.quadrature_rules[2], p=C+1, equally_spaced=equally_spaced_bases)
self.function_spaces = (function_space,post_function_space,bfunction_space)
else:
self.function_spaces = function_spaces
local_size = self.function_spaces[0].Bases.shape[0]*self.nvar
self.local_rows = np.repeat(np.arange(0,local_size),local_size,axis=0)
self.local_columns = np.tile(np.arange(0,local_size),local_size)
self.local_size = local_size
# FOR MASS
local_size_m = self.function_spaces[0].Bases.shape[0]*self.ndim
self.local_rows_mass = np.repeat(np.arange(0,local_size_m),local_size_m,axis=0)
self.local_columns_mass = np.tile(np.arange(0,local_size_m),local_size_m)
self.local_size_m = local_size_m
def GetNumberOfVariables(self):
"""Returns (self.nvar) i.e. number of variables/unknowns per node, for the formulation.
Note that self.nvar does not take into account the unknowns which get condensated
"""
# nvar = 0
# for i in self.variables_order:
# # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT
# if i!=0:
# if mesh.element_type == "tri":
# nvar += (i+1)*(i+2) // 2
# elif mesh.element_type == "tet":
# nvar += (i+1)*(i+2)*(i+3) // 6
# elif mesh.element_type == "quad":
# nvar += (i+1)**2
# elif mesh.element_type == "hex":
# nvar += (i+1)**3
# nvar = sum(self.variables_order)
if self.nvar == None:
self.nvar = self.ndim
return self.nvar
def FindIndices(self,A):
return self.local_rows, self.local_columns, A.ravel()
def GeometricStiffnessIntegrand(self, SpatialGradient, CauchyStressTensor):
"""Applies to displacement based, displacement potential based and all mixed
formulations that involve static condensation"""
ndim = self.ndim
nvar = self.nvar
B = np.zeros((nvar*SpatialGradient.shape[0],ndim*ndim))
S = np.zeros((ndim*ndim,ndim*ndim))
SpatialGradient = SpatialGradient.T.copy('c')
FillGeometricB(B,SpatialGradient,S,CauchyStressTensor,ndim,nvar)
BDB = np.dot(np.dot(B,S),B.T)
return BDB
def __GeometricStiffnessIntegrand__(self, SpatialGradient, CauchyStressTensor, detJ):
"""Applies to displacement based formulation"""
return GetGeomStiffness(np.ascontiguousarray(SpatialGradient),CauchyStressTensor, detJ, self.nvar)
def VolumetricStiffnessIntegrand(self, material, SpatialGradient, detJ, dV):
"""Computes the volumetric stiffness using Hu-Washizu on Mean Dilatation method"""
if material.has_low_level_dispatcher:
from ._VolumetricStiffness_ import _VolumetricStiffnessIntegrand_
stiffness, MeanVolume = _VolumetricStiffnessIntegrand_(material,
np.ascontiguousarray(SpatialGradient), np.ascontiguousarray(detJ),
np.ascontiguousarray(dV), self.nvar)
else:
MaterialVolume = np.sum(dV)
if material.has_state_variables and material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,material.id_growth])
CurrentElasticVolume = np.sum(dve)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageDeformationv = np.einsum('i,ijk,i->jk',material.StateVariables[:,material.id_density],SpatialGradient,dve)
AverageDeformationv = AverageDeformationv.flatten()
AverageDeformationu = np.einsum('ijk,i->jk',SpatialGradient,dve)
AverageDeformationu = AverageDeformationu.flatten()
stiffness = np.einsum('i,j->ij',AverageDeformationv,AverageDeformationu)
MeanVolume = (CurrentElasticVolume-MaterialVolume)/MaterialVolume
elif material.has_state_variables and not material.has_growth_remodeling:
CurrentElasticVolume = np.sum(detJ)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageDeformationv = np.einsum('i,ijk,i->jk',material.StateVariables[:,material.id_density],SpatialGradient,detJ)
AverageDeformationv = AverageDeformationv.flatten()
AverageDeformationu = np.einsum('ijk,i->jk',SpatialGradient,detJ)
AverageDeformationu = AverageDeformationu.flatten()
stiffness = np.einsum('i,j->ij',AverageDeformationv,AverageDeformationu)
MeanVolume = (CurrentElasticVolume-MaterialVolume)/MaterialVolume
elif not material.has_state_variables and not material.has_growth_remodeling:
CurrentVolume = np.sum(detJ)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageSpatialGradient = np.einsum('ijk,i->jk',SpatialGradient,detJ)
AverageSpatialGradient = AverageSpatialGradient.flatten()
stiffness = np.einsum('i,j->ij',AverageSpatialGradient,AverageSpatialGradient)
MeanVolume = (CurrentVolume-MaterialVolume)/MaterialVolume
stiffness = np.true_divide(stiffness,MaterialVolume)
material.pressure = material.kappa*MeanVolume
stiffness *= material.kappa
return stiffness
|
[
"numpy.sum",
"numpy.true_divide",
"numpy.zeros",
"Kuru.FunctionSpace",
"numpy.einsum",
"Kuru.QuadratureRule",
"numpy.arange",
"numpy.dot",
"numpy.ascontiguousarray"
] |
[((6804, 6860), 'numpy.zeros', 'np.zeros', (['(nvar * SpatialGradient.shape[0], ndim * ndim)'], {}), '((nvar * SpatialGradient.shape[0], ndim * ndim))\n', (6812, 6860), True, 'import numpy as np\n'), ((6868, 6904), 'numpy.zeros', 'np.zeros', (['(ndim * ndim, ndim * ndim)'], {}), '((ndim * ndim, ndim * ndim))\n', (6876, 6904), True, 'import numpy as np\n'), ((3416, 3507), 'Kuru.QuadratureRule', 'QuadratureRule', ([], {'optimal': 'optimal_quadrature', 'norder': 'norder', 'mesh_type': 'mesh.element_type'}), '(optimal=optimal_quadrature, norder=norder, mesh_type=mesh.\n element_type)\n', (3430, 3507), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((3888, 3987), 'Kuru.QuadratureRule', 'QuadratureRule', ([], {'optimal': 'optimal_quadrature', 'norder': '(C + 2)', 'mesh_type': 'mesh.boundary_element_type'}), '(optimal=optimal_quadrature, norder=C + 2, mesh_type=mesh.\n boundary_element_type)\n', (3902, 3987), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((4265, 4361), 'Kuru.FunctionSpace', 'FunctionSpace', (['mesh', 'self.quadrature_rules[0]'], {'p': '(C + 1)', 'equally_spaced': 'equally_spaced_bases'}), '(mesh, self.quadrature_rules[0], p=C + 1, equally_spaced=\n equally_spaced_bases)\n', (4278, 4361), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((5070, 5094), 'numpy.arange', 'np.arange', (['(0)', 'local_size'], {}), '(0, local_size)\n', (5079, 5094), True, 'import numpy as np\n'), ((5150, 5174), 'numpy.arange', 'np.arange', (['(0)', 'local_size'], {}), '(0, local_size)\n', (5159, 5174), True, 'import numpy as np\n'), ((5356, 5382), 'numpy.arange', 'np.arange', (['(0)', 'local_size_m'], {}), '(0, local_size_m)\n', (5365, 5382), True, 'import numpy as np\n'), ((5445, 5471), 'numpy.arange', 'np.arange', (['(0)', 'local_size_m'], {}), '(0, local_size_m)\n', (5454, 5471), True, 'import numpy as np\n'), ((7050, 7062), 'numpy.dot', 'np.dot', (['B', 'S'], {}), '(B, S)\n', (7056, 7062), True, 'import numpy as np\n'), ((7267, 7304), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['SpatialGradient'], {}), '(SpatialGradient)\n', (7287, 7304), True, 'import numpy as np\n'), ((7896, 7906), 'numpy.sum', 'np.sum', (['dV'], {}), '(dV)\n', (7902, 7906), True, 'import numpy as np\n'), ((10128, 10169), 'numpy.true_divide', 'np.true_divide', (['stiffness', 'MaterialVolume'], {}), '(stiffness, MaterialVolume)\n', (10142, 10169), True, 'import numpy as np\n'), ((3678, 3774), 'Kuru.QuadratureRule', 'QuadratureRule', ([], {'optimal': 'optimal_quadrature', 'norder': 'norder_post', 'mesh_type': 'mesh.element_type'}), '(optimal=optimal_quadrature, norder=norder_post, mesh_type=\n mesh.element_type)\n', (3692, 3774), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((4438, 4534), 'Kuru.FunctionSpace', 'FunctionSpace', (['mesh', 'self.quadrature_rules[1]'], {'p': '(C + 1)', 'equally_spaced': 'equally_spaced_bases'}), '(mesh, self.quadrature_rules[1], p=C + 1, equally_spaced=\n equally_spaced_bases)\n', (4451, 4534), False, 'from Kuru import QuadratureRule, FunctionSpace, Mesh\n'), ((7733, 7770), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['SpatialGradient'], {}), '(SpatialGradient)\n', (7753, 7770), True, 'import numpy as np\n'), ((7772, 7798), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['detJ'], {}), '(detJ)\n', (7792, 7798), True, 'import numpy as np\n'), ((7816, 7840), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dV'], {}), '(dV)\n', (7836, 7840), True, 'import numpy as np\n'), ((8009, 8077), 'numpy.true_divide', 'np.true_divide', (['detJ', 'material.StateVariables[:, material.id_growth]'], {}), '(detJ, material.StateVariables[:, material.id_growth])\n', (8023, 8077), True, 'import numpy as np\n'), ((8115, 8126), 'numpy.sum', 'np.sum', (['dve'], {}), '(dve)\n', (8121, 8126), True, 'import numpy as np\n'), ((8277, 8376), 'numpy.einsum', 'np.einsum', (['"""i,ijk,i->jk"""', 'material.StateVariables[:, material.id_density]', 'SpatialGradient', 'dve'], {}), "('i,ijk,i->jk', material.StateVariables[:, material.id_density],\n SpatialGradient, dve)\n", (8286, 8376), True, 'import numpy as np\n'), ((8475, 8519), 'numpy.einsum', 'np.einsum', (['"""ijk,i->jk"""', 'SpatialGradient', 'dve'], {}), "('ijk,i->jk', SpatialGradient, dve)\n", (8484, 8519), True, 'import numpy as np\n'), ((8614, 8676), 'numpy.einsum', 'np.einsum', (['"""i,j->ij"""', 'AverageDeformationv', 'AverageDeformationu'], {}), "('i,j->ij', AverageDeformationv, AverageDeformationu)\n", (8623, 8676), True, 'import numpy as np\n'), ((8882, 8894), 'numpy.sum', 'np.sum', (['detJ'], {}), '(detJ)\n', (8888, 8894), True, 'import numpy as np\n'), ((9045, 9145), 'numpy.einsum', 'np.einsum', (['"""i,ijk,i->jk"""', 'material.StateVariables[:, material.id_density]', 'SpatialGradient', 'detJ'], {}), "('i,ijk,i->jk', material.StateVariables[:, material.id_density],\n SpatialGradient, detJ)\n", (9054, 9145), True, 'import numpy as np\n'), ((9244, 9289), 'numpy.einsum', 'np.einsum', (['"""ijk,i->jk"""', 'SpatialGradient', 'detJ'], {}), "('ijk,i->jk', SpatialGradient, detJ)\n", (9253, 9289), True, 'import numpy as np\n'), ((9384, 9446), 'numpy.einsum', 'np.einsum', (['"""i,j->ij"""', 'AverageDeformationv', 'AverageDeformationu'], {}), "('i,j->ij', AverageDeformationv, AverageDeformationu)\n", (9393, 9446), True, 'import numpy as np\n'), ((9649, 9661), 'numpy.sum', 'np.sum', (['detJ'], {}), '(detJ)\n', (9655, 9661), True, 'import numpy as np\n'), ((9815, 9860), 'numpy.einsum', 'np.einsum', (['"""ijk,i->jk"""', 'SpatialGradient', 'detJ'], {}), "('ijk,i->jk', SpatialGradient, detJ)\n", (9824, 9860), True, 'import numpy as np\n'), ((9961, 10029), 'numpy.einsum', 'np.einsum', (['"""i,j->ij"""', 'AverageSpatialGradient', 'AverageSpatialGradient'], {}), "('i,j->ij', AverageSpatialGradient, AverageSpatialGradient)\n", (9970, 10029), True, 'import numpy as np\n')]
|
"""Tests for the azure_lightning_flask application"""
import unittest
from mock import patch
from azure.common import AzureMissingResourceHttpError
from azure_lightning_flask.application import create_app
from tests.test_config import TestConfig
class TestApplication(unittest.TestCase):
"""Tests for the azure_lightning_flask application"""
class ContentRowResponse(object): # pylint: disable=too-few-public-methods
"""Simulated content response from Azure Table"""
content = '<html></html>'
class ActiveRowResponse(object): # pylint: disable=too-few-public-methods
"""Simulated active row response indicating active revision"""
content = '{0}:ActiveRevision'.format(TestConfig.APP_NAME)
def setUp(self):
self.active_row_reponse = self.ActiveRowResponse()
self.content_row_response = self.ContentRowResponse()
self.default_revision_responses = iter([
self.ActiveRowResponse(),
self.ContentRowResponse()
])
self.app = create_app(TestConfig)
self.client = self.app.test_client()
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_root_valid_revision(self, mock_get_entity):
"""Test the application returns the revision specified"""
mock_get_entity.return_value = self.content_row_response
revision = 'TestRevision'
url = '/?{0}={1}'.format(TestConfig.REVISION_PARAMETER, revision)
response = self.client.get(url)
row_key = '{0}:{1}'.format(TestConfig.APP_NAME, revision)
mock_get_entity.assert_called_once_with(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
row_key
)
self.assertEqual(response.data, self.content_row_response.content)
self.assertEqual(response.status_code, 200)
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_root_default_revision(self, mock_get_entity):
"""Test the application returns the active revision when no revision is specified"""
mock_get_entity.side_effect = self.default_revision_responses
url = '/'
response = self.client.get(url)
active_row_key = '{0}:current'.format(TestConfig.APP_NAME)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
active_row_key
)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
self.active_row_reponse.content
)
self.assertEqual(mock_get_entity.call_count, 2)
self.assertEqual(response.data, self.content_row_response.content)
self.assertEqual(response.status_code, 200)
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_root_invalid_revision(self, mock_get_entity):
"""Test the application returns a 404 response when a specified revision can't be found"""
mock_get_entity.side_effect = AzureMissingResourceHttpError("Not Found", 404)
url = '/?{0}=InvalidRevsion'.format(TestConfig.REVISION_PARAMETER)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_root_empty_revision(self, mock_get_entity):
"""Test that an empty/blank but specified revision returns the active revision"""
mock_get_entity.side_effect = self.default_revision_responses
url = '/?{0}='.format(TestConfig.REVISION_PARAMETER)
response = self.client.get(url)
active_row_key = '{0}:current'.format(TestConfig.APP_NAME)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
active_row_key
)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
self.active_row_reponse.content
)
self.assertEqual(mock_get_entity.call_count, 2)
self.assertEqual(response.data, self.content_row_response.content)
self.assertEqual(response.status_code, 200)
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_nonroot_default_revision(self, mock_get_entity):
"""Test the application handles and responds correctly to arbitrary paths"""
mock_get_entity.side_effect = self.default_revision_responses
url = '/directory/much/deep/wow'
response = self.client.get(url)
active_row_key = '{0}:current'.format(TestConfig.APP_NAME)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
active_row_key
)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
self.active_row_reponse.content
)
self.assertEqual(mock_get_entity.call_count, 2)
self.assertEqual(response.data, self.content_row_response.content)
self.assertEqual(response.status_code, 200)
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_revision_with_additional_parameters(self, mock_get_entity): # pylint: disable=C0103
"""Test the application returns a requested revision even among other query parameters"""
mock_get_entity.return_value = self.content_row_response
revision = 'TestRevision'
url = '/?index_key=123&{0}={1}&revision=456'.format(
TestConfig.REVISION_PARAMETER,
revision
)
response = self.client.get(url)
row_key = '{0}:{1}'.format(TestConfig.APP_NAME, revision)
mock_get_entity.assert_called_once_with(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
row_key
)
self.assertEqual(response.data, self.content_row_response.content)
self.assertEqual(response.status_code, 200)
@patch('azure_lightning_flask.helpers.TableService.get_entity')
def test_default_with_parameters(self, mock_get_entity):
"""Test that the application ignores query parameters that are not requesting a revision"""
mock_get_entity.side_effect = self.default_revision_responses
url = '/?index_key=123&&revision=456'
response = self.client.get(url)
active_row_key = '{0}:current'.format(TestConfig.APP_NAME)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
active_row_key
)
mock_get_entity.assert_any_call(
TestConfig.AZURE_STORAGE_TABLE,
TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
self.active_row_reponse.content
)
self.assertEqual(mock_get_entity.call_count, 2)
self.assertEqual(response.data, self.content_row_response.content)
self.assertEqual(response.status_code, 200)
|
[
"azure_lightning_flask.application.create_app",
"mock.patch",
"azure.common.AzureMissingResourceHttpError"
] |
[((1109, 1171), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (1114, 1171), False, 'from mock import patch\n'), ((1890, 1952), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (1895, 1952), False, 'from mock import patch\n'), ((2868, 2930), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (2873, 2930), False, 'from mock import patch\n'), ((3350, 3412), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (3355, 3412), False, 'from mock import patch\n'), ((4366, 4428), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (4371, 4428), False, 'from mock import patch\n'), ((5362, 5424), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (5367, 5424), False, 'from mock import patch\n'), ((6276, 6338), 'mock.patch', 'patch', (['"""azure_lightning_flask.helpers.TableService.get_entity"""'], {}), "('azure_lightning_flask.helpers.TableService.get_entity')\n", (6281, 6338), False, 'from mock import patch\n'), ((1035, 1057), 'azure_lightning_flask.application.create_app', 'create_app', (['TestConfig'], {}), '(TestConfig)\n', (1045, 1057), False, 'from azure_lightning_flask.application import create_app\n'), ((3127, 3174), 'azure.common.AzureMissingResourceHttpError', 'AzureMissingResourceHttpError', (['"""Not Found"""', '(404)'], {}), "('Not Found', 404)\n", (3156, 3174), False, 'from azure.common import AzureMissingResourceHttpError\n')]
|
from flask import Flask, request
from flask_apscheduler import APScheduler
import json
import socket
import datetime
import mqManagementClass
class Config(object):
# 任务列表
JOBS = [
{
'id': 'heratbeat',
'func': '__main__:sendHeartbeat', #执行函数
'args': None,
'trigger': 'interval',
'seconds': 10, #间隔时间(S)
}
]
app = Flask(__name__)
app.config.from_object(Config())
@app.route("/sendHeartbeat", methods=["POST"])
def sendHeartbeat():
userName = socket.gethostname()
time = datetime.datetime.now()
timestamp = time.strftime("%Y%m%d%H%M%S%f")
inputJson ={
"destination": "192.168.4.16",
"timestamp" : timestamp,
"user" : userName,
"cmd" : None
}
nodeIP = inputJson["destination"]
mqMaster = mqManagementClass.mqManagement()
ifSuccess = mqMaster.sentCmdToNode(nodeIP , inputJson)
print(ifSuccess)
outJson = {
"success": ifSuccess
}
return json.dumps(outJson)
if __name__ == '__main__':
scheduler=APScheduler()
scheduler.init_app(app)
scheduler.start()
app.run(debug=False)
|
[
"flask_apscheduler.APScheduler",
"flask.Flask",
"json.dumps",
"socket.gethostname",
"mqManagementClass.mqManagement",
"datetime.datetime.now"
] |
[((427, 442), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (432, 442), False, 'from flask import Flask, request\n'), ((566, 586), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (584, 586), False, 'import socket\n'), ((604, 627), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (625, 627), False, 'import datetime\n'), ((899, 931), 'mqManagementClass.mqManagement', 'mqManagementClass.mqManagement', ([], {}), '()\n', (929, 931), False, 'import mqManagementClass\n'), ((1076, 1095), 'json.dumps', 'json.dumps', (['outJson'], {}), '(outJson)\n', (1086, 1095), False, 'import json\n'), ((1142, 1155), 'flask_apscheduler.APScheduler', 'APScheduler', ([], {}), '()\n', (1153, 1155), False, 'from flask_apscheduler import APScheduler\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="weishaupt-wcm-com",
version="0.0.10",
author="<NAME>",
author_email="<EMAIL>",
description="Interfacing the Weishaupt WCM-COM module",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/schmiegelt/Py-Weishaupt-WCM-COM",
install_requires=["requests"],
packages=setuptools.find_packages(),
python_requires='>=3.6',
)
|
[
"setuptools.find_packages"
] |
[((467, 493), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (491, 493), False, 'import setuptools\n')]
|
# Generated by Django 2.2.9 on 2020-02-17 16:47
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('home', '0010_homepage_testimonial'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='about_iati_description',
field=models.TextField(default='', help_text='Description for the about IATI section'),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_en',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_es',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_fr',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_pt',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label',
field=models.CharField(default='', help_text='Link label for the about IATI section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_en',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_es',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_fr',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_pt',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_page',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title',
field=models.CharField(default='', help_text='Title for the about IATI section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_en',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_es',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_fr',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_pt',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video',
field=models.URLField(default='', help_text='Video embed URL for the about IATI section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_en',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_es',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_fr',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_pt',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description',
field=models.CharField(default='', help_text='Description for the activities statistics section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='activities_description_en',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description_es',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description_fr',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description_pt',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title',
field=models.CharField(default='', help_text='Title for the getting started section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_en',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_es',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_fr',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_pt',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_en',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_es',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_fr',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_pt',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section'),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_en',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_es',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_fr',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_pt',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title',
field=models.CharField(default='', help_text='Title for the IATI in action section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_en',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_es',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_fr',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_pt',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title',
field=models.CharField(default='', help_text='Title for the IATI tools section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section'),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_en',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_es',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_fr',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_pt',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_en',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_es',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_fr',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_pt',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label',
field=models.CharField(default='', help_text='Label for the view all news button', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_en',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_es',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_fr',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_pt',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title',
field=models.CharField(default='', help_text='Title for the latest new section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_en',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_es',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_fr',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_pt',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title',
field=models.CharField(default='', help_text='Title for the latest news Twitter section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_en',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_es',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_fr',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_pt',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description',
field=models.CharField(default='', help_text='Description for the organisations statistics section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='organisations_description_en',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description_es',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description_fr',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description_pt',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.CreateModel(
name='IATIToolsItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_tools_items', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='IATIInActionItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('title', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255)),
('title_en', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_fr', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_es', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_pt', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('description', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255)),
('description_en', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_fr', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_es', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_pt', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_in_action_items', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='IATIInActionFeaturedItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('title', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255)),
('title_en', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_fr', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_es', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_pt', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('description', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255)),
('description_en', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_fr', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_es', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_pt', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('quote', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255)),
('quote_en', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quote_fr', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quote_es', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quote_pt', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quotee', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255)),
('quotee_en', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('quotee_fr', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('quotee_es', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('quotee_pt', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('image', models.ForeignKey(blank=True, help_text='Optional: image for the item. Defaults to the selected page image if left blank', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_in_action_featured_item', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='GettingStartedItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('title', models.CharField(help_text='Title for the item', max_length=255)),
('title_en', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('title_fr', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('title_es', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('title_pt', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('description', models.CharField(help_text='Description for the item', max_length=255)),
('description_en', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('description_fr', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('description_es', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('description_pt', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('link_label', models.CharField(help_text='Link label for the item', max_length=255)),
('link_label_en', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('link_label_fr', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('link_label_es', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('link_label_pt', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('image', models.ForeignKey(help_text='Image for the item', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='getting_started_items', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
[
"django.db.models.TextField",
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((542, 627), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'help_text': '"""Description for the about IATI section"""'}), "(default='', help_text='Description for the about IATI section'\n )\n", (558, 627), False, 'from django.db import migrations, models\n'), ((799, 878), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Description for the about IATI section"""', 'null': '(True)'}), "(help_text='Description for the about IATI section', null=True)\n", (815, 878), False, 'from django.db import migrations, models\n'), ((1019, 1098), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Description for the about IATI section"""', 'null': '(True)'}), "(help_text='Description for the about IATI section', null=True)\n", (1035, 1098), False, 'from django.db import migrations, models\n'), ((1239, 1318), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Description for the about IATI section"""', 'null': '(True)'}), "(help_text='Description for the about IATI section', null=True)\n", (1255, 1318), False, 'from django.db import migrations, models\n'), ((1459, 1538), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Description for the about IATI section"""', 'null': '(True)'}), "(help_text='Description for the about IATI section', null=True)\n", (1475, 1538), False, 'from django.db import migrations, models\n'), ((1675, 1775), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Link label for the about IATI section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Link label for the about IATI section', max_length=255)\n", (1691, 1775), False, 'from django.db import migrations, models\n'), ((1946, 2044), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the about IATI section',\n max_length=255, null=True)\n", (1962, 2044), False, 'from django.db import migrations, models\n'), ((2180, 2278), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the about IATI section',\n max_length=255, null=True)\n", (2196, 2278), False, 'from django.db import migrations, models\n'), ((2414, 2512), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the about IATI section',\n max_length=255, null=True)\n", (2430, 2512), False, 'from django.db import migrations, models\n'), ((2648, 2746), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the about IATI section',\n max_length=255, null=True)\n", (2664, 2746), False, 'from django.db import migrations, models\n'), ((2873, 2992), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='+', to='wagtailcore.Page')\n", (2890, 2992), False, 'from django.db import migrations, models\n'), ((3120, 3214), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Title for the about IATI section"""', 'max_length': '(255)'}), "(default='', help_text='Title for the about IATI section',\n max_length=255)\n", (3136, 3214), False, 'from django.db import migrations, models\n'), ((3381, 3475), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the about IATI section', max_length=\n 255, null=True)\n", (3397, 3475), False, 'from django.db import migrations, models\n'), ((3605, 3699), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the about IATI section', max_length=\n 255, null=True)\n", (3621, 3699), False, 'from django.db import migrations, models\n'), ((3829, 3923), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the about IATI section', max_length=\n 255, null=True)\n", (3845, 3923), False, 'from django.db import migrations, models\n'), ((4053, 4147), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the about IATI section', max_length=\n 255, null=True)\n", (4069, 4147), False, 'from django.db import migrations, models\n'), ((4274, 4378), 'django.db.models.URLField', 'models.URLField', ([], {'default': '""""""', 'help_text': '"""Video embed URL for the about IATI section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Video embed URL for the about IATI section', max_length=255)\n", (4289, 4378), False, 'from django.db import migrations, models\n'), ((4544, 4646), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Video embed URL for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Video embed URL for the about IATI section',\n max_length=255, null=True)\n", (4559, 4646), False, 'from django.db import migrations, models\n'), ((4777, 4879), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Video embed URL for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Video embed URL for the about IATI section',\n max_length=255, null=True)\n", (4792, 4879), False, 'from django.db import migrations, models\n'), ((5010, 5112), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Video embed URL for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Video embed URL for the about IATI section',\n max_length=255, null=True)\n", (5025, 5112), False, 'from django.db import migrations, models\n'), ((5243, 5345), 'django.db.models.URLField', 'models.URLField', ([], {'help_text': '"""Video embed URL for the about IATI section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Video embed URL for the about IATI section',\n max_length=255, null=True)\n", (5258, 5345), False, 'from django.db import migrations, models\n'), ((5479, 5591), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Description for the activities statistics section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Description for the activities statistics section', max_length=255)\n", (5495, 5591), False, 'from django.db import migrations, models\n'), ((5763, 5878), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the activities statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the activities statistics section', max_length=255,\n null=True)\n", (5779, 5878), False, 'from django.db import migrations, models\n'), ((6010, 6125), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the activities statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the activities statistics section', max_length=255,\n null=True)\n", (6026, 6125), False, 'from django.db import migrations, models\n'), ((6257, 6372), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the activities statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the activities statistics section', max_length=255,\n null=True)\n", (6273, 6372), False, 'from django.db import migrations, models\n'), ((6504, 6619), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the activities statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the activities statistics section', max_length=255,\n null=True)\n", (6520, 6619), False, 'from django.db import migrations, models\n'), ((6747, 6847), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Title for the getting started section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Title for the getting started section', max_length=255)\n", (6763, 6847), False, 'from django.db import migrations, models\n'), ((7018, 7116), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the getting started section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the getting started section',\n max_length=255, null=True)\n", (7034, 7116), False, 'from django.db import migrations, models\n'), ((7252, 7350), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the getting started section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the getting started section',\n max_length=255, null=True)\n", (7268, 7350), False, 'from django.db import migrations, models\n'), ((7486, 7584), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the getting started section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the getting started section',\n max_length=255, null=True)\n", (7502, 7584), False, 'from django.db import migrations, models\n'), ((7720, 7818), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the getting started section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the getting started section',\n max_length=255, null=True)\n", (7736, 7818), False, 'from django.db import migrations, models\n'), ((7945, 8059), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'help_text': '"""Optional: video embed URL for page header"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: video embed URL for page header', max_length=255, null=True)\n", (7960, 8059), False, 'from django.db import migrations, models\n'), ((8185, 8299), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'help_text': '"""Optional: video embed URL for page header"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: video embed URL for page header', max_length=255, null=True)\n", (8200, 8299), False, 'from django.db import migrations, models\n'), ((8425, 8539), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'help_text': '"""Optional: video embed URL for page header"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: video embed URL for page header', max_length=255, null=True)\n", (8440, 8539), False, 'from django.db import migrations, models\n'), ((8665, 8779), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'help_text': '"""Optional: video embed URL for page header"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: video embed URL for page header', max_length=255, null=True)\n", (8680, 8779), False, 'from django.db import migrations, models\n'), ((8916, 9015), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI in action section"""'}), "(blank=True, help_text=\n 'Optional: description for the IATI in action section')\n", (8932, 9015), False, 'from django.db import migrations, models\n'), ((9155, 9265), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI in action section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI in action section', null=True)\n", (9171, 9265), False, 'from django.db import migrations, models\n'), ((9405, 9515), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI in action section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI in action section', null=True)\n", (9421, 9515), False, 'from django.db import migrations, models\n'), ((9655, 9765), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI in action section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI in action section', null=True)\n", (9671, 9765), False, 'from django.db import migrations, models\n'), ((9905, 10015), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI in action section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI in action section', null=True)\n", (9921, 10015), False, 'from django.db import migrations, models\n'), ((10146, 10245), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Title for the IATI in action section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Title for the IATI in action section', max_length=255)\n", (10162, 10245), False, 'from django.db import migrations, models\n'), ((10415, 10512), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI in action section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI in action section',\n max_length=255, null=True)\n", (10431, 10512), False, 'from django.db import migrations, models\n'), ((10647, 10744), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI in action section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI in action section',\n max_length=255, null=True)\n", (10663, 10744), False, 'from django.db import migrations, models\n'), ((10879, 10976), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI in action section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI in action section',\n max_length=255, null=True)\n", (10895, 10976), False, 'from django.db import migrations, models\n'), ((11111, 11208), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI in action section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI in action section',\n max_length=255, null=True)\n", (11127, 11208), False, 'from django.db import migrations, models\n'), ((11336, 11430), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Title for the IATI tools section"""', 'max_length': '(255)'}), "(default='', help_text='Title for the IATI tools section',\n max_length=255)\n", (11352, 11430), False, 'from django.db import migrations, models\n'), ((11606, 11701), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI tools section"""'}), "(blank=True, help_text=\n 'Optional: description for the IATI tools section')\n", (11622, 11701), False, 'from django.db import migrations, models\n'), ((11843, 11949), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI tools section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI tools section', null=True)\n", (11859, 11949), False, 'from django.db import migrations, models\n'), ((12091, 12197), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI tools section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI tools section', null=True)\n", (12107, 12197), False, 'from django.db import migrations, models\n'), ((12339, 12445), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI tools section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI tools section', null=True)\n", (12355, 12445), False, 'from django.db import migrations, models\n'), ((12587, 12693), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the IATI tools section"""', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the IATI tools section', null=True)\n", (12603, 12693), False, 'from django.db import migrations, models\n'), ((12823, 12917), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI tools section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI tools section', max_length=\n 255, null=True)\n", (12839, 12917), False, 'from django.db import migrations, models\n'), ((13047, 13141), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI tools section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI tools section', max_length=\n 255, null=True)\n", (13063, 13141), False, 'from django.db import migrations, models\n'), ((13271, 13365), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI tools section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI tools section', max_length=\n 255, null=True)\n", (13287, 13365), False, 'from django.db import migrations, models\n'), ((13495, 13589), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the IATI tools section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the IATI tools section', max_length=\n 255, null=True)\n", (13511, 13589), False, 'from django.db import migrations, models\n'), ((13722, 13818), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Label for the view all news button"""', 'max_length': '(255)'}), "(default='', help_text='Label for the view all news button',\n max_length=255)\n", (13738, 13818), False, 'from django.db import migrations, models\n'), ((13991, 14087), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Label for the view all news button"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Label for the view all news button', max_length\n =255, null=True)\n", (14007, 14087), False, 'from django.db import migrations, models\n'), ((14223, 14319), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Label for the view all news button"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Label for the view all news button', max_length\n =255, null=True)\n", (14239, 14319), False, 'from django.db import migrations, models\n'), ((14455, 14551), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Label for the view all news button"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Label for the view all news button', max_length\n =255, null=True)\n", (14471, 14551), False, 'from django.db import migrations, models\n'), ((14687, 14783), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Label for the view all news button"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Label for the view all news button', max_length\n =255, null=True)\n", (14703, 14783), False, 'from django.db import migrations, models\n'), ((14911, 15005), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Title for the latest new section"""', 'max_length': '(255)'}), "(default='', help_text='Title for the latest new section',\n max_length=255)\n", (14927, 15005), False, 'from django.db import migrations, models\n'), ((15173, 15267), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest new section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest new section', max_length=\n 255, null=True)\n", (15189, 15267), False, 'from django.db import migrations, models\n'), ((15398, 15492), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest new section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest new section', max_length=\n 255, null=True)\n", (15414, 15492), False, 'from django.db import migrations, models\n'), ((15623, 15717), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest new section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest new section', max_length=\n 255, null=True)\n", (15639, 15717), False, 'from django.db import migrations, models\n'), ((15848, 15942), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest new section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest new section', max_length=\n 255, null=True)\n", (15864, 15942), False, 'from django.db import migrations, models\n'), ((16077, 16181), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Title for the latest news Twitter section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Title for the latest news Twitter section', max_length=255)\n", (16093, 16181), False, 'from django.db import migrations, models\n'), ((16355, 16457), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest news Twitter section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest news Twitter section',\n max_length=255, null=True)\n", (16371, 16457), False, 'from django.db import migrations, models\n'), ((16596, 16698), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest news Twitter section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest news Twitter section',\n max_length=255, null=True)\n", (16612, 16698), False, 'from django.db import migrations, models\n'), ((16837, 16939), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest news Twitter section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest news Twitter section',\n max_length=255, null=True)\n", (16853, 16939), False, 'from django.db import migrations, models\n'), ((17078, 17180), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the latest news Twitter section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the latest news Twitter section',\n max_length=255, null=True)\n", (17094, 17180), False, 'from django.db import migrations, models\n'), ((17317, 17432), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Description for the organisations statistics section"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Description for the organisations statistics section', max_length=255)\n", (17333, 17432), False, 'from django.db import migrations, models\n'), ((17607, 17725), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the organisations statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the organisations statistics section', max_length=255,\n null=True)\n", (17623, 17725), False, 'from django.db import migrations, models\n'), ((17860, 17978), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the organisations statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the organisations statistics section', max_length=255,\n null=True)\n", (17876, 17978), False, 'from django.db import migrations, models\n'), ((18113, 18231), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the organisations statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the organisations statistics section', max_length=255,\n null=True)\n", (18129, 18231), False, 'from django.db import migrations, models\n'), ((18366, 18484), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the organisations statistics section"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text=\n 'Description for the organisations statistics section', max_length=255,\n null=True)\n", (18382, 18484), False, 'from django.db import migrations, models\n'), ((18599, 18692), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (18615, 18692), False, 'from django.db import migrations, models\n'), ((18722, 18780), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'editable': '(False)', 'null': '(True)'}), '(blank=True, editable=False, null=True)\n', (18741, 18780), False, 'from django.db import migrations, models\n'), ((18965, 19120), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Page link for the item"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""'}), "(help_text='Page link for the item', null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')\n", (18982, 19120), False, 'from django.db import migrations, models\n'), ((19374, 19467), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (19390, 19467), False, 'from django.db import migrations, models\n'), ((19497, 19555), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'editable': '(False)', 'null': '(True)'}), '(blank=True, editable=False, null=True)\n', (19516, 19555), False, 'from django.db import migrations, models\n'), ((19584, 19731), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255)\n", (19600, 19731), False, 'from django.db import migrations, models\n'), ((19753, 19911), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (19769, 19911), False, 'from django.db import migrations, models\n'), ((19933, 20091), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (19949, 20091), False, 'from django.db import migrations, models\n'), ((20113, 20271), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (20129, 20271), False, 'from django.db import migrations, models\n'), ((20293, 20451), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (20309, 20451), False, 'from django.db import migrations, models\n'), ((20476, 20631), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255)\n", (20492, 20631), False, 'from django.db import migrations, models\n'), ((20659, 20825), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (20675, 20825), False, 'from django.db import migrations, models\n'), ((20853, 21019), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (20869, 21019), False, 'from django.db import migrations, models\n'), ((21047, 21213), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (21063, 21213), False, 'from django.db import migrations, models\n'), ((21241, 21407), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (21257, 21407), False, 'from django.db import migrations, models\n'), ((21586, 21741), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Page link for the item"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""'}), "(help_text='Page link for the item', null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')\n", (21603, 21741), False, 'from django.db import migrations, models\n'), ((22003, 22096), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (22019, 22096), False, 'from django.db import migrations, models\n'), ((22126, 22184), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'editable': '(False)', 'null': '(True)'}), '(blank=True, editable=False, null=True)\n', (22145, 22184), False, 'from django.db import migrations, models\n'), ((22213, 22360), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255)\n", (22229, 22360), False, 'from django.db import migrations, models\n'), ((22382, 22540), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (22398, 22540), False, 'from django.db import migrations, models\n'), ((22562, 22720), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (22578, 22720), False, 'from django.db import migrations, models\n'), ((22742, 22900), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (22758, 22900), False, 'from django.db import migrations, models\n'), ((22922, 23080), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: title for the item. Defaults to the selected page title if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: title for the item. Defaults to the selected page title if left blank'\n , max_length=255, null=True)\n", (22938, 23080), False, 'from django.db import migrations, models\n'), ((23105, 23260), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255)\n", (23121, 23260), False, 'from django.db import migrations, models\n'), ((23288, 23454), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (23304, 23454), False, 'from django.db import migrations, models\n'), ((23482, 23648), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (23498, 23648), False, 'from django.db import migrations, models\n'), ((23676, 23842), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (23692, 23842), False, 'from django.db import migrations, models\n'), ((23870, 24036), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: description for the item. Defaults to the selected page excerpt if left blank"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text=\n 'Optional: description for the item. Defaults to the selected page excerpt if left blank'\n , max_length=255, null=True)\n", (23886, 24036), False, 'from django.db import migrations, models\n'), ((24055, 24145), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: quote for the item"""', 'max_length': '(255)'}), "(blank=True, help_text='Optional: quote for the item',\n max_length=255)\n", (24071, 24145), False, 'from django.db import migrations, models\n'), ((24173, 24274), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: quote for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: quote for the item',\n max_length=255, null=True)\n", (24189, 24274), False, 'from django.db import migrations, models\n'), ((24302, 24403), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: quote for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: quote for the item',\n max_length=255, null=True)\n", (24318, 24403), False, 'from django.db import migrations, models\n'), ((24431, 24532), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: quote for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: quote for the item',\n max_length=255, null=True)\n", (24447, 24532), False, 'from django.db import migrations, models\n'), ((24560, 24661), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: quote for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: quote for the item',\n max_length=255, null=True)\n", (24576, 24661), False, 'from django.db import migrations, models\n'), ((24687, 24782), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: the source of the quote"""', 'max_length': '(255)'}), "(blank=True, help_text='Optional: the source of the quote',\n max_length=255)\n", (24703, 24782), False, 'from django.db import migrations, models\n'), ((24811, 24917), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: the source of the quote"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: the source of the quote',\n max_length=255, null=True)\n", (24827, 24917), False, 'from django.db import migrations, models\n'), ((24946, 25052), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: the source of the quote"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: the source of the quote',\n max_length=255, null=True)\n", (24962, 25052), False, 'from django.db import migrations, models\n'), ((25081, 25187), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: the source of the quote"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: the source of the quote',\n max_length=255, null=True)\n", (25097, 25187), False, 'from django.db import migrations, models\n'), ((25216, 25322), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Optional: the source of the quote"""', 'max_length': '(255)', 'null': '(True)'}), "(blank=True, help_text='Optional: the source of the quote',\n max_length=255, null=True)\n", (25232, 25322), False, 'from django.db import migrations, models\n'), ((25347, 25585), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Optional: image for the item. Defaults to the selected page image if left blank"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailimages.Image"""'}), "(blank=True, help_text=\n 'Optional: image for the item. Defaults to the selected page image if left blank'\n , null=True, on_delete=django.db.models.deletion.SET_NULL, related_name\n ='+', to='wagtailimages.Image')\n", (25364, 25585), False, 'from django.db import migrations, models\n'), ((25767, 25922), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Page link for the item"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""'}), "(help_text='Page link for the item', null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')\n", (25784, 25922), False, 'from django.db import migrations, models\n'), ((26178, 26271), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (26194, 26271), False, 'from django.db import migrations, models\n'), ((26301, 26359), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'editable': '(False)', 'null': '(True)'}), '(blank=True, editable=False, null=True)\n', (26320, 26359), False, 'from django.db import migrations, models\n'), ((26388, 26452), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the item"""', 'max_length': '(255)'}), "(help_text='Title for the item', max_length=255)\n", (26404, 26452), False, 'from django.db import migrations, models\n'), ((26484, 26559), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the item', max_length=255, null=True)\n", (26500, 26559), False, 'from django.db import migrations, models\n'), ((26591, 26666), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the item', max_length=255, null=True)\n", (26607, 26666), False, 'from django.db import migrations, models\n'), ((26698, 26773), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the item', max_length=255, null=True)\n", (26714, 26773), False, 'from django.db import migrations, models\n'), ((26805, 26880), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Title for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Title for the item', max_length=255, null=True)\n", (26821, 26880), False, 'from django.db import migrations, models\n'), ((26915, 26985), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the item"""', 'max_length': '(255)'}), "(help_text='Description for the item', max_length=255)\n", (26931, 26985), False, 'from django.db import migrations, models\n'), ((27023, 27109), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Description for the item', max_length=255, null\n =True)\n", (27039, 27109), False, 'from django.db import migrations, models\n'), ((27142, 27228), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Description for the item', max_length=255, null\n =True)\n", (27158, 27228), False, 'from django.db import migrations, models\n'), ((27261, 27347), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Description for the item', max_length=255, null\n =True)\n", (27277, 27347), False, 'from django.db import migrations, models\n'), ((27380, 27466), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Description for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Description for the item', max_length=255, null\n =True)\n", (27396, 27466), False, 'from django.db import migrations, models\n'), ((27495, 27564), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the item"""', 'max_length': '(255)'}), "(help_text='Link label for the item', max_length=255)\n", (27511, 27564), False, 'from django.db import migrations, models\n'), ((27601, 27686), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the item', max_length=255, null=True\n )\n", (27617, 27686), False, 'from django.db import migrations, models\n'), ((27718, 27803), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the item', max_length=255, null=True\n )\n", (27734, 27803), False, 'from django.db import migrations, models\n'), ((27835, 27920), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the item', max_length=255, null=True\n )\n", (27851, 27920), False, 'from django.db import migrations, models\n'), ((27952, 28037), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Link label for the item"""', 'max_length': '(255)', 'null': '(True)'}), "(help_text='Link label for the item', max_length=255, null=True\n )\n", (27968, 28037), False, 'from django.db import migrations, models\n'), ((28061, 28221), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Image for the item"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailimages.Image"""'}), "(help_text='Image for the item', null=True, on_delete=\n django.db.models.deletion.SET_NULL, related_name='+', to=\n 'wagtailimages.Image')\n", (28078, 28221), False, 'from django.db import migrations, models\n'), ((28401, 28556), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Page link for the item"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""'}), "(help_text='Page link for the item', null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')\n", (28418, 28556), False, 'from django.db import migrations, models\n')]
|
from scrapy import signals
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import database_exists, create_database
from sqlalchemy.orm import object_mapper
from .mapper import ItemsModelMapper
import os
DeclarativeBase = declarative_base()
# https://www.python.org/download/releases/2.2/descrintro/#__new__
class Singleton(object):
def __new__(cls, *args, **kwds):
it = cls.__dict__.get("__it__")
if it is not None:
return it
cls.__it__ = it = object.__new__(cls)
it.init(*args, **kwds)
return it
def init(self, *args, **kwds):
pass
class DatabasePipeline(Singleton):
def __init__(self, settings, items=None, model=None, database=None, database_dev=None):
if database:
self.database = database
elif settings:
self.database = settings.get("DATABASE")
self.database["query"]["charset"] = 'utf8mb4'
if database_dev:
self.database_dev = database_dev
elif settings:
self.database_dev = settings.get("DATABASE_DEV")
self.database_dev["query"]["charset"] = 'utf8mb4'
self.session = self.get_session()
if items and model:
self.mapper = ItemsModelMapper(items=items, model=model)
@classmethod
def from_crawler(cls, crawler):
pipeline = cls(crawler.settings)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
crawler.database_session = pipeline.session
return pipeline
def get_session(self):
engine = self.create_engine()
self.create_tables(engine)
return self.create_session(engine)
def create_engine(self):
if "PRODUCTION" in os.environ:
engine = create_engine(URL(**self.database))
else:
engine = create_engine(URL(**self.database_dev))
if not database_exists(engine.url):
create_database(engine.url)
return engine
def create_tables(self, engine):
DeclarativeBase.metadata.create_all(engine, checkfirst=True)
def create_session(self, engine):
session = sessionmaker(bind=engine, autoflush=False)() # autoflush=False: "This is useful when initializing a series of objects which involve existing database queries, where the uncompleted object should not yet be flushed." for instance when using the Association Object Pattern
return session
def spider_closed(self, spider):
self.session.close()
def process_item(self, item, spider):
obj = self.mapper.map_to_model(item=item, sess=self.session)
try:
self.session.add(obj)
self.session.commit()
# Set potentially missing primary keys (autoincrement) for the item
mapper = object_mapper(obj)
for key, value in zip(mapper.primary_key, mapper.primary_key_from_instance(obj)):
item[key.name] = value
except:
self.session.rollback()
raise
finally:
self.session.close()
return item
|
[
"sqlalchemy.orm.object_mapper",
"sqlalchemy_utils.create_database",
"sqlalchemy_utils.database_exists",
"sqlalchemy.engine.url.URL",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.sessionmaker"
] |
[((367, 385), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (383, 385), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2033, 2060), 'sqlalchemy_utils.database_exists', 'database_exists', (['engine.url'], {}), '(engine.url)\n', (2048, 2060), False, 'from sqlalchemy_utils import database_exists, create_database\n'), ((2074, 2101), 'sqlalchemy_utils.create_database', 'create_database', (['engine.url'], {}), '(engine.url)\n', (2089, 2101), False, 'from sqlalchemy_utils import database_exists, create_database\n'), ((2288, 2330), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine', 'autoflush': '(False)'}), '(bind=engine, autoflush=False)\n', (2300, 2330), False, 'from sqlalchemy.orm import sessionmaker\n'), ((2956, 2974), 'sqlalchemy.orm.object_mapper', 'object_mapper', (['obj'], {}), '(obj)\n', (2969, 2974), False, 'from sqlalchemy.orm import object_mapper\n'), ((1921, 1941), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**self.database)\n', (1924, 1941), False, 'from sqlalchemy.engine.url import URL\n'), ((1992, 2016), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**self.database_dev)\n', (1995, 2016), False, 'from sqlalchemy.engine.url import URL\n')]
|
from geopy import Point
from magicbox_distance import shapefile, distance
right_angle_start = Point(1.0, 1.0)
right_angle_middle = Point(1.1, 1.0)
right_angle_end = Point(1.1, 1.1)
right_angle_distance = distance.using_latitude_and_longitude(right_angle_start, right_angle_middle) + \
distance.using_latitude_and_longitude(right_angle_middle, right_angle_end)
def create_shapefile(roads):
return [shapefile.create_record(index, shapefile.ShapeType.POLYLINE, [road]) for index, road in enumerate(roads)]
def create_part(*args):
return shapefile.create_part(1, list(args))
|
[
"magicbox_distance.shapefile.create_record",
"magicbox_distance.distance.using_latitude_and_longitude",
"geopy.Point"
] |
[((96, 111), 'geopy.Point', 'Point', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (101, 111), False, 'from geopy import Point\n'), ((133, 148), 'geopy.Point', 'Point', (['(1.1)', '(1.0)'], {}), '(1.1, 1.0)\n', (138, 148), False, 'from geopy import Point\n'), ((167, 182), 'geopy.Point', 'Point', (['(1.1)', '(1.1)'], {}), '(1.1, 1.1)\n', (172, 182), False, 'from geopy import Point\n'), ((207, 283), 'magicbox_distance.distance.using_latitude_and_longitude', 'distance.using_latitude_and_longitude', (['right_angle_start', 'right_angle_middle'], {}), '(right_angle_start, right_angle_middle)\n', (244, 283), False, 'from magicbox_distance import shapefile, distance\n'), ((311, 385), 'magicbox_distance.distance.using_latitude_and_longitude', 'distance.using_latitude_and_longitude', (['right_angle_middle', 'right_angle_end'], {}), '(right_angle_middle, right_angle_end)\n', (348, 385), False, 'from magicbox_distance import shapefile, distance\n'), ((429, 497), 'magicbox_distance.shapefile.create_record', 'shapefile.create_record', (['index', 'shapefile.ShapeType.POLYLINE', '[road]'], {}), '(index, shapefile.ShapeType.POLYLINE, [road])\n', (452, 497), False, 'from magicbox_distance import shapefile, distance\n')]
|
#!/usr/bin/env python3
from typing import List, Dict
import lib
from lib import StatTracker
from lib.common import group, calc_stat
import collections
import matplotlib.pyplot as plt
import os
plots = collections.OrderedDict()
plots["Full"] = "analyzer/baseline/validation/"
plots["$+$"] = "analyzer/add/validation/"
plots["$\\neg +$"] = "inverse_mask_test/add/"
plots["$*$"] = "analyzer/mul/validation/"
plots["$\\neg *$"] = "inverse_mask_test/mul/"
names = list(plots.keys())
ops = ["add","mul"]
def plot_both(ff, rnn):
ff_stats = calc_stat({"a":ff}, lambda k: (k.startswith("analyzer/") and k.endswith("/accuracy") and '/validation/' in k) or (k.startswith("inverse_mask_test/") and k.endswith("/accuracy")))["a"]
rnn_stats = calc_stat({"a":rnn}, lambda k: (k.startswith("analyzer/") and k.endswith("/accuracy") and '/validation/' in k) or (k.startswith("inverse_mask_test/") and k.endswith("/accuracy")))["a"]
fig = plt.figure(figsize=[6,1.6])
for t in range(2):
this_ff_stats = [ff_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
means_ff = [s.mean * 100 for s in this_ff_stats]
std_ff = [s.std * 100 for s in this_ff_stats]
plt.bar([5.5 * r + t * 2.5 for r in range(len(names))], means_ff, yerr=std_ff, align='center')
for t in range(2):
this_rnn_stats = [rnn_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
means_rnn = [s.mean * 100 for s in this_rnn_stats]
std_rnn = [s.std * 100 for s in this_rnn_stats]
plt.bar([5.5 * r + 1+ t * 2.5 for r in range(len(names))], means_rnn, yerr=std_rnn, align='center')
plt.xticks([5.5 * r + 1.75 for r in range(len(names))], names)
plt.ylabel("Accuracy [\\%]")
plt.legend(["FNN $+$", "FNN $*$", "RNN $+$", "RNN $*$"])
fname = "out/admmul_performance.pdf"
os.makedirs(os.path.dirname(fname), exist_ok=True)
fig.savefig(fname, bbox_inches='tight')
print("\\begin{tabular}{ll|c|cc|cc}")
print("\\toprule")
print(" & ".join(["", ""] + names) + " \\\\")
print("\\midrule")
row = ["\\multirow{2}{*}{FNN}"]
for t in range(2):
this_stats = [ff_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
row.append(f"Pair {t}")
for m, s in zip([s.mean * 100 for s in this_stats], [s.std * 100 for s in this_stats]):
row.append(f"${m:.0f} \pm {s:.1f}$")
print(" & ".join(row) + " \\\\")
row = [""]
print("\\midrule")
row = ["\\multirow{2}{*}{LSTM}"]
for t in range(2):
this_stats = [rnn_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names]
row.append(f"Pair {t}")
for m, s in zip([s.mean * 100 for s in this_stats], [s.std * 100 for s in this_stats]):
row.append(f"${m:.0f} \pm {s:.1f}$")
print(" & ".join(row) + " \\\\")
row = [""]
print("\\bottomrule")
print("\end{tabular}")
rnn_runs = lib.get_runs(["addmul_rnn"])
feedforward_runs = lib.get_runs(["addmul_feedforward_big"])
feedforward_runs = group(feedforward_runs, ["layer_sizes"])
rnn_runs = group(rnn_runs, ["tuple.mode"])
plot_both(feedforward_runs["layer_sizes_2000,2000,2000,2000"], rnn_runs["tuple.mode_together"])
|
[
"matplotlib.pyplot.legend",
"os.path.dirname",
"matplotlib.pyplot.figure",
"lib.get_runs",
"collections.OrderedDict",
"matplotlib.pyplot.ylabel",
"lib.common.group"
] |
[((203, 228), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (226, 228), False, 'import collections\n'), ((2935, 2963), 'lib.get_runs', 'lib.get_runs', (["['addmul_rnn']"], {}), "(['addmul_rnn'])\n", (2947, 2963), False, 'import lib\n'), ((2983, 3023), 'lib.get_runs', 'lib.get_runs', (["['addmul_feedforward_big']"], {}), "(['addmul_feedforward_big'])\n", (2995, 3023), False, 'import lib\n'), ((3044, 3084), 'lib.common.group', 'group', (['feedforward_runs', "['layer_sizes']"], {}), "(feedforward_runs, ['layer_sizes'])\n", (3049, 3084), False, 'from lib.common import group, calc_stat\n'), ((3096, 3127), 'lib.common.group', 'group', (['rnn_runs', "['tuple.mode']"], {}), "(rnn_runs, ['tuple.mode'])\n", (3101, 3127), False, 'from lib.common import group, calc_stat\n'), ((939, 967), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[6, 1.6]'}), '(figsize=[6, 1.6])\n', (949, 967), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1730), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy [\\\\%]"""'], {}), "('Accuracy [\\\\%]')\n", (1712, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1791), 'matplotlib.pyplot.legend', 'plt.legend', (["['FNN $+$', 'FNN $*$', 'RNN $+$', 'RNN $*$']"], {}), "(['FNN $+$', 'FNN $*$', 'RNN $+$', 'RNN $*$'])\n", (1745, 1791), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1872), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (1865, 1872), False, 'import os\n')]
|
from rest_framework import serializers
from typing import Optional, TYPE_CHECKING
from .models import Profile, Scratch
from .github import GitHubUser
from .middleware import Request
def serialize_profile(request: Request, profile: Profile):
if profile.user is None:
return {
"is_you": profile == request.profile,
"is_anonymous": True,
}
else:
user = profile.user
github: Optional[GitHubUser] = GitHubUser.objects.filter(user=user).first()
github_details = github.details() if github else None
return {
"is_you": user == request.user,
"is_anonymous": False,
"id": user.id,
"username": user.username,
"email": user.email,
"name": github_details.name if github_details else user.username,
"avatar_url": github_details.avatar_url if github_details else None,
"github_api_url": github_details.url if github_details else None,
"github_html_url": github_details.html_url if github_details else None,
}
if TYPE_CHECKING:
ProfileFieldBaseClass = serializers.RelatedField[Profile, str, str]
else:
ProfileFieldBaseClass = serializers.RelatedField
class ProfileField(ProfileFieldBaseClass):
def to_representation(self, profile: Profile):
return serialize_profile(self.context["request"], profile)
class ScratchCreateSerializer(serializers.Serializer[None]):
compiler = serializers.CharField(allow_blank=True, required=True)
platform = serializers.CharField(allow_blank=True, required=False)
compiler_flags = serializers.CharField(allow_blank=True, required=False)
source_code = serializers.CharField(allow_blank=True, required=False)
target_asm = serializers.CharField(allow_blank=True)
# TODO: `context` should be renamed; it conflicts with Field.context
context = serializers.CharField(allow_blank=True) # type: ignore
diff_label = serializers.CharField(allow_blank=True, required=False)
class ScratchSerializer(serializers.ModelSerializer[Scratch]):
class Meta:
model = Scratch
fields = ["slug", "name", "description", "compiler", "platform", "compiler_flags", "target_assembly", "source_code", "context", "diff_label", "score", "max_score"]
# XXX: ideally we would just use ScratchSerializer, but adding owner and parent breaks creation
class ScratchWithMetadataSerializer(serializers.ModelSerializer[Scratch]):
owner = ProfileField(read_only=True)
parent = serializers.HyperlinkedRelatedField( # type: ignore
read_only=True,
view_name="scratch-detail",
lookup_field="slug",
)
class Meta:
model = Scratch
fields = ["slug", "name", "description", "compiler", "platform", "compiler_flags", "source_code", "context", "owner", "parent", "diff_label", "score", "max_score"]
|
[
"rest_framework.serializers.HyperlinkedRelatedField",
"rest_framework.serializers.CharField"
] |
[((1483, 1537), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)', 'required': '(True)'}), '(allow_blank=True, required=True)\n', (1504, 1537), False, 'from rest_framework import serializers\n'), ((1553, 1608), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)', 'required': '(False)'}), '(allow_blank=True, required=False)\n', (1574, 1608), False, 'from rest_framework import serializers\n'), ((1630, 1685), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)', 'required': '(False)'}), '(allow_blank=True, required=False)\n', (1651, 1685), False, 'from rest_framework import serializers\n'), ((1704, 1759), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)', 'required': '(False)'}), '(allow_blank=True, required=False)\n', (1725, 1759), False, 'from rest_framework import serializers\n'), ((1777, 1816), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)'}), '(allow_blank=True)\n', (1798, 1816), False, 'from rest_framework import serializers\n'), ((1904, 1943), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)'}), '(allow_blank=True)\n', (1925, 1943), False, 'from rest_framework import serializers\n'), ((1976, 2031), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'allow_blank': '(True)', 'required': '(False)'}), '(allow_blank=True, required=False)\n', (1997, 2031), False, 'from rest_framework import serializers\n'), ((2534, 2639), 'rest_framework.serializers.HyperlinkedRelatedField', 'serializers.HyperlinkedRelatedField', ([], {'read_only': '(True)', 'view_name': '"""scratch-detail"""', 'lookup_field': '"""slug"""'}), "(read_only=True, view_name=\n 'scratch-detail', lookup_field='slug')\n", (2569, 2639), False, 'from rest_framework import serializers\n')]
|
from rest_framework.routers import DefaultRouter
from django.conf.urls import include, url
from .views import UserViewSet
app_name = 'apps.users'
# # Here the comments and activity is added manually by including them under the detail view.
#
# urlpatterns = [
#
# url(
# regex=r'^$',
# view=UserViewSet.as_view({'get': 'list'}),
# name='user-list'
# ),
#
# url(
# r'^(?P<pk>[\w.@+-]+)/',
# include([
# url(
# regex=r'^$',
# view=UserViewSet.as_view({'get': 'retrieve'}),
# name='user-detail'
# ),
# url(
# regex=r'^comments/$',
# view=UserViewSet.as_view({'get': 'comments', 'post': 'comments'}),
# name='user-comments'
# ),
# url(
# regex=r'^activities/$',
# view=UserViewSet.as_view({'get': 'activities'}),
# name='user-activities'
# ),
# ])
# ),
#
# ]
# If we use a router the comments and activity is added via
# the @detail_route decorator in the CommentsMixin and ActivitiesMixin.
# This means, that nothing has to be changed here.
router = DefaultRouter()
router.register(r'', UserViewSet)
urlpatterns = router.urls
|
[
"rest_framework.routers.DefaultRouter"
] |
[((1234, 1249), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (1247, 1249), False, 'from rest_framework.routers import DefaultRouter\n')]
|
from pathlib import Path
import pytest
import os
from powrap import powrap
FIXTURE_DIR = Path(__file__).resolve().parent
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad" / "glossary.po",))
def test_fail_on_bad_wrapping(po_file, capsys):
assert powrap.check_style([po_file]) == 1
assert str(po_file) in capsys.readouterr().err
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_succees_on_good_wrapping(po_file, capsys):
assert powrap.check_style([po_file]) == 0
assert str(po_file) not in capsys.readouterr().err
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad" / "invalid_po_file.po",))
def test_msgcat_error(po_file, capsys):
assert powrap.check_style([po_file]) == 0
assert str(po_file) not in capsys.readouterr().err
@pytest.mark.parametrize("po_file", ("non_existent_file.po",))
def test_fileread_error(po_file, capsys):
assert powrap.check_style([po_file]) == 0
assert str(po_file) not in capsys.readouterr().err
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_wrong_msgcat(po_file):
"""Test if msgcat is not available"""
environ_saved = os.environ["PATH"]
os.environ["PATH"] = ""
with pytest.raises(SystemExit) as sysexit:
powrap.check_style([po_file])
os.environ["PATH"] = environ_saved
assert sysexit.type == SystemExit
assert sysexit.value.code == 127
|
[
"pytest.mark.parametrize",
"pytest.raises",
"powrap.powrap.check_style",
"pathlib.Path"
] |
[((127, 201), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""po_file"""', "(FIXTURE_DIR / 'bad' / 'glossary.po',)"], {}), "('po_file', (FIXTURE_DIR / 'bad' / 'glossary.po',))\n", (150, 201), False, 'import pytest\n'), ((578, 663), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""po_file"""', "(FIXTURE_DIR / 'bad' / 'invalid_po_file.po',)"], {}), "('po_file', (FIXTURE_DIR / 'bad' /\n 'invalid_po_file.po',))\n", (601, 663), False, 'import pytest\n'), ((804, 865), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""po_file"""', "('non_existent_file.po',)"], {}), "('po_file', ('non_existent_file.po',))\n", (827, 865), False, 'import pytest\n'), ((261, 290), 'powrap.powrap.check_style', 'powrap.check_style', (['[po_file]'], {}), '([po_file])\n', (279, 290), False, 'from powrap import powrap\n'), ((485, 514), 'powrap.powrap.check_style', 'powrap.check_style', (['[po_file]'], {}), '([po_file])\n', (503, 514), False, 'from powrap import powrap\n'), ((711, 740), 'powrap.powrap.check_style', 'powrap.check_style', (['[po_file]'], {}), '([po_file])\n', (729, 740), False, 'from powrap import powrap\n'), ((919, 948), 'powrap.powrap.check_style', 'powrap.check_style', (['[po_file]'], {}), '([po_file])\n', (937, 948), False, 'from powrap import powrap\n'), ((1234, 1259), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (1247, 1259), False, 'import pytest\n'), ((1280, 1309), 'powrap.powrap.check_style', 'powrap.check_style', (['[po_file]'], {}), '([po_file])\n', (1298, 1309), False, 'from powrap import powrap\n'), ((92, 106), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (96, 106), False, 'from pathlib import Path\n')]
|
import exrex
import logging
import os
import multiprocessing
import numpy as np
from scipy.stats import genlogistic
from scipy.ndimage.filters import median_filter, uniform_filter1d
from functools import partial
from patteRNA.LBC import LBC
from patteRNA import rnalib, filelib, timelib, misclib, viennalib
from tqdm import tqdm
LOCK = multiprocessing.Lock()
logger = logging.getLogger(__name__)
clock = timelib.Clock()
class ScoringManager:
def __init__(self, model, run_config):
self.model = model
self.run_config = run_config
self.mp_tasks = run_config['n_tasks']
self.mp_pool = None
self.motifs = []
self.cscore_dists = None
self.dataset = None
self.no_vienna = run_config['no_vienna']
self.lbc = LBC()
if run_config['motif'] is not None:
self.parse_motifs()
def parse_motifs(self):
expression = self.run_config['motif']
expression = expression.replace('(', r'\(')
expression = expression.replace('.', r'\.')
expression = expression.replace(')', r'\)')
motifs = exrex.generate(expression)
self.motifs = list(filter(rnalib.valid_db, motifs))
def import_data(self, dataset):
self.dataset = dataset
def execute_scoring(self):
# Compile scoring configuration parameters
scoring_config = {'posteriors': self.run_config['posteriors'],
'hdsl': self.run_config['HDSL'],
'spp': self.run_config['SPP'],
'viterbi': self.run_config['viterbi'],
'suppress_nan': True,
'fp_posteriors': os.path.join(self.run_config['output'], 'posteriors.txt'),
'fp_scores_pre': os.path.join(self.run_config['output'], 'scores_pre'),
'fp_scores': os.path.join(self.run_config['output'], 'scores.txt'),
'fp_hdsl': os.path.join(self.run_config['output'], 'hdsl.txt'),
'fp_spp': os.path.join(self.run_config['output'], 'spp.txt'),
'fp_viterbi': os.path.join(self.run_config['output'], 'viterbi.txt'),
'no_cscores': self.run_config['no_cscores'],
'min_cscores': self.run_config['min_cscores'],
'batch_size': self.run_config['batch_size'],
'motifs': self.motifs,
'path': self.run_config['path'],
'context': self.run_config['context'],
'cscore_dists': None,
'no_vienna': self.no_vienna,
'energy': ~np.any([self.no_vienna,
self.run_config['no_cscores'],
not viennalib.vienna_imported]),
'lbc': self.lbc,
'hdsl_params': self.run_config['hdsl_params']}
self.pool_init() # Initialize parallelized pool
# Prepare score distributions for c-score normalization
if not scoring_config['no_cscores']:
logger.info('Sampling null sites for c-score normalization')
clock.tick()
self.cscore_dists = dict.fromkeys(self.motifs)
cscore_batch = self.make_cscore_batch(scoring_config['min_cscores'])
cscore_batch.pre_process(self.model, scoring=True)
with tqdm(total=len(self.motifs),
leave=False,
unit='motif') as pb_samples:
try:
if scoring_config['path']:
path = np.array(list(scoring_config['path']), dtype=int)
else:
path = None
worker = partial(self.sample_worker, path=path, batch=cscore_batch)
samples_pool = self.mp_pool.imap_unordered(worker, self.motifs)
for (motif, samples) in samples_pool:
params = genlogistic.fit(samples)
self.cscore_dists[motif] = genlogistic(c=params[0], loc=params[1], scale=params[2])
pb_samples.update()
self.mp_pool.close()
self.mp_pool.join()
except Exception:
self.mp_pool.terminate()
raise
scoring_config['cscore_dists'] = self.cscore_dists
logger.info(' ... done in {}'.format(misclib.seconds_to_hms(clock.tock())))
# Begin formal scoring phase by making batches to save on memory
batches = self.make_batches(scoring_config['batch_size'])
n_batches = len(self.dataset.rnas) // scoring_config['batch_size'] + 1 # Number of batches
if self.motifs:
header = "transcript\tstart score c-score BCE MEL Prob(motif) motif path seq\n"
with open(scoring_config['fp_scores_pre'], 'w') as f:
f.write(header)
logger.info("Executing scoring")
clock.tick()
with tqdm(total=n_batches,
leave=False,
unit='batch',
desc=' Overall') as pbar_batches:
# Process batches sequentially
for i, batch in enumerate(batches):
self.pool_init()
batch.pre_process(self.model)
with tqdm(total=len(batch.rnas),
leave=False,
unit="transcript",
desc="Current batch") as pbar_transcripts:
try:
worker = partial(self.score_worker, model=self.model, config=scoring_config)
jobs = self.mp_pool.imap_unordered(worker, batch.rnas.values())
for _ in jobs:
pbar_transcripts.update()
self.mp_pool.close()
self.mp_pool.join()
except Exception:
self.mp_pool.terminate()
raise
batch.clear()
pbar_batches.update()
# Sort score file
if self.motifs:
scores = filelib.read_score_file(scoring_config['fp_scores_pre'])
if not scores:
os.rename(scoring_config['fp_scores_pre'], scoring_config['fp_scores'])
else:
if scoring_config['no_cscores']:
filelib.write_score_file(sorted(scores, key=lambda score: score['score'], reverse=True),
scoring_config['fp_scores'])
else:
if scoring_config['energy']:
filelib.write_score_file(sorted(scores, key=lambda score: score['Prob(motif)'], reverse=True),
scoring_config['fp_scores'])
else:
filelib.write_score_file(sorted(scores, key=lambda score: score['c-score'], reverse=True),
scoring_config['fp_scores'])
os.remove(scoring_config['fp_scores_pre']) # Clean-up
logger.info(' ... done in {}'.format(misclib.seconds_to_hms(clock.tock())))
@staticmethod
def sample_worker(motif, path, batch):
if path is None:
path = rnalib.dot2states(motif)
scores = []
for transcript in batch.rnas.values():
scores.extend(get_null_scores(transcript, motif, path))
return motif, scores
@staticmethod
def score_worker(transcript, model, config):
model.e_step(transcript) # Apply model to transcripts
outputs = compute_outputs(transcript, model, config)
with LOCK as _:
write_outputs(outputs, config)
def make_cscore_batch(self, min_sample_size):
"""
Scan through RNAs in provided data and determine how many are needed to sufficiently
estimate null distributions for c-score normalization. Return a new Dataset with just
the RNAs to use for score sampling.
Args:
min_sample_size: Minimum number of samples to estimate the null score distribution for a single motif.
Returns:
Dataset of RNAs which is a subset of the provided data and meets the criteria needed for score sampling.
"""
motif_samples = {motif: 0 for motif in self.motifs}
cscore_rnas = []
for rna in self.dataset.rnas.values():
cscore_rnas.append(rna.name)
for motif in self.motifs:
null_sites = count_null_sites(rna, motif)
motif_samples[motif] += null_sites
if np.all([motif_samples[motif] >= min_sample_size for motif in motif_samples]):
break # No more sites needed
return self.dataset.spawn_set(rnas=cscore_rnas)
def make_batches(self, size):
rnas = list(self.dataset.rnas.keys())
while rnas:
rnas_batch = rnas[:size]
rnas[:size] = []
yield self.dataset.spawn_set(rnas=rnas_batch)
def pool_init(self):
self.mp_pool = multiprocessing.Pool(processes=self.mp_tasks,
maxtasksperchild=1000)
def count_null_sites(transcript, motif):
if motif not in transcript.valid_sites.keys():
transcript.find_valid_sites(motif)
if motif not in transcript.nan_sites.keys():
transcript.find_nan_sites(len(motif))
non_null_sites = transcript.nan_sites[len(motif)] | transcript.valid_sites[motif]
count = transcript.T - len(motif) + 1 - len(non_null_sites)
return count
def get_null_scores(transcript, motif, path):
# Get sites which violate sequence constraints
invalid_sites = np.where(~np.in1d(range(transcript.T - len(motif) + 1), transcript.valid_sites[motif]))[0]
null_scores = list(filter(lambda score: ~np.isnan(score['score']),
map(lambda start: score_path(transcript, start, path, motif, None, lbc=False),
invalid_sites)))
return [null_score['score'] for null_score in null_scores]
def compute_cscores(scores, dists):
list(map(lambda score: apply_cscore(score, dists[score['dot-bracket']]), scores))
def apply_cscore(score, dist):
pv = dist.sf(score['score'])
if pv == 0:
log_c = np.Inf
elif np.isnan(pv):
log_c = np.nan
else:
log_c = -np.log10(pv)
score['c-score'] = log_c
def score_path(transcript, start, path, motif, pt, lbc=True, context=40):
m = len(path)
end = start + m - 1
bce = np.nan
mel = np.nan
if np.all(np.isnan(transcript.obs[start:end + 1])):
score = np.nan
else:
score = 0
score += np.log(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])
score += np.sum((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])
score += np.log(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])
if lbc:
rstart = int(np.max((0, start - context)))
rend = int(np.min((len(transcript.seq), end + context)))
start_shift = start - rstart
hcs = rnalib.compile_motif_constraints(pt[0], pt[1], start_shift)
lmfe = viennalib.fold(transcript.seq[rstart:rend])
lcmfe = viennalib.hc_fold(transcript.seq[rstart:rend], hcs=hcs)
mel = lmfe - lcmfe
bce = bce_loss(transcript.gamma[1, start:end + 1], path)
return {'score': score,
'c-score': None,
'start': start,
'transcript': transcript.name,
'dot-bracket': motif,
'path': "".join([str(a) for a in path]),
'BCE': bce,
'MEL': mel,
'Prob(motif)': np.nan,
'seq': transcript.seq[start:start + m]}
def bce_loss(yhat, y):
assert len(yhat) == len(y)
return sum(
-yi * np.log(yhi + 1e-20) if yi == 1 else -(1 - yi) * np.log(1 - yhi + 1e-20) for yhi, yi in zip(yhat, y))
def compute_outputs(transcript, model, config):
outputs = {'name': transcript.name,
'viterbi': '',
'posteriors': '',
'spp': '',
'scores_pre': '',
'hdsl': ''} # Initialize outputs dictionary
if config['viterbi']:
vp = model.viterbi_decoding(transcript) # Viterbi algorithm
outputs['viterbi'] = "> {}\n{}\n".format(transcript.name, "".join([str(i) for i in vp]))
# Posterior pairing probabilities
if config['posteriors']:
transcript.gamma /= np.sum(transcript.gamma, axis=0)[np.newaxis, :]
outputs['posteriors'] = "> {}\n{}\n".format(transcript.name,
" ".join(["{:1.3f}".format(p) for p in transcript.gamma[0, :]]))
# Smoothed P(paired) measure --> HDSL without augmentation
if config['spp']:
spp_tmp = transcript.gamma[1, :] # Raw pairing probabilities
spp_tmp = uniform_filter1d(spp_tmp, size=5) # Local mean
spp = median_filter(spp_tmp, size=15) # Local median
outputs['spp'] = "> {}\n{}\n".format(transcript.name,
" ".join(["{:1.3f}".format(p) for p in spp]))
if config['motifs']:
transcript.compute_log_B_ratios()
scores = []
for motif in config['motifs']:
if config['path'] is not None:
path = np.array(list(config['path']), dtype=int)
else:
path = rnalib.dot2states(motif)
pt = transcript.find_valid_sites(motif) # Returns motif base pairing list
scores_tmp = list(map(lambda start: score_path(transcript, start, path, motif, pt, lbc=config['energy']),
transcript.valid_sites[motif]))
if config['suppress_nan']:
scores_tmp = list(filter(lambda s: ~np.isnan(s['score']), scores_tmp))
if config['cscore_dists'] is not None:
compute_cscores(scores_tmp, config['cscore_dists'])
scores += scores_tmp
if config['energy']:
config['lbc'].apply_classifier(scores)
outputs['scores_pre'] = format_scores(scores)
# Hairpin-derived structure level measure
if config['hdsl']:
hdsl_tmp = transcript.gamma[1, :] # Pairing probabilities
for score in scores:
# Profile augmentation with hairpin scores
if score['c-score'] > config['hdsl_params'][1]:
end = score['start'] + len(score['dot-bracket'])
boost = config['hdsl_params'][0] * (score['c-score'] - config['hdsl_params'][1])
hdsl_tmp[score['start']:end] += boost
# Clipping to [0, 1]
hdsl_tmp[hdsl_tmp < 0] = 0
hdsl_tmp[hdsl_tmp > 1] = 1
# Smoothing steps
hdsl_tmp = uniform_filter1d(hdsl_tmp, size=5) # Local mean
hdsl = median_filter(hdsl_tmp, size=15) # Local median
outputs['hdsl'] = "> {}\n{}\n".format(transcript.name, " ".join(["{:1.3f}".format(p) for p in hdsl]))
return outputs
def format_scores(scores):
return "".join(["{} {} {:1.2f} {:1.2f} {:1.2f} {:1.2f} {:1.3g} {} {} {}\n".format(
score['transcript'],
score['start'] + 1,
score['score'],
score['c-score'],
score['BCE'],
score['MEL'],
score['Prob(motif)'],
score['dot-bracket'],
score['path'],
score['seq']) for score in scores])
def write_outputs(outputs, config):
output_types = ['viterbi', 'posteriors', 'spp', 'scores_pre', 'hdsl']
for output_type in output_types:
if outputs[output_type]:
with open(config[f'fp_{output_type}'], 'a') as f:
f.write(outputs[output_type])
|
[
"os.remove",
"numpy.sum",
"multiprocessing.Lock",
"numpy.isnan",
"patteRNA.rnalib.compile_motif_constraints",
"scipy.ndimage.filters.uniform_filter1d",
"os.path.join",
"patteRNA.viennalib.hc_fold",
"scipy.stats.genlogistic.fit",
"numpy.max",
"patteRNA.LBC.LBC",
"numpy.log10",
"scipy.ndimage.filters.median_filter",
"functools.partial",
"patteRNA.timelib.Clock",
"tqdm.tqdm",
"os.rename",
"multiprocessing.Pool",
"exrex.generate",
"patteRNA.filelib.read_score_file",
"patteRNA.rnalib.dot2states",
"numpy.all",
"numpy.log",
"patteRNA.viennalib.fold",
"numpy.any",
"scipy.stats.genlogistic",
"logging.getLogger"
] |
[((337, 359), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (357, 359), False, 'import multiprocessing\n'), ((369, 396), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (386, 396), False, 'import logging\n'), ((405, 420), 'patteRNA.timelib.Clock', 'timelib.Clock', ([], {}), '()\n', (418, 420), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((780, 785), 'patteRNA.LBC.LBC', 'LBC', ([], {}), '()\n', (783, 785), False, 'from patteRNA.LBC import LBC\n'), ((1112, 1138), 'exrex.generate', 'exrex.generate', (['expression'], {}), '(expression)\n', (1126, 1138), False, 'import exrex\n'), ((9336, 9404), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'self.mp_tasks', 'maxtasksperchild': '(1000)'}), '(processes=self.mp_tasks, maxtasksperchild=1000)\n', (9356, 9404), False, 'import multiprocessing\n'), ((10590, 10602), 'numpy.isnan', 'np.isnan', (['pv'], {}), '(pv)\n', (10598, 10602), True, 'import numpy as np\n'), ((10864, 10903), 'numpy.isnan', 'np.isnan', (['transcript.obs[start:end + 1]'], {}), '(transcript.obs[start:end + 1])\n', (10872, 10903), True, 'import numpy as np\n'), ((10974, 11053), 'numpy.log', 'np.log', (['(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])'], {}), '(transcript.alpha[path[0], start] / transcript.alpha[1 - path[0], start])\n', (10980, 11053), True, 'import numpy as np\n'), ((11071, 11142), 'numpy.sum', 'np.sum', (['((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])'], {}), '((2 * path[1:-1] - 1) * transcript.log_B_ratio[1, start + 1:end])\n', (11077, 11142), True, 'import numpy as np\n'), ((11160, 11235), 'numpy.log', 'np.log', (['(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])'], {}), '(transcript.beta[path[-1], end] / transcript.beta[1 - path[-1], end])\n', (11166, 11235), True, 'import numpy as np\n'), ((1694, 1751), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""posteriors.txt"""'], {}), "(self.run_config['output'], 'posteriors.txt')\n", (1706, 1751), False, 'import os\n'), ((1796, 1849), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""scores_pre"""'], {}), "(self.run_config['output'], 'scores_pre')\n", (1808, 1849), False, 'import os\n'), ((1890, 1943), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""scores.txt"""'], {}), "(self.run_config['output'], 'scores.txt')\n", (1902, 1943), False, 'import os\n'), ((1982, 2033), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""hdsl.txt"""'], {}), "(self.run_config['output'], 'hdsl.txt')\n", (1994, 2033), False, 'import os\n'), ((2071, 2121), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""spp.txt"""'], {}), "(self.run_config['output'], 'spp.txt')\n", (2083, 2121), False, 'import os\n'), ((2163, 2217), 'os.path.join', 'os.path.join', (["self.run_config['output']", '"""viterbi.txt"""'], {}), "(self.run_config['output'], 'viterbi.txt')\n", (2175, 2217), False, 'import os\n'), ((5173, 5243), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_batches', 'leave': '(False)', 'unit': '"""batch"""', 'desc': '""" Overall"""'}), "(total=n_batches, leave=False, unit='batch', desc=' Overall')\n", (5177, 5243), False, 'from tqdm import tqdm\n'), ((6347, 6403), 'patteRNA.filelib.read_score_file', 'filelib.read_score_file', (["scoring_config['fp_scores_pre']"], {}), "(scoring_config['fp_scores_pre'])\n", (6370, 6403), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((7518, 7542), 'patteRNA.rnalib.dot2states', 'rnalib.dot2states', (['motif'], {}), '(motif)\n', (7535, 7542), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((8880, 8958), 'numpy.all', 'np.all', (['[(motif_samples[motif] >= min_sample_size) for motif in motif_samples]'], {}), '([(motif_samples[motif] >= min_sample_size) for motif in motif_samples])\n', (8886, 8958), True, 'import numpy as np\n'), ((11436, 11495), 'patteRNA.rnalib.compile_motif_constraints', 'rnalib.compile_motif_constraints', (['pt[0]', 'pt[1]', 'start_shift'], {}), '(pt[0], pt[1], start_shift)\n', (11468, 11495), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((11515, 11558), 'patteRNA.viennalib.fold', 'viennalib.fold', (['transcript.seq[rstart:rend]'], {}), '(transcript.seq[rstart:rend])\n', (11529, 11558), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((11579, 11634), 'patteRNA.viennalib.hc_fold', 'viennalib.hc_fold', (['transcript.seq[rstart:rend]'], {'hcs': 'hcs'}), '(transcript.seq[rstart:rend], hcs=hcs)\n', (11596, 11634), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((12834, 12866), 'numpy.sum', 'np.sum', (['transcript.gamma'], {'axis': '(0)'}), '(transcript.gamma, axis=0)\n', (12840, 12866), True, 'import numpy as np\n'), ((13258, 13291), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['spp_tmp'], {'size': '(5)'}), '(spp_tmp, size=5)\n', (13274, 13291), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((13324, 13355), 'scipy.ndimage.filters.median_filter', 'median_filter', (['spp_tmp'], {'size': '(15)'}), '(spp_tmp, size=15)\n', (13337, 13355), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((15220, 15254), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['hdsl_tmp'], {'size': '(5)'}), '(hdsl_tmp, size=5)\n', (15236, 15254), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((15288, 15320), 'scipy.ndimage.filters.median_filter', 'median_filter', (['hdsl_tmp'], {'size': '(15)'}), '(hdsl_tmp, size=15)\n', (15301, 15320), False, 'from scipy.ndimage.filters import median_filter, uniform_filter1d\n'), ((2747, 2838), 'numpy.any', 'np.any', (["[self.no_vienna, self.run_config['no_cscores'], not viennalib.vienna_imported]"], {}), "([self.no_vienna, self.run_config['no_cscores'], not viennalib.\n vienna_imported])\n", (2753, 2838), True, 'import numpy as np\n'), ((6447, 6518), 'os.rename', 'os.rename', (["scoring_config['fp_scores_pre']", "scoring_config['fp_scores']"], {}), "(scoring_config['fp_scores_pre'], scoring_config['fp_scores'])\n", (6456, 6518), False, 'import os\n'), ((7272, 7314), 'os.remove', 'os.remove', (["scoring_config['fp_scores_pre']"], {}), "(scoring_config['fp_scores_pre'])\n", (7281, 7314), False, 'import os\n'), ((10654, 10666), 'numpy.log10', 'np.log10', (['pv'], {}), '(pv)\n', (10662, 10666), True, 'import numpy as np\n'), ((11278, 11306), 'numpy.max', 'np.max', (['(0, start - context)'], {}), '((0, start - context))\n', (11284, 11306), True, 'import numpy as np\n'), ((13812, 13836), 'patteRNA.rnalib.dot2states', 'rnalib.dot2states', (['motif'], {}), '(motif)\n', (13829, 13836), False, 'from patteRNA import rnalib, filelib, timelib, misclib, viennalib\n'), ((3888, 3946), 'functools.partial', 'partial', (['self.sample_worker'], {'path': 'path', 'batch': 'cscore_batch'}), '(self.sample_worker, path=path, batch=cscore_batch)\n', (3895, 3946), False, 'from functools import partial\n'), ((10103, 10127), 'numpy.isnan', 'np.isnan', (["score['score']"], {}), "(score['score'])\n", (10111, 10127), True, 'import numpy as np\n'), ((12172, 12191), 'numpy.log', 'np.log', (['(yhi + 1e-20)'], {}), '(yhi + 1e-20)\n', (12178, 12191), True, 'import numpy as np\n'), ((12220, 12243), 'numpy.log', 'np.log', (['(1 - yhi + 1e-20)'], {}), '(1 - yhi + 1e-20)\n', (12226, 12243), True, 'import numpy as np\n'), ((4123, 4147), 'scipy.stats.genlogistic.fit', 'genlogistic.fit', (['samples'], {}), '(samples)\n', (4138, 4147), False, 'from scipy.stats import genlogistic\n'), ((4199, 4255), 'scipy.stats.genlogistic', 'genlogistic', ([], {'c': 'params[0]', 'loc': 'params[1]', 'scale': 'params[2]'}), '(c=params[0], loc=params[1], scale=params[2])\n', (4210, 4255), False, 'from scipy.stats import genlogistic\n'), ((5749, 5816), 'functools.partial', 'partial', (['self.score_worker'], {'model': 'self.model', 'config': 'scoring_config'}), '(self.score_worker, model=self.model, config=scoring_config)\n', (5756, 5816), False, 'from functools import partial\n'), ((14201, 14221), 'numpy.isnan', 'np.isnan', (["s['score']"], {}), "(s['score'])\n", (14209, 14221), True, 'import numpy as np\n')]
|
import os
from .base import GnuRecipe
class FreeFontRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(FreeFontRecipe, self).__init__(*args, **kwargs)
self.sha256 = '7c85baf1bf82a1a1845d1322112bc6ca' \
'982221b484e3b3925022e25b5cae89af'
self.depends = ['fontconfig', 'unzip']
self.name = 'freefont'
self.version = '20120503'
self.url = 'ftp://ftp.gnu.org/pub/gnu/freefont/' \
'freefont-ttf-$version.zip'
self.install_args = [['install', '-v', '-d', '-m755',
'%s/share/fonts/freefont' % self.prefix_dir],
['install', '-v', '-m644', '*.ttf',
'%s/share/fonts/freefont' % self.prefix_dir]]
def extract(self):
self.log_dir('extract', self.directory, 'extracting')
self.extract_args = ['unzip', self.filename, '-d', self.directory]
self.run_exe(self.extract_args, self.tarball_dir, self.environment)
self.directory = os.path.join(self.directory,
'freefont-%s' % self.version)
def configure(self):
pass
def compile(self):
pass
def post_install(self):
self.log_dir('post-install', self.directory, 'fc-cache')
self.run_exe(['fc-cache'], self.directory, self.environment)
|
[
"os.path.join"
] |
[((1053, 1111), 'os.path.join', 'os.path.join', (['self.directory', "('freefont-%s' % self.version)"], {}), "(self.directory, 'freefont-%s' % self.version)\n", (1065, 1111), False, 'import os\n')]
|
import src.frontend.utility.utility as utility
import src.model.universe as universe
class rd_obj():
def __init__(self):
self.name = ''
self.declared = False
def declaration_collision(self):
pass
class rd_list(list):
def setup(self, obj_constructor):
self.obj_constructor = obj_constructor
return self
def reference(self, obj_string, constructor_override=None):
if obj_string in [x.name for x in self]:
return self.get_object(obj_string)
else:
if constructor_override is not None:
new_obj = constructor_override()
else:
new_obj = self.obj_constructor()
new_obj.name = obj_string
new_obj.declared = False
self.append(new_obj)
return new_obj
def declare(self, obj_string, constructor_override=None):
if obj_string in [x.name for x in self]:
declared_obj = self.get_object(obj_string)
if declared_obj.declared:
declared_obj.declaration_collision()
raise Exception('object collision')
else:
declared_obj.declared = True
return declared_obj
else:
if constructor_override is not None:
new_obj = constructor_override()
else:
new_obj = self.obj_constructor()
new_obj.name = obj_string
new_obj.declared = True
self.append(new_obj)
return new_obj
def get_object(self, obj_string):
for obj in self:
if obj_string == obj.name:
return obj
raise Exception('object not accessible')
def validate(self):
for obj in self:
if not obj.declared:
raise Exception(obj.name + ' has not been declared')
class global_object():
def __init__(self):
self.sandbox = rd_list().setup(sandbox)
self.maps = map_collection()
self.universe = universe.universe()
self.universe.initialize()
def resolve(self):
self.sandbox.validate()
for sandbox in self.sandbox:
sandbox.variables.validate()
sandbox.services.validate()
class map_collection():
def __init__(self):
self.maps = list()
def add(self, event):
if self.non_colliding_keys(event) and self.non_colliding_files(event):
if event.string in [x.string for x in self.maps]:
old_event = self.return_matching_event(event)
self.merge_events(old_event, event)
else:
self.maps.append(event)
else:
raise Exception('Cannot add event "' + event.string + '" to collection, key "' + event.key + '" already in collection. The same key cannot be bound to multiple events.')
def return_matching_event(self, event):
for maps in self.maps:
if event.string == maps.string:
return maps
def non_colliding_keys(self, event):
if event.key is None or event.key not in [x.key for x in self.maps]:
return True
else:
raise Exception('Cannot add event "' + event.string + '" to collection, key "' + event.key + '" already in collection. The same key cannot be bound to multiple events.')
def non_colliding_files(self, event):
if event.file is None or event.file not in [x.file for x in self.maps]:
return True
else:
raise Exception('Cannot add event "' + event.string + '" to collection, file "' + event.file + '" already in collection. The same file cannot be bound to multiple events.')
def merge_events(self, old_event, new_event):
if new_event.key is not None:
old_event.key = new_event.key
if new_event.file is not None:
old_event.file = new_event.file
old_event.services = old_event.services + new_event.services
class sandbox(rd_obj):
def __init__(self):
rd_obj.__init__(self)
self.variables = rd_list().setup(variable)
self.services = rd_list().setup(service)
class variable(rd_obj):
def __init__(self, name=''):
rd_obj.__init__(self)
self.name = name
self.type = 'int'
self.value = '0'
self.word_size = '8'
def set_value(self, value):
if int(value) < 2**int(self.word_size) and int(value) >= 0:
self.value = value
else:
raise Exception('illegal value declaration:' + value + ' . Number not within bounds of the word size')
class constant(variable):
def __init__(self, value='0'):
variable.__init__(self, value)
self.set_value(value)
class service(rd_obj):
def __init__(self):
rd_obj.__init__(self)
self.sequence = list()
self.is_anonymous = False
class event():
def __init__(self, string):
self.string = string
self.key = None
self.file = None
self.services = list()
def add_key(self, key_string):
if self.key == None:
self.key = key_string
else:
raise Exception('event "' + self.string + '" already has key "' + self.key + '". Cannot assign "' + key_string + '" to "' + self.string + '"')
def add_file(self, file_string):
if self.file == None:
self.file = file_string
else:
raise Exception('event "' + self.string + '" already has file "' + self.file + '". Cannot assign "' + file_string + '" to "' + self.string + '"')
class statement():
def __init__(self):
self.identifier = ''
self.arg = list()
class assignment(statement):
def __init__(self):
self.arg = [None]
class service_call(statement):
def __init__(self):
self.identifier = ''
class source_call(statement):
def __init__(self):
self.arg = [None]
class if_statement():
def __init__(self):
self.true_service = None
self.false_service = None
self.condition = None
class jump_statement():
def __init__(self):
self.var = None
self.services = list()
class operator():
def __init__(self):
identity = ''
output = ''
arg = list()
class unary_operator(operator):
def __init__(self):
operator.__init__(self)
self.arg = [None]
class binary_operator(operator):
def __init__(self):
operator.__init__(self)
self.arg = [None] * 2
class conditional(operator):
def __init__(self):
operator.__init__(self)
self.arg = [None] * 2
def is_assignment(arg):
return isinstance(arg, assignment)
def is_service_call(arg):
return isinstance(arg, service_call)
def is_operator(arg):
return isinstance(arg, operator)
def is_unary_operator(arg):
return isinstance(arg, unary_operator)
def is_binary_operator(arg):
return isinstance(arg, binary_operator)
def is_variable(arg):
return isinstance(arg, variable)
def is_constant(arg):
return isinstance(arg, constant)
def is_literal_value(arg):
return isinstance(arg, literal_value)
def is_source_call(arg):
return isinstance(arg, source_call)
def is_key(arg):
return isinstance(arg, key)
def is_if_statement(arg):
return isinstance(arg, if_statement)
def is_jump_statement(arg):
return isinstance(arg, jump_statement)
def is_conditional(arg):
return isinstance(arg, conditional)
|
[
"src.model.universe.universe"
] |
[((1651, 1670), 'src.model.universe.universe', 'universe.universe', ([], {}), '()\n', (1668, 1670), True, 'import src.model.universe as universe\n')]
|
# Import Third-Party
from requests import Response
class APIResponse(Response):
def __init__(self, req_response, formatted_json=None):
for k, v in req_response.__dict__.items():
self.__dict__[k] = v
self._formatted = formatted_json
@property
def formatted(self):
return self._formatted
@formatted.setter
def formatted(self, value):
self._formatted = value
if __name__ == '__main__':
from bitex import Kraken
k = Kraken()
resp = k.ticker('XXBTZEUR')
print(resp.formatted)
print(resp.json())
|
[
"bitex.Kraken"
] |
[((492, 500), 'bitex.Kraken', 'Kraken', ([], {}), '()\n', (498, 500), False, 'from bitex import Kraken\n')]
|
# Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
import collections
import urllib
import re
import six
from concurrent.futures import ThreadPoolExecutor, wait
import requests
from jq import jq
import koji
import gluetool
from gluetool import GlueError
from gluetool.action import Action
from gluetool.utils import cached_property, normalize_multistring_option, dict_update
from gluetool.log import LoggerMixin, log_dict
# Type annotations
from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set # noqa
from typing_extensions import TypedDict
#: Information about task architectures.
#:
#: :ivar list(str) arches: List of architectures.
TaskArches = NamedTuple('TaskArches', [('arches', List[str])])
#: Information about MBS.
#:
#: :ivar str api_version: MBS API version.
#: :ivar str auth_method: MBS authentication method.
#: :ivar str version: MBS version.
MBSAbout = NamedTuple('MBSAbout', [
('api_version', str),
('auth_method', str),
('version', str)
])
# regular expressions for nvr and nsvc of a module
NSVC_REGEX = re.compile(r'^([^:]*):([^:]*):([^:]*):([^:]*)$')
NVR_REGEX = re.compile(r'^(.*)-([^-]*)-([^\.]*)\.(.*)$')
NSVCType = Tuple[str, str, str, str]
BuildInfoType = TypedDict(
'BuildInfoType',
{
'id': int,
'name': str,
'stream': str,
'version': str,
'context': str,
'owner': str,
'scratch': str,
'modulemd': str,
'scmurl': str,
}
)
def nsvc_from_string(nsvc):
# type: (str) -> NSVCType
"""
Helper function to return a tuple of NSVC from a string.
:param: str nsvc: NSVC string.
:rtype: tuple
:returns: Tuple of N, S, V, C.
:raises: gluetool.GlueError if NSVC not valid.
"""
match = re.match(NSVC_REGEX, nsvc)
if not match:
raise gluetool.GlueError("'{}' is not a valid module nsvc".format(nsvc))
return cast(NSVCType, match.groups())
def nsvc_from_nvr(nvr):
# type: (str) -> NSVCType
"""
Helper function to return a tuple of NSVC from an Brew/Koji compatible module NVR.
:param: str nvr: NVR string.
:rtype: tuple
:returns: Tuple of N, S, V, C.
:raises: gluetool.GlueError if NVR not valid.
"""
match = re.match(NVR_REGEX, nvr)
if not match:
raise gluetool.GlueError("'{}' is not a valid module nvr".format(nvr))
(name, stream, version, context) = match.groups()
# underscore in stream number must be converted to '-'
stream = stream.replace('_', '-')
return (name, stream, version, context)
class MBSApi(object):
def __init__(self, mbs_api_url, mbs_ui_url, module):
# type: (str, str, gluetool.Module) -> None
self.mbs_api_url = mbs_api_url
self.mbs_ui_url = mbs_ui_url
self.module = module
@cached_property
def about(self):
# type: () -> MBSAbout
"""
Returns MBS about endpoint as a namedtuple.
:rtype: MBSAbout
:returns: MBS about namedtuple with fields api_version, auth_method and version.
"""
return MBSAbout(**self._get_json('module-build-service/1/about'))
def _get_json(self, location, params=None):
# type: (str, Optional[Dict[str, Any]]) -> Any
"""
Query MBS API endpoint location and return the JSON reply.
:param str location: API endpoint to query.
:param dict params: Query parameters
:rtype: dict
:returns: JSON output as a dictionary.
"""
params = params or {}
url = '{}/{}'.format(self.mbs_api_url, location)
if params:
# keep params sorted in the URL - makes testing possible
sorted_params = collections.OrderedDict([
(name, params[name]) for name in sorted(params.iterkeys())
])
url = '{}?{}'.format(url, urllib.urlencode(sorted_params))
self.module.debug('[MBS API]: {}'.format(url))
with Action('query MBS API', parent=Action.current_action(), logger=self.module.logger, tags={
'location': location,
'params': params
}):
try:
output = requests.get(url).json()
except Exception:
raise gluetool.GlueError('Unable to get: {}'.format(url))
log_dict(self.module.debug, '[MBS API] output', output)
return output
def get_build_info_by_id(self, build_id, verbose=False):
# type: (int, bool) -> BuildInfoType
"""
Get MBS build information from build ID.
:param int build_id: MBS build ID.
:param boolean verbose: Verbose query.
:rtype: dict
:returns: JSON output with given build informations.
"""
params = {'verbose': 1 if verbose else 0}
return cast(
BuildInfoType,
self._get_json('module-build-service/1/module-builds/{}'.format(build_id), params=params)
)
def get_build_info_by_nsvc(self, nsvc_tuple, verbose=False):
# type: (NSVCType, bool) -> BuildInfoType
"""
Get MBS build information from NSVC tuple.
:param tuple nsvc_tuple: Build NSVC as a tuple.
:param boolean verbose: Verbose query.
:rtype: dict
:returns: JSON output with given build informations.
"""
(name, stream, version, context) = nsvc_tuple
url = 'module-build-service/1/module-builds/'
params = {
'name': name,
'stream': stream,
'version': version,
'context': context,
'verbose': 1 if verbose else 0
}
try:
return cast(BuildInfoType, self._get_json(url, params=params)['items'][0])
except (IndexError, KeyError):
raise gluetool.GlueError(
"Could not find module with nsvc '{}:{}:{}:{}'".format(name, stream, version, context)
)
def get_build_ui_url(self, build_id):
# type: (int) -> str
"""
Returns URL to the MBS web interface for the given build ID.
:param int build_id: MBS build ID.
:rtype: str
:returns: URL to web interface of the MBS build.
"""
return '{}/module/{}'.format(self.mbs_ui_url, build_id)
class MBSTask(LoggerMixin, object):
ARTIFACT_NAMESPACE = 'redhat-module'
def __init__(self, module, build_id=None, nsvc=None, nvr=None):
# type: (MBS, Optional[int], Optional[str], Optional[str]) -> None
super(MBSTask, self).__init__(module.logger)
self.module = module
mbs_api = module.mbs_api()
if sum([bool(param) for param in [build_id, nsvc, nvr]]) != 1:
raise gluetool.GlueError('module must be initialized only from one of build_id, nsvc or nvr')
if build_id:
build_info = mbs_api.get_build_info_by_id(build_id, verbose=True)
if nsvc:
build_info = mbs_api.get_build_info_by_nsvc(nsvc_from_string(nsvc), verbose=True)
if nvr:
build_info = mbs_api.get_build_info_by_nsvc(nsvc_from_nvr(nvr), verbose=True)
self._build_info = build_info
self.id = self.dispatch_id = build_info['id']
self.name = build_info['name']
self.component = self.name
self.stream = build_info['stream']
self.version = build_info['version']
self.context = build_info['context']
self.issuer = build_info['owner']
self.scratch = build_info['scratch']
self.nsvc = '{}:{}:{}:{}'.format(self.name, self.stream, self.version, self.context)
self.tags = [] # type: List[str]
# `nvr` is:
# - often used as unique id of artifact (e.g. in mail notifications)
# - same as nvr of module in Brew/Koji
# - for modules the nvr is diffrent from NSVC, as it is delimited with '-' instead of ':'
# and also in case of stream the character '-' is replaced with '_', see:
# https://github.com/release-engineering/resultsdb-updater/pull/73#discussion_r235964781
# - if build is scratch, the '+' and id is added to the end
self.nvr = '{}-{}-{}.{}'.format(self.name, self.stream.replace('-', '_'), self.version, self.context)
if self.scratch:
self.nvr = '{}+{}'.format(self.nvr, self.id)
# make devel module nvr available for convenience
self.devel_nvr = '{}-devel-{}-{}.{}'.format(
self.name, self.stream.replace('-', '_'), self.version, self.context
)
# build tags from brew, only applicable to non-scratch modules, scratch modules do not have metadata in Brew
if not self.scratch:
self.tags = [tag['name'] for tag in self.module.shared('koji_session').listTags(self.nvr)]
# this string identifies component in static config file
self.component_id = '{}:{}'.format(self.name, self.stream)
# the target for modules uses platform stream, which nicely reflects the fact for which
# release the module is built for, similarly to what build target in Brew/Koji does
self.target = self.platform_stream
# required API for our modules providing artifacts, we have no destination_tags for modules, use target
self.destination_tag = self.target
@cached_property
def platform_stream(self):
# type: () -> str
"""
:rtype: str
:returns: Platform stream from the modulemd document.
"""
query = ".data.xmd.mbs.buildrequires.platform.stream"
platform_stream = jq(query).transform(self._modulemd)
if not platform_stream:
raise gluetool.GlueError('Could not detect platform stream in modulemd document')
return cast(str, platform_stream.encode('ascii'))
@cached_property
def _modulemd(self):
# type: () -> Dict[str, Any]
"""
Returns ``modulemd`` document if available in build info. Describes details of the artifacts
used to build the module. It is embedded in a form of string, containing the YAML document.
This function extracts the string and unpacks its YAML-ness into a data structure it represents.
:returns: ``modulemd`` structure of ``None`` if there's no ``modulemd`` key in the build info.
"""
if 'modulemd' not in self._build_info:
raise gluetool.GlueError('Artifact build info does not include modulemd document')
# Use "base" loader, to overcome MBS representing some string-like values as numbers,
# for example "5.30" may be expressed as a number `5.30` which the default parser yields
# as a number, `5.3`, which is just misleading. "base" parser yields "5.30", that's
# better. But, it probably treats *all* fields this way, so some fields we're expected
# to be numbers are suddenly strings...
modulemd = gluetool.utils.from_yaml(self._build_info['modulemd'], loader_type='base')
log_dict(self.debug, 'modulemd', modulemd)
return cast(Dict[str, Any], modulemd)
@cached_property
def has_artifacts(self):
# type: () -> bool
# We believe MBS - and Brew behind it keeps artifacts "forever" - or, at least, long enough to matter to us
# - therefore we don't even bother to check for their presence.
return True
@cached_property
def task_arches(self):
# type: () -> TaskArches
"""
:rtype: TaskArches
:returns: Information about arches the task was building for
"""
query = """
.data.components.rpms
| .[]
| .arches
| .[]
"""
# Empty modules do not have components
if 'components' not in self._modulemd['data']:
return cast(TaskArches, self.module._default_task_arches)
all_arches = jq(query).transform(self._modulemd, multiple_output=True)
log_dict(self.debug, 'gathered module arches', all_arches)
# Apparently, output from jq is unicode string, despite feeding it ascii-encoded. Encode each arch
# string to ascii before while we're getting rid of duplicates.
#
# ``set`` to filter out duplicities, ``list`` to convert the set back to a list of uniq arches,
# and ``sorted`` to make it easier to grab & read & test.
arches = sorted(list(set([arch.encode('ascii') for arch in all_arches])))
log_dict(self.debug, 'unique module arches', arches)
return TaskArches(arches)
@cached_property
def dependencies(self):
# type: () -> List[str]
dependencies = []
try:
requires = self._modulemd['data']['dependencies'][0]['requires']
except (AttributeError, KeyError) as error:
raise gluetool.GlueError('Could not detect module dependecies: {}'.format(error))
for module_name, module_streams in six.iteritems(requires):
for stream in module_streams:
dependencies.append('{}:{}'.format(module_name, stream))
return sorted(dependencies)
@cached_property
def url(self):
# type: () -> str
return self.module.mbs_api().get_build_ui_url(self.id)
@cached_property
def distgit_ref(self):
# type: () -> Optional[str]
"""
Distgit ref id from which package has been built or ``None`` if it's impossible to find it.
:rtype: str
:returns: Dist-git ref of the build source.
"""
try:
return self._build_info['scmurl'].split('#')[1].encode('ascii')
except (AttributeError, IndexError):
self.debug('Distgit ref not found in scmurl: {}'.format(self._build_info['scmurl']))
return None
@cached_property
def dist_git_repository_name(self):
# type: () -> str
return self.component
@cached_property
def baseline(self):
# type: () -> Optional[str]
"""
Return baseline task NVR if `baseline-method` specified, otherwise return None.
:rtype: str
"""
if not self.module.option('baseline-method'):
return None
task = cast(MBSTask, self.baseline_task)
return task.nvr
@cached_property
def baseline_task(self):
# type: () -> Optional[MBSTask]
"""
Return baseline task. For documentation of the baseline methods see the module's help.
:rtype: MBSTask
:returns: Initialized task for the baseline build or None if baseline not found.
:raises gluetool.glue.GlueError: if specific build does not exist or no baseline-method specified.
"""
method = self.module.option('baseline-method')
if not method:
raise GlueError("Cannot get baseline because no 'baseline-method' specified")
if method == 'previous-released-build':
previous_tags = self.previous_tags(tags=self.tags)
if not previous_tags:
return None
baseline_task = self.latest_released(tags=previous_tags)
elif method == 'previous-build':
baseline_task = self.latest_released(tags=self._tags_from_map)
elif method == 'specific-build':
nvr = self.module.option('baseline-nvr')
try:
baseline_task = self.module.tasks(nvrs=[nvr])[1]
except GlueError:
raise GlueError("Specific build with nvr '{}' not found".format(nvr))
else:
# this really should not happen ...
self.warn("Unknown baseline method '{}'".format(method), sentry=True)
return None
return baseline_task
def previous_tags(self, tags):
# type: (List[str]) -> List[str]
"""
Return previous tags according to the inheritance tag hierarchy to the given tags.
:param str tags: Tags used for checking.
:rtype: list(str)
:returns: List of previous tags, empty list if previous tags not found.
:raises gluetool.glue.GlueError: In case previous tag search cannot be performed.
"""
previous_tags = []
session = self.module.shared('koji_session')
for tag in tags:
if tag == '<no build target available>':
raise GlueError('Cannot check for previous tag as build target does not exist')
try:
previous_tags.append(session.getFullInheritance(tag)[0]['name'])
except (KeyError, IndexError, koji.GenericError):
self.warn("Failed to find inheritance tree for tag '{}'".format(tag), sentry=True)
return previous_tags
def latest_released(self, tags=None):
# type: (Optional[List[str]]) -> Optional[MBSTask]
"""
Returns task of the latest module build tagged with the same build target.
If no builds are found ``None`` is returned.
In case the build found is the same as this build, the previous build is returned.
The tags for checking can be overriden with the ``tags`` parameter. First match wins.
:param list(str) tags: Tags to use for searching.
:rtype: :py:class:`MBSTask`
"""
tags = tags or [self.target]
session = self.module.shared('koji_session')
for tag in tags:
try:
builds = session.listTagged(tag, None, True, latest=2, package=self.component)
except koji.GenericError as error:
self.warn(
"ignoring error while listing latest builds tagged to '{}': {}".format(tag, error),
sentry=True
)
continue
if builds:
break
else:
log_dict(self.debug, "no latest builds found for package '{}' on tags".format(self.component), tags)
return None
# for scratch builds the latest released package is the latest tagged
if self.scratch:
build = builds[0]
# for non scratch we return the latest released package, in case it is the same, the previously
# released package
else:
if self.nvr != builds[0]['nvr']:
build = builds[0]
else:
build = builds[1] if len(builds) > 1 else None
return self.module.tasks(nvrs=[build['nvr']])[1] if build else None
@cached_property
def _tags_from_map(self):
# type: () -> List[str]
"""
Unfortunately tags used for looking up baseline builds need to be resolved from a rules
file due to their specifics.
Nice examples for this are:
* rhel-8 module builds, which have ``target`` set to el8.X.Y, i.e. module platform stream,
but we need to transform it to Brew module tag ``rhel-8.X.Y-modules-candidate`` for correct
lookup
"""
self.module.require_shared('evaluate_instructions', 'evaluate_rules')
# use dictionary which can be altered in _tags_callback
map = {
'tags': []
} # type: Dict[str, List[str]]
def _tags_callback(instruction, command, argument, context):
# type: (str, str, List[str], str) -> None
map['tags'] = []
for arg in argument:
map['tags'].append(self.module.shared('evaluate_rules', arg, context=context))
context = dict_update(self.module.shared('eval_context'), {
'TASK': self
})
commands = {
'tags': _tags_callback,
}
self.module.shared(
'evaluate_instructions', self.module.baseline_tag_map,
commands=commands, context=context
)
log_dict(self.debug, 'Tags from baseline tag map', map['tags'])
return map['tags']
class MBS(gluetool.Module):
name = 'mbs'
description = 'Provides information about MBS (Module Build Service) artifact'
supported_dryrun_level = gluetool.glue.DryRunLevels.DRY
options = [
('MBS options', {
'mbs-ui-url': {
'help': 'URL of mbs ui server.',
'type': str
},
'mbs-api-url': {
'help': 'URL of mbs api server.',
'type': str
}
}),
('Build initialization options', {
'build-id': {
'help': 'Initialize build from MBS build ID (default: none).',
'action': 'append',
'default': [],
},
'nsvc': {
'help': 'Initialize build from NSVC (default: none).',
'action': 'append',
'default': [],
},
'nvr': {
'help': 'Initialize build from NVR (default: none).',
'action': 'append',
'default': [],
},
}),
('Default options', {
'default-task-arches': {
'help': 'Default task arches to use in case of empty modules.',
'action': 'append'
}
}),
('Baseline options', {
'baseline-method': {
'help': 'Method for choosing the baseline package.',
'choices': ['previous-build', 'specific-build', 'previous-released-build'],
'metavar': 'METHOD',
},
'baseline-nvr': {
'help': "NVR of the build to use with 'specific-build' baseline method",
},
'baseline-tag-map': {
'help': 'Optional rules providing tags which are used for finding baseline package'
}
})
]
required_options = ('mbs-api-url', 'default-task-arches')
shared_functions = ['primary_task', 'tasks', 'mbs_api']
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super(MBS, self).__init__(*args, **kwargs)
self._tasks = [] # type: List[MBSTask]
@cached_property
def _default_task_arches(self):
# type: () -> TaskArches
return TaskArches(gluetool.utils.normalize_multistring_option(self.option('default-task-arches')))
def primary_task(self):
# type: () -> Optional[MBSTask]
"""
Returns a `primary` module build, the first build in the list of current nodules.
:rtype: :py:class:`MbsTask` or None
:returns: Instance of an object represeting a module buil or None, if no modules are avaiable.
"""
log_dict(self.debug, 'primary task - current modules', self._tasks)
return self._tasks[0] if self._tasks else None
def _init_mbs_builds(self, build_ids=None, nsvcs=None, nvrs=None):
# type: (Optional[List[str]], Optional[List[str]], Optional[List[str]]) -> None
"""
Initializes MBS builds in parallel.
:param list build_ids: List of module build IDs.
:param list nsvcs: List of module NSVCs.
:param list nvrs: List of NVRs of a module (compatible with brew/koji).
:retype: list(MBSTask)
:returns: List of initialized MBS builds.
"""
build_ids = build_ids or []
nsvcs = nsvcs or []
nvrs = nvrs or []
current_action = Action.current_action()
# Our API routines call `Action.current_action` to get parent for their own actions,
# and since we're spawning threads for our `MBSTask` calls, we need to provide
# the initial action in each of those threads.
def _init_trampoline(**kwargs):
# type: (**Any) -> MBSTask
Action.set_thread_root(current_action)
return MBSTask(self, **kwargs)
with ThreadPoolExecutor(thread_name_prefix="api_thread") as executor:
# initialized from build IDs
futures = {
executor.submit(_init_trampoline, build_id=build_id)
for build_id in build_ids
}
# initialized from NSVCs
futures.update({
executor.submit(_init_trampoline, nsvc=nsvc)
for nsvc in nsvcs
})
# initialized from NVRs
futures.update({
executor.submit(_init_trampoline, nvr=nvr)
for nvr in nvrs
})
Wait = NamedTuple('Wait', (('done', Set[Any]), ('not_done', Set[Any])))
wait_result = cast(Wait, wait(futures))
for future in wait_result.done:
self._tasks.append(future.result())
def tasks(self, build_ids=None, nsvcs=None, nvrs=None):
# type: (Optional[List[str]], Optional[List[str]], Optional[List[str]]) -> List[MBSTask]
"""
Returns list of module builds available. If any of the additional parameters
are provided, modules list is extended with them first.
:param list build_ids: List of module build IDs.
:param list nsvcs: List of module NSVCs.
:param list nvrs: List of NVRs of a module (compatible with brew/koji).
:rtype: list(MBSTask)
:returns: List of module builds.
"""
if any([build_ids, nsvcs, nvrs]):
self._init_mbs_builds(build_ids=build_ids, nsvcs=nsvcs, nvrs=nvrs)
return self._tasks
@property
def eval_context(self):
# type: () -> Dict[str, Union[str, MBSTask, List[str], List[MBSTask]]]
__content__ = { # noqa
'ARTIFACT_TYPE': """
Type of the artifact, ``mbs-build`` in the case of ``mbs`` module.
""",
'BUILD_TARGET': """
Build target for modules is the platform module stream name (e.g. el8, el8.1.0, etc).
""",
'PRIMARY_TASK': """
Primary task, represented as ``MBSTask`` instance.
""",
'TAGS': """
Module Brew/Koji build tags.
""",
'TASKS': """
List of all tasks known to this module instance.
"""
}
primary_task = self.primary_task()
if not primary_task:
self.debug('No primary task available, cannot pass it to eval_context')
return {}
return {
# common for all artifact providers
'ARTIFACT_TYPE': primary_task.ARTIFACT_NAMESPACE,
'BUILD_TARGET': primary_task.target,
'PRIMARY_TASK': primary_task,
'TAGS': primary_task.tags,
'TASKS': self.tasks()
}
@cached_property
def _mbs_api(self):
# type: () -> MBSApi
return MBSApi(self.option('mbs-api-url'), self.option('mbs-ui-url'), self)
@cached_property
def baseline_tag_map(self):
# type: () -> Any
if not self.option('baseline-tag-map'):
return []
return gluetool.utils.load_yaml(self.option('baseline-tag-map'))
def mbs_api(self):
# type: () -> MBSApi
"""
Returns MBSApi instance.
"""
return cast(MBSApi, self._mbs_api)
def execute(self):
# type: () -> None
self.info(
"connected to MBS instance '{}' version '{}'".format(
self.option('mbs-api-url'),
self.mbs_api().about.version
)
)
# koji/brew is required to get module tags
self.require_shared('koji_session')
if any([self.option(opt) for opt in ['build-id', 'nsvc', 'nvr']]):
self._init_mbs_builds(
build_ids=normalize_multistring_option(self.option('build-id')),
nsvcs=normalize_multistring_option(self.option('nsvc')),
nvrs=normalize_multistring_option(self.option('nvr'))
)
for task in self._tasks:
self.info('Initialized with {}: {} ({})'.format(task.id, task.nsvc, task.url))
# init baseline build if requested
if self.option('baseline-method'):
if task.baseline_task:
self.info('Baseline build: {} ({})'.format(task.baseline_task.nvr, task.baseline_task.url))
else:
self.warn('Baseline build was not found')
|
[
"gluetool.action.Action.set_thread_root",
"re.compile",
"typing.cast",
"jq.jq",
"re.match",
"typing_extensions.TypedDict",
"gluetool.action.Action.current_action",
"typing.NamedTuple",
"gluetool.utils.from_yaml",
"urllib.urlencode",
"requests.get",
"concurrent.futures.wait",
"concurrent.futures.ThreadPoolExecutor",
"six.iteritems",
"gluetool.GlueError",
"gluetool.log.log_dict"
] |
[((726, 775), 'typing.NamedTuple', 'NamedTuple', (['"""TaskArches"""', "[('arches', List[str])]"], {}), "('TaskArches', [('arches', List[str])])\n", (736, 775), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((948, 1039), 'typing.NamedTuple', 'NamedTuple', (['"""MBSAbout"""', "[('api_version', str), ('auth_method', str), ('version', str)]"], {}), "('MBSAbout', [('api_version', str), ('auth_method', str), (\n 'version', str)])\n", (958, 1039), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((1114, 1161), 're.compile', 're.compile', (['"""^([^:]*):([^:]*):([^:]*):([^:]*)$"""'], {}), "('^([^:]*):([^:]*):([^:]*):([^:]*)$')\n", (1124, 1161), False, 'import re\n'), ((1175, 1220), 're.compile', 're.compile', (['"""^(.*)-([^-]*)-([^\\\\.]*)\\\\.(.*)$"""'], {}), "('^(.*)-([^-]*)-([^\\\\.]*)\\\\.(.*)$')\n", (1185, 1220), False, 'import re\n'), ((1274, 1443), 'typing_extensions.TypedDict', 'TypedDict', (['"""BuildInfoType"""', "{'id': int, 'name': str, 'stream': str, 'version': str, 'context': str,\n 'owner': str, 'scratch': str, 'modulemd': str, 'scmurl': str}"], {}), "('BuildInfoType', {'id': int, 'name': str, 'stream': str,\n 'version': str, 'context': str, 'owner': str, 'scratch': str,\n 'modulemd': str, 'scmurl': str})\n", (1283, 1443), False, 'from typing_extensions import TypedDict\n'), ((1814, 1840), 're.match', 're.match', (['NSVC_REGEX', 'nsvc'], {}), '(NSVC_REGEX, nsvc)\n', (1822, 1840), False, 'import re\n'), ((2292, 2316), 're.match', 're.match', (['NVR_REGEX', 'nvr'], {}), '(NVR_REGEX, nvr)\n', (2300, 2316), False, 'import re\n'), ((4357, 4412), 'gluetool.log.log_dict', 'log_dict', (['self.module.debug', '"""[MBS API] output"""', 'output'], {}), "(self.module.debug, '[MBS API] output', output)\n", (4365, 4412), False, 'from gluetool.log import LoggerMixin, log_dict\n'), ((10950, 11024), 'gluetool.utils.from_yaml', 'gluetool.utils.from_yaml', (["self._build_info['modulemd']"], {'loader_type': '"""base"""'}), "(self._build_info['modulemd'], loader_type='base')\n", (10974, 11024), False, 'import gluetool\n'), ((11034, 11076), 'gluetool.log.log_dict', 'log_dict', (['self.debug', '"""modulemd"""', 'modulemd'], {}), "(self.debug, 'modulemd', modulemd)\n", (11042, 11076), False, 'from gluetool.log import LoggerMixin, log_dict\n'), ((11093, 11123), 'typing.cast', 'cast', (['Dict[str, Any]', 'modulemd'], {}), '(Dict[str, Any], modulemd)\n', (11097, 11123), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((12002, 12060), 'gluetool.log.log_dict', 'log_dict', (['self.debug', '"""gathered module arches"""', 'all_arches'], {}), "(self.debug, 'gathered module arches', all_arches)\n", (12010, 12060), False, 'from gluetool.log import LoggerMixin, log_dict\n'), ((12512, 12564), 'gluetool.log.log_dict', 'log_dict', (['self.debug', '"""unique module arches"""', 'arches'], {}), "(self.debug, 'unique module arches', arches)\n", (12520, 12564), False, 'from gluetool.log import LoggerMixin, log_dict\n'), ((12989, 13012), 'six.iteritems', 'six.iteritems', (['requires'], {}), '(requires)\n', (13002, 13012), False, 'import six\n'), ((14256, 14289), 'typing.cast', 'cast', (['MBSTask', 'self.baseline_task'], {}), '(MBSTask, self.baseline_task)\n', (14260, 14289), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((19826, 19889), 'gluetool.log.log_dict', 'log_dict', (['self.debug', '"""Tags from baseline tag map"""', "map['tags']"], {}), "(self.debug, 'Tags from baseline tag map', map['tags'])\n", (19834, 19889), False, 'from gluetool.log import LoggerMixin, log_dict\n'), ((22620, 22687), 'gluetool.log.log_dict', 'log_dict', (['self.debug', '"""primary task - current modules"""', 'self._tasks'], {}), "(self.debug, 'primary task - current modules', self._tasks)\n", (22628, 22687), False, 'from gluetool.log import LoggerMixin, log_dict\n'), ((23357, 23380), 'gluetool.action.Action.current_action', 'Action.current_action', ([], {}), '()\n', (23378, 23380), False, 'from gluetool.action import Action\n'), ((27239, 27266), 'typing.cast', 'cast', (['MBSApi', 'self._mbs_api'], {}), '(MBSApi, self._mbs_api)\n', (27243, 27266), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((6756, 6848), 'gluetool.GlueError', 'gluetool.GlueError', (['"""module must be initialized only from one of build_id, nsvc or nvr"""'], {}), "(\n 'module must be initialized only from one of build_id, nsvc or nvr')\n", (6774, 6848), False, 'import gluetool\n'), ((9708, 9783), 'gluetool.GlueError', 'gluetool.GlueError', (['"""Could not detect platform stream in modulemd document"""'], {}), "('Could not detect platform stream in modulemd document')\n", (9726, 9783), False, 'import gluetool\n'), ((10427, 10503), 'gluetool.GlueError', 'gluetool.GlueError', (['"""Artifact build info does not include modulemd document"""'], {}), "('Artifact build info does not include modulemd document')\n", (10445, 10503), False, 'import gluetool\n'), ((11862, 11912), 'typing.cast', 'cast', (['TaskArches', 'self.module._default_task_arches'], {}), '(TaskArches, self.module._default_task_arches)\n', (11866, 11912), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((14842, 14913), 'gluetool.GlueError', 'GlueError', (['"""Cannot get baseline because no \'baseline-method\' specified"""'], {}), '("Cannot get baseline because no \'baseline-method\' specified")\n', (14851, 14913), False, 'from gluetool import GlueError\n'), ((23708, 23746), 'gluetool.action.Action.set_thread_root', 'Action.set_thread_root', (['current_action'], {}), '(current_action)\n', (23730, 23746), False, 'from gluetool.action import Action\n'), ((23805, 23856), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'thread_name_prefix': '"""api_thread"""'}), "(thread_name_prefix='api_thread')\n", (23823, 23856), False, 'from concurrent.futures import ThreadPoolExecutor, wait\n'), ((24429, 24493), 'typing.NamedTuple', 'NamedTuple', (['"""Wait"""', "(('done', Set[Any]), ('not_done', Set[Any]))"], {}), "('Wait', (('done', Set[Any]), ('not_done', Set[Any])))\n", (24439, 24493), False, 'from typing import cast, Any, Dict, List, Optional, Tuple, Union, NamedTuple, Set\n'), ((3909, 3940), 'urllib.urlencode', 'urllib.urlencode', (['sorted_params'], {}), '(sorted_params)\n', (3925, 3940), False, 'import urllib\n'), ((9621, 9630), 'jq.jq', 'jq', (['query'], {}), '(query)\n', (9623, 9630), False, 'from jq import jq\n'), ((11935, 11944), 'jq.jq', 'jq', (['query'], {}), '(query)\n', (11937, 11944), False, 'from jq import jq\n'), ((16388, 16461), 'gluetool.GlueError', 'GlueError', (['"""Cannot check for previous tag as build target does not exist"""'], {}), "('Cannot check for previous tag as build target does not exist')\n", (16397, 16461), False, 'from gluetool import GlueError\n'), ((24531, 24544), 'concurrent.futures.wait', 'wait', (['futures'], {}), '(futures)\n', (24535, 24544), False, 'from concurrent.futures import ThreadPoolExecutor, wait\n'), ((4043, 4066), 'gluetool.action.Action.current_action', 'Action.current_action', ([], {}), '()\n', (4064, 4066), False, 'from gluetool.action import Action\n'), ((4219, 4236), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4231, 4236), False, 'import requests\n')]
|
#!/usr/bin/env python3
"""
Convert Hershey data with hmp file to create DrawBot python module file
"""
from struct import pack
import re
from parse import parse
vectors = {}
vectors_count = {}
vectors_used = {}
def hershey_load(glyph_file_name):
"""
Load Hershey glyphs
"""
global vectors, vectors_count, vectors_used
vectors = {}
vectors_count = {}
vectors_used = {}
print(glyph_file_name)
# Read the glyphs file handling the continuation line
with open(glyph_file_name, "r") as file:
for raw_line in file:
match = re.match('^([0-9 ]{4}[0-9]{1})([0-9 ]{2}[0-9]{1})(.*)$', raw_line.rstrip())
if match:
glyph_num = int(match.group(1))
glyph_len = int(match.group(2))
vectors[glyph_num] = match.group(3)
vectors_count[glyph_num] = glyph_len -1
vectors_used[glyph_num] = False
else:
line = raw_line.rstrip()
vectors[glyph_num] += line
def map_to_py(map_file_name, font_file_name):
"""
Convert Hershey data with hmp file to create python module file
"""
global vectors, vectors_count, vectors_used
offsets = {}
offset = 0
font_vectors = {}
font_count = {}
print(map_file_name, font_file_name)
# Read the map file and build FONT[]
with open(map_file_name, "r") as file:
glyph_counter = 0
for raw_line in file:
hmp_entry = parse("{:d} {:d}", raw_line)
if hmp_entry:
if hmp_entry[1] is not 0:
for glyph in range(hmp_entry[0], hmp_entry[1]+1):
if vectors[glyph] is not None:
vectors_used[glyph] = True
font_vectors[glyph_counter] = vectors.get(glyph)
font_count[glyph_counter] = vectors_count.get(glyph)
offsets[glyph_counter] = offset
offset += len(font_vectors[glyph_counter])+1
glyph_counter += 1
else:
raise Exception("glyph {glyph} referenced but not found.")
else:
glyph = hmp_entry[0]
vectors_used[glyph] = True
font_vectors[glyph_counter] = vectors.get(glyph)
font_count[glyph_counter] = vectors_count.get(glyph)
offsets[glyph_counter] = offset
offset += len(font_vectors[glyph_counter])+1
glyph_counter += 1
# Write the font_data to the file
with open(font_file_name, "wt") as outfile:
# number of glyphs in font
print(f'def glyphs():\n\treturn {glyph_counter}\n', file=outfile)
font_data = bytes()
# vectors for each glyph in the font
for glyph in font_vectors:
print("cnt:", font_count[glyph], "vect:", font_vectors[glyph])
print("")
f_c = bytearray(font_count[glyph].to_bytes(1, byteorder='little'))
f_v = bytearray(font_vectors[glyph], 'utf-8')
font_data += f_c + f_v
print("f_c:", f_c, "f_v", f_v)
print("_font =\\", file=outfile)
print("b'", file=outfile, sep='', end='')
count = 0
for byte in (font_data):
print(f'\\x{byte:02x}', file=outfile, sep='', end='', )
count += 1
if count == 15:
print("'\\\nb'", file=outfile, sep='', end='')
count = 0
print("'", file=outfile)
# 16 bit integer table to the start of the vector data for each glyph in the font
index_data = bytes()
for offset in offsets:
print("for offset:", offsets[offset])
index_data += bytearray(pack('H', offsets[offset]))
print("\n_index =\\", file=outfile)
print("b'", file=outfile, sep='', end='')
count = 0
for byte in (index_data):
print(f'\\x{byte:02x}', file=outfile, sep='', end='', )
count += 1
if count == 15:
print("'\\\nb'", file=outfile, sep='', end='')
count = 0
print("'", file=outfile)
count = 0
print ("""
_mvfont = memoryview(_font)
def _chr_addr(ordch):
offset = 2 * (ordch - 32)
return int.from_bytes(_index[offset:offset + 2], 'little')
def get_ch(ordch):
offset = _chr_addr(ordch if 32 <= ordch <= 127 else ord('?'))
count = _font[offset]
return _mvfont[offset:offset+(count+2)*2-1]
""", file=outfile)
hershey_load("hershey/hersh-fixed.oc")
map_to_py("hershey/astrol.hmp", "pyfont/astrol.py")
map_to_py("hershey/cyrilc.hmp", "pyfont/cyrilc.py")
map_to_py("hershey/gotheng.hmp", "pyfont/gotheng.py")
map_to_py("hershey/gothger.hmp", "pyfont/gothger.py")
map_to_py("hershey/gothita.hmp", "pyfont/gothita.py")
map_to_py("hershey/greekc.hmp", "pyfont/greekc.py")
map_to_py("hershey/greekcs.hmp", "pyfont/greekcs.py")
map_to_py("hershey/greeks.hmp", "pyfont/greeks.py")
map_to_py("hershey/greekp.hmp", "pyfont/greekp.py")
map_to_py("hershey/italicc.hmp", "pyfont/italicc.py")
map_to_py("hershey/italiccs.hmp", "pyfont/italiccs.py")
map_to_py("hershey/italict.hmp", "pyfont/italict.py")
map_to_py("hershey/lowmat.hmp", "pyfont/lowmat.py")
map_to_py("hershey/marker.hmp", "pyfont/marker.py")
map_to_py("hershey/meteo.hmp", "pyfont/meteo.py")
map_to_py("hershey/music.hmp", "pyfont/music.py")
map_to_py("hershey/romanc.hmp", "pyfont/romanc.py")
map_to_py("hershey/romancs.hmp", "pyfont/romancs.py")
map_to_py("hershey/romand.hmp", "pyfont/romand.py")
map_to_py("hershey/romans.hmp", "pyfont/romans.py")
map_to_py("hershey/romant.hmp", "pyfont/romant.py")
map_to_py("hershey/scriptc.hmp", "pyfont/scriptc.py")
map_to_py("hershey/scripts.hmp", "pyfont/scripts.py")
map_to_py("hershey/symbol.hmp", "pyfont/symbol.py")
map_to_py("hershey/uppmat.hmp", "pyfont/uppmat.py")
map_to_py("hershey/romanp.hmp", "pyfont/romanp.py")
with open("hershey/misc.hmp", "w") as file:
for glyph in vectors_used:
if not vectors_used[glyph]:
print(f'{glyph} 0', file=file)
map_to_py("hershey/misc.hmp", "pyfont/misc.py")
print('glyph map:')
character = 0
for glyph in vectors_used:
if not vectors_used[glyph]:
print(f'{character:X} {glyph}')
character += 1
hershey_load("hershey/hersh.or")
map_to_py("hershey/japan.hmp", "pyfont/japan.py")
|
[
"parse.parse",
"struct.pack"
] |
[((1497, 1525), 'parse.parse', 'parse', (['"""{:d} {:d}"""', 'raw_line'], {}), "('{:d} {:d}', raw_line)\n", (1502, 1525), False, 'from parse import parse\n'), ((3879, 3905), 'struct.pack', 'pack', (['"""H"""', 'offsets[offset]'], {}), "('H', offsets[offset])\n", (3883, 3905), False, 'from struct import pack\n')]
|
#!/usr/bin/env python
"""
This is a Python script that prints an FLP
"""
import OpenFL.Printer as P
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Print a .flp")
parser.add_argument('input', metavar='input', type=str,
help='source flp file')
args = parser.parse_args()
p = P.Printer()
p.initialize()
p.write_block(0, args.input)
p.start_printing(0)
|
[
"OpenFL.Printer.Printer"
] |
[((342, 353), 'OpenFL.Printer.Printer', 'P.Printer', ([], {}), '()\n', (351, 353), True, 'import OpenFL.Printer as P\n')]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=redefined-outer-name
"""Tests for the :mod:`aiida.orm.nodes.data.array.bands` module."""
from argparse import Namespace
import pytest
from aiida.common.exceptions import NotExistent
from aiida.orm import BandsData, Group, User
from aiida.orm.nodes.data.array.bands import get_bands_and_parents_structure
@pytest.fixture
def alternate_user():
"""Return an alternate ``User`` instance that is not the current default user."""
email = 'alternate<EMAIL>'
try:
return User.objects.get(email=email)
except NotExistent:
return User(email='alternate<EMAIL>').store()
class TestGetBandsAndParentsStructure:
"""Tests for the :meth:`~aiida.orm.nodes.data.array.bands.get_bands_and_parents_structure` function."""
@staticmethod
def _get_default_ns():
"""Returns a simple template Namespace"""
args = Namespace()
args.element = None
args.element_only = None
args.formulamode = None
args.past_days = None
args.group_name = None
args.group_pk = None
args.all_users = False
return args
@pytest.mark.parametrize('all_users, expected', ((True, [True, True]), (False, [True, False])))
@pytest.mark.usefixtures('clear_database_before_test')
def test_all_users(self, alternate_user, all_users, expected):
"""Test the behavior for the ``all_users`` argument."""
bands_default_user = BandsData().store()
bands_alternate_user = BandsData(user=alternate_user).store()
bands = [bands_default_user, bands_alternate_user]
args = self._get_default_ns()
args.all_users = all_users
entries = get_bands_and_parents_structure(args)
node_pks = [int(e[0]) for e in entries]
assert [node.pk in node_pks for node in bands] == expected
@pytest.mark.parametrize('argument, attribute', (('group_name', 'label'), ('group_pk', 'pk')))
@pytest.mark.usefixtures('clear_database_before_test')
def test_identifier(self, argument, attribute):
"""Test the behavior for the ``group_name`` and ``group_pk`` arguments."""
bands_data_grouped = BandsData().store()
_ = BandsData().store()
bands_group = Group('some_bands_data').store()
bands_group.add_nodes(bands_data_grouped)
args = self._get_default_ns()
setattr(args, argument, [getattr(bands_group, attribute)])
entries = get_bands_and_parents_structure(args)
assert [int(e[0]) for e in entries] == [bands_data_grouped.pk]
|
[
"argparse.Namespace",
"aiida.orm.User.objects.get",
"aiida.orm.BandsData",
"aiida.orm.Group",
"aiida.orm.nodes.data.array.bands.get_bands_and_parents_structure",
"aiida.orm.User",
"pytest.mark.parametrize",
"pytest.mark.usefixtures"
] |
[((1757, 1856), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""all_users, expected"""', '((True, [True, True]), (False, [True, False]))'], {}), "('all_users, expected', ((True, [True, True]), (\n False, [True, False])))\n", (1780, 1856), False, 'import pytest\n'), ((1857, 1910), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clear_database_before_test"""'], {}), "('clear_database_before_test')\n", (1880, 1910), False, 'import pytest\n'), ((2472, 2570), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""argument, attribute"""', "(('group_name', 'label'), ('group_pk', 'pk'))"], {}), "('argument, attribute', (('group_name', 'label'), (\n 'group_pk', 'pk')))\n", (2495, 2570), False, 'import pytest\n'), ((2571, 2624), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clear_database_before_test"""'], {}), "('clear_database_before_test')\n", (2594, 2624), False, 'import pytest\n'), ((1137, 1166), 'aiida.orm.User.objects.get', 'User.objects.get', ([], {'email': 'email'}), '(email=email)\n', (1153, 1166), False, 'from aiida.orm import BandsData, Group, User\n'), ((1505, 1516), 'argparse.Namespace', 'Namespace', ([], {}), '()\n', (1514, 1516), False, 'from argparse import Namespace\n'), ((2313, 2350), 'aiida.orm.nodes.data.array.bands.get_bands_and_parents_structure', 'get_bands_and_parents_structure', (['args'], {}), '(args)\n', (2344, 2350), False, 'from aiida.orm.nodes.data.array.bands import get_bands_and_parents_structure\n'), ((3071, 3108), 'aiida.orm.nodes.data.array.bands.get_bands_and_parents_structure', 'get_bands_and_parents_structure', (['args'], {}), '(args)\n', (3102, 3108), False, 'from aiida.orm.nodes.data.array.bands import get_bands_and_parents_structure\n'), ((2071, 2082), 'aiida.orm.BandsData', 'BandsData', ([], {}), '()\n', (2080, 2082), False, 'from aiida.orm import BandsData, Group, User\n'), ((2122, 2152), 'aiida.orm.BandsData', 'BandsData', ([], {'user': 'alternate_user'}), '(user=alternate_user)\n', (2131, 2152), False, 'from aiida.orm import BandsData, Group, User\n'), ((2789, 2800), 'aiida.orm.BandsData', 'BandsData', ([], {}), '()\n', (2798, 2800), False, 'from aiida.orm import BandsData, Group, User\n'), ((2821, 2832), 'aiida.orm.BandsData', 'BandsData', ([], {}), '()\n', (2830, 2832), False, 'from aiida.orm import BandsData, Group, User\n'), ((2863, 2887), 'aiida.orm.Group', 'Group', (['"""some_bands_data"""'], {}), "('some_bands_data')\n", (2868, 2887), False, 'from aiida.orm import BandsData, Group, User\n'), ((1206, 1236), 'aiida.orm.User', 'User', ([], {'email': '"""alternate<EMAIL>"""'}), "(email='alternate<EMAIL>')\n", (1210, 1236), False, 'from aiida.orm import BandsData, Group, User\n')]
|
from django.contrib.auth.models import User
from django.db import models
from PIL import Image
class Profile(models.Model):
contact_no = models.CharField(max_length=20)
address = models.CharField(max_length=200)
image = models.ImageField(help_text='425x425px recommmended', upload_to='profile_pics')
title = models.CharField(max_length=100, blank=True)
linkedin_url = models.CharField(max_length=100)
github_url = models.CharField(max_length=50)
about_me = models.CharField(max_length=500)
cv_link = models.CharField(max_length=255, blank=True)
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
def __str__(self):
return f'{self.user.username} Profile'
# Override the save function in Profile class:
def save(self, *args, **kwargs):
# run the parent class' save() function:
super().save(*args, **kwargs)
# open the image of the current instance:
img = Image.open(self.image.path)
if img.height > 425 or img.width > 425:
output_size = (425, 425)
img.thumbnail(output_size)
img.save(self.image.path)
class Focus(models.Model):
name = models.CharField(max_length=50)
icon = models.CharField(max_length=20)
color = models.CharField(max_length=20, default='white')
description = models.CharField(max_length=500)
is_active = models.BooleanField(default=True)
def __str__(self):
return f'{self.name} - Active: {self.is_active}'
class TechnicalSkill(models.Model):
name = models.CharField(max_length=20)
is_top_skill = models.BooleanField(default=True)
percentage = models.IntegerField()
def __str__(self):
return f'{self.name} - Top Skill: {self.is_top_skill}'
class ProfessionalSkill(models.Model):
name = models.CharField(max_length=20)
percentage = models.IntegerField()
def __str__(self):
return self.name
class Education(models.Model):
school = models.CharField(max_length=100)
duration = models.CharField(max_length=15)
level = models.CharField(max_length=200)
address = models.CharField(max_length=200)
achievements = models.TextField(max_length=500, blank=True)
def __str__(self):
return f'{self.level} - {self.school}'
class WorkExperience(models.Model):
position = models.CharField(max_length=100)
company = models.CharField(max_length=100)
duration = models.CharField(max_length=30)
address = models.CharField(max_length=200)
summary = models.TextField(max_length=500, blank=True)
def __str__(self):
return f'{self.position} - {self.company}'
class ProjectCategory(models.Model):
name = models.CharField(max_length=30)
code = models.CharField(max_length=20)
def __str__(self):
return self.name
class Project(models.Model):
title = models.CharField(max_length=200)
code = models.CharField(max_length=20, blank=True)
description = models.TextField()
date_started = models.CharField(max_length=20, blank=True)
date_ended = models.CharField(max_length=20, blank=True)
main_image = models.ImageField(upload_to='project_images', default='')
repo_link = models.CharField(max_length=50, blank=True)
demo_link = models.CharField(max_length=50, blank=True)
document_link = models.CharField(max_length=255, blank=True)
project_category = models.ForeignKey(ProjectCategory, on_delete=models.CASCADE, related_name='projects')
def __str__(self):
return self.title
class ToolsAndTech(models.Model):
name = models.CharField(max_length=30)
project = models.ManyToManyField(Project, related_name='toolsandtechs')
def __str__(self):
return self.name
class ProjectImage(models.Model):
image = models.ImageField(upload_to='project_images')
caption = models.CharField(max_length=100, blank=True)
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='projectimages')
def __str__(self):
return f'{self.project.code} - {self.image.name}'
class Recommendation(models.Model):
name = models.CharField(max_length=40)
message = models.CharField(max_length=400)
image = models.ImageField(upload_to='recommendations', default='recommendations/default')
summary = models.CharField(max_length=50)
def __str__(self):
return f'{self.name} - {self.summary}'
class Certification(models.Model):
title = models.CharField(max_length=100)
authority = models.CharField(max_length=30)
date_issued = models.CharField(max_length=20)
document_link = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.title
class Seminar(models.Model):
title = models.CharField(max_length=100)
organizer = models.CharField(max_length=30)
event_date = models.CharField(max_length=20)
link_proof = models.CharField(max_length=200, blank=True)
link_icon = models.CharField(max_length=20, blank=True)
document_link = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.title
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"PIL.Image.open",
"django.db.models.ImageField",
"django.db.models.IntegerField"
] |
[((143, 174), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (159, 174), False, 'from django.db import models\n'), ((189, 221), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (205, 221), False, 'from django.db import models\n'), ((234, 313), 'django.db.models.ImageField', 'models.ImageField', ([], {'help_text': '"""425x425px recommmended"""', 'upload_to': '"""profile_pics"""'}), "(help_text='425x425px recommmended', upload_to='profile_pics')\n", (251, 313), False, 'from django.db import models\n'), ((326, 370), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (342, 370), False, 'from django.db import models\n'), ((390, 422), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (406, 422), False, 'from django.db import models\n'), ((440, 471), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (456, 471), False, 'from django.db import models\n'), ((487, 519), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (503, 519), False, 'from django.db import models\n'), ((534, 578), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (550, 578), False, 'from django.db import models\n'), ((590, 660), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'primary_key': '(True)'}), '(User, on_delete=models.CASCADE, primary_key=True)\n', (610, 660), False, 'from django.db import models\n'), ((1204, 1235), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1220, 1235), False, 'from django.db import models\n'), ((1247, 1278), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1263, 1278), False, 'from django.db import models\n'), ((1291, 1339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '"""white"""'}), "(max_length=20, default='white')\n", (1307, 1339), False, 'from django.db import models\n'), ((1358, 1390), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1374, 1390), False, 'from django.db import models\n'), ((1407, 1440), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1426, 1440), False, 'from django.db import models\n'), ((1571, 1602), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1587, 1602), False, 'from django.db import models\n'), ((1622, 1655), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1641, 1655), False, 'from django.db import models\n'), ((1673, 1694), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1692, 1694), False, 'from django.db import models\n'), ((1834, 1865), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1850, 1865), False, 'from django.db import models\n'), ((1883, 1904), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1902, 1904), False, 'from django.db import models\n'), ((2000, 2032), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2016, 2032), False, 'from django.db import models\n'), ((2048, 2079), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (2064, 2079), False, 'from django.db import models\n'), ((2092, 2124), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2108, 2124), False, 'from django.db import models\n'), ((2139, 2171), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2155, 2171), False, 'from django.db import models\n'), ((2191, 2235), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'blank': '(True)'}), '(max_length=500, blank=True)\n', (2207, 2235), False, 'from django.db import models\n'), ((2360, 2392), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2376, 2392), False, 'from django.db import models\n'), ((2407, 2439), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2423, 2439), False, 'from django.db import models\n'), ((2455, 2486), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (2471, 2486), False, 'from django.db import models\n'), ((2501, 2533), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2517, 2533), False, 'from django.db import models\n'), ((2548, 2592), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'blank': '(True)'}), '(max_length=500, blank=True)\n', (2564, 2592), False, 'from django.db import models\n'), ((2718, 2749), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (2734, 2749), False, 'from django.db import models\n'), ((2761, 2792), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (2777, 2792), False, 'from django.db import models\n'), ((2885, 2917), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2901, 2917), False, 'from django.db import models\n'), ((2929, 2972), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (2945, 2972), False, 'from django.db import models\n'), ((2991, 3009), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3007, 3009), False, 'from django.db import models\n'), ((3029, 3072), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (3045, 3072), False, 'from django.db import models\n'), ((3090, 3133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (3106, 3133), False, 'from django.db import models\n'), ((3151, 3208), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""project_images"""', 'default': '""""""'}), "(upload_to='project_images', default='')\n", (3168, 3208), False, 'from django.db import models\n'), ((3225, 3268), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)'}), '(max_length=50, blank=True)\n', (3241, 3268), False, 'from django.db import models\n'), ((3285, 3328), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)'}), '(max_length=50, blank=True)\n', (3301, 3328), False, 'from django.db import models\n'), ((3349, 3393), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (3365, 3393), False, 'from django.db import models\n'), ((3417, 3507), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ProjectCategory'], {'on_delete': 'models.CASCADE', 'related_name': '"""projects"""'}), "(ProjectCategory, on_delete=models.CASCADE, related_name=\n 'projects')\n", (3434, 3507), False, 'from django.db import models\n'), ((3600, 3631), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (3616, 3631), False, 'from django.db import models\n'), ((3646, 3707), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Project'], {'related_name': '"""toolsandtechs"""'}), "(Project, related_name='toolsandtechs')\n", (3668, 3707), False, 'from django.db import models\n'), ((3805, 3850), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""project_images"""'}), "(upload_to='project_images')\n", (3822, 3850), False, 'from django.db import models\n'), ((3865, 3909), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (3881, 3909), False, 'from django.db import models\n'), ((3924, 4011), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Project'], {'on_delete': 'models.CASCADE', 'related_name': '"""projectimages"""'}), "(Project, on_delete=models.CASCADE, related_name=\n 'projectimages')\n", (3941, 4011), False, 'from django.db import models\n'), ((4138, 4169), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (4154, 4169), False, 'from django.db import models\n'), ((4184, 4216), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(400)'}), '(max_length=400)\n', (4200, 4216), False, 'from django.db import models\n'), ((4229, 4315), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""recommendations"""', 'default': '"""recommendations/default"""'}), "(upload_to='recommendations', default=\n 'recommendations/default')\n", (4246, 4315), False, 'from django.db import models\n'), ((4325, 4356), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (4341, 4356), False, 'from django.db import models\n'), ((4478, 4510), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4494, 4510), False, 'from django.db import models\n'), ((4527, 4558), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (4543, 4558), False, 'from django.db import models\n'), ((4577, 4608), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (4593, 4608), False, 'from django.db import models\n'), ((4629, 4673), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (4645, 4673), False, 'from django.db import models\n'), ((4767, 4799), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (4783, 4799), False, 'from django.db import models\n'), ((4816, 4847), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (4832, 4847), False, 'from django.db import models\n'), ((4865, 4896), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (4881, 4896), False, 'from django.db import models\n'), ((4914, 4958), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)'}), '(max_length=200, blank=True)\n', (4930, 4958), False, 'from django.db import models\n'), ((4975, 5018), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (4991, 5018), False, 'from django.db import models\n'), ((5039, 5083), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (5055, 5083), False, 'from django.db import models\n'), ((973, 1000), 'PIL.Image.open', 'Image.open', (['self.image.path'], {}), '(self.image.path)\n', (983, 1000), False, 'from PIL import Image\n')]
|
#!/usr/bin/python3
##########################################################################
# Tables used:
# - OCI_USAGE - Raw data of the usage reports
# - OCI_USAGE_STATS - Summary Stats of the Usage Report for quick query if only filtered by tenant and date
# - OCI_USAGE_TAG_KEYS - Tag keys of the usage reports
# - OCI_COST - Raw data of the cost reports
# - OCI_COST_STATS - Summary Stats of the Cost Report for quick query if only filtered by tenant and date
# - OCI_COST_TAG_KEYS - Tag keys of the cost reports
# - OCI_COST_REFERENCE - Reference table of the cost filter keys - SERVICE, REGION, COMPARTMENT, PRODUCT, SUBSCRIPTION
# - OCI_PRICE_LIST - Hold the price list and the cost per product
##########################################################################
import sys
import argparse
import datetime
import oci
import gzip
import os
import csv
import requests
import time
import pandas as pd
import json
version = "20.07.28"
usage_report_namespace = "bling"
work_report_dir = os.curdir + "/work_report_dir_temp"
# create the work dir if not exist
if not os.path.exists(work_report_dir):
os.mkdir(work_report_dir)
##########################################################################
# Print header centered
##########################################################################
def print_header(name, category):
options = {0: 90, 1: 60, 2: 30}
chars = int(options[category])
print("")
print('#' * chars)
print("#" + name.center(chars - 2, " ") + "#")
print('#' * chars)
##########################################################################
# Get Column from Array
##########################################################################
def get_column_value_from_array(column, array):
if column in array:
return array[column]
else:
return ""
##########################################################################
# Create signer
##########################################################################
def create_signer(cmd):
# assign default values
config_file = oci.config.DEFAULT_LOCATION
config_section = oci.config.DEFAULT_PROFILE
if cmd.config:
if cmd.config.name:
config_file = cmd.config.name
if cmd.profile:
config_section = cmd.profile
if cmd.instance_principals:
try:
signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
config = {'region': signer.region, 'tenancy': signer.tenancy_id}
return config, signer
except Exception:
print_header("Error obtaining instance principals certificate, aborting", 0)
raise SystemExit
else:
config = oci.config.from_file(config_file, config_section)
signer = oci.signer.Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
return config, signer
##########################################################################
# Load compartments
##########################################################################
def identity_read_compartments(identity, tenancy):
compartments = []
#print("Loading Compartments...")
try:
# read all compartments to variable
all_compartments = []
try:
all_compartments = oci.pagination.list_call_get_all_results(
identity.list_compartments,
tenancy.id,
compartment_id_in_subtree=True
).data
except oci.exceptions.ServiceError:
raise
###################################################
# Build Compartments - return nested compartment list
###################################################
def build_compartments_nested(identity_client, cid, path):
try:
compartment_list = [item for item in all_compartments if str(item.compartment_id) == str(cid)]
if path != "":
path = path + " / "
for c in compartment_list:
if c.lifecycle_state == oci.identity.models.Compartment.LIFECYCLE_STATE_ACTIVE:
cvalue = {'id': str(c.id), 'name': str(c.name), 'path': path + str(c.name)}
compartments.append(cvalue)
build_compartments_nested(identity_client, c.id, cvalue['path'])
except Exception as error:
raise Exception("Error in build_compartments_nested: " + str(error.args))
###################################################
# Add root compartment
###################################################
value = {'id': str(tenancy.id), 'name': str(tenancy.name) + " (root)", 'path': "/ " + str(tenancy.name) + " (root)"}
compartments.append(value)
# Build the compartments
build_compartments_nested(identity, str(tenancy.id), "")
# sort the compartment
sorted_compartments = sorted(compartments, key=lambda k: k['path'])
#print(" Total " + str(len(sorted_compartments)) + " compartments loaded.")
return sorted_compartments
except oci.exceptions.RequestException:
raise
except Exception as e:
raise Exception("Error in identity_read_compartments: " + str(e.args))
##########################################################################
# set parser
##########################################################################
def set_parser_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-c', type=argparse.FileType('r'), dest='config', help="Config File")
parser.add_argument('-t', default="", dest='profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-f', default="", dest='fileid', help='File Id to load')
parser.add_argument('-d', default="", dest='filedate', help='Minimum File Date to load (i.e. yyyy-mm-dd)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-su', action='store_true', default=False, dest='skip_usage', help='Skip Load Usage Files')
parser.add_argument('-sc', action='store_true', default=False, dest='skip_cost', help='Skip Load Cost Files')
parser.add_argument('-ip', action='store_true', default=False, dest='instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('--version', action='version', version='%(prog)s ' + version)
result = parser.parse_args()
return result
##########################################################################
# update_cost_stats
##########################################################################
def update_cost_stats(connection):
try:
# open cursor
cursor = connection.cursor()
#print("\nMerging statistics into OCI_COST_STATS...")
# run merge to oci_update_stats
sql = "merge into OCI_COST_STATS a "
sql += "using "
sql += "( "
sql += " select "
sql += " tenant_name, "
sql += " file_id, "
sql += " USAGE_INTERVAL_START, "
sql += " sum(COST_MY_COST) COST_MY_COST, "
sql += " sum(COST_MY_COST_OVERAGE) COST_MY_COST_OVERAGE, "
sql += " min(COST_CURRENCY_CODE) COST_CURRENCY_CODE, "
sql += " count(*) NUM_ROWS "
sql += " from "
sql += " oci_cost "
sql += " group by "
sql += " tenant_name, "
sql += " file_id, "
sql += " USAGE_INTERVAL_START "
sql += ") b "
sql += "on (a.tenant_name=b.tenant_name and a.file_id=b.file_id and a.USAGE_INTERVAL_START=b.USAGE_INTERVAL_START) "
sql += "when matched then update set a.num_rows=b.num_rows, a.COST_MY_COST=b.COST_MY_COST, a.UPDATE_DATE=sysdate, a.AGENT_VERSION=:version,"
sql += " a.COST_MY_COST_OVERAGE=b.COST_MY_COST_OVERAGE, a.COST_CURRENCY_CODE=b.COST_CURRENCY_CODE "
sql += "where a.num_rows <> b.num_rows "
sql += "when not matched then insert (TENANT_NAME,FILE_ID,USAGE_INTERVAL_START,NUM_ROWS,COST_MY_COST,UPDATE_DATE,AGENT_VERSION,COST_MY_COST_OVERAGE,COST_CURRENCY_CODE) "
sql += " values (b.TENANT_NAME,b.FILE_ID,b.USAGE_INTERVAL_START,b.NUM_ROWS,b.COST_MY_COST,sysdate,:version,b.COST_MY_COST_OVERAGE,b.COST_CURRENCY_CODE) "
cursor.execute(sql, {"version": version})
connection.commit()
#print(" Merge Completed, " + str(cursor.rowcount) + " rows merged")
cursor.close()
except cx_Oracle.DatabaseError as e:
print("\nError manipulating database at update_cost_stats() - " + str(e) + "\n")
raise SystemExit
except Exception as e:
raise Exception("\nError manipulating database at update_cost_stats() - " + str(e))
##########################################################################
# update_price_list
##########################################################################
def update_price_list(connection):
try:
# open cursor
cursor = connection.cursor()
#print("\nMerging statistics into OCI_PRICE_LIST...")
# run merge to oci_update_stats
sql = "MERGE INTO OCI_PRICE_LIST A "
sql += "USING "
sql += "( "
sql += " SELECT "
sql += " TENANT_NAME, "
sql += " COST_PRODUCT_SKU, "
sql += " PRD_DESCRIPTION, "
sql += " COST_CURRENCY_CODE, "
sql += " COST_UNIT_PRICE "
sql += " FROM "
sql += " ( "
sql += " SELECT "
sql += " TENANT_NAME, "
sql += " COST_PRODUCT_SKU, "
sql += " PRD_DESCRIPTION, "
sql += " COST_CURRENCY_CODE, "
sql += " COST_UNIT_PRICE, "
sql += " ROW_NUMBER() OVER (PARTITION BY TENANT_NAME, COST_PRODUCT_SKU ORDER BY USAGE_INTERVAL_START DESC, COST_UNIT_PRICE DESC) RN "
sql += " FROM OCI_COST A "
sql += " ) "
sql += " WHERE RN = 1 "
sql += " ORDER BY 1,2 "
sql += ") B "
sql += "ON (A.TENANT_NAME = B.TENANT_NAME AND A.COST_PRODUCT_SKU = B.COST_PRODUCT_SKU) "
sql += "WHEN MATCHED THEN UPDATE SET A.PRD_DESCRIPTION=B.PRD_DESCRIPTION, A.COST_CURRENCY_CODE=B.COST_CURRENCY_CODE, A.COST_UNIT_PRICE=B.COST_UNIT_PRICE, COST_LAST_UPDATE = SYSDATE "
sql += "WHEN NOT MATCHED THEN INSERT (TENANT_NAME,COST_PRODUCT_SKU,PRD_DESCRIPTION,COST_CURRENCY_CODE,COST_UNIT_PRICE,COST_LAST_UPDATE) "
sql += " VALUES (B.TENANT_NAME,B.COST_PRODUCT_SKU,B.PRD_DESCRIPTION,B.COST_CURRENCY_CODE,B.COST_UNIT_PRICE,SYSDATE)"
cursor.execute(sql)
connection.commit()
#print(" Merge Completed, " + str(cursor.rowcount) + " rows merged")
cursor.close()
except cx_Oracle.DatabaseError as e:
print("\nError manipulating database at update_price_list() - " + str(e) + "\n")
raise SystemExit
except Exception as e:
raise Exception("\nError manipulating database at update_price_list() - " + str(e))
##########################################################################
# update_cost_reference
##########################################################################
def update_cost_reference(connection):
try:
# open cursor
cursor = connection.cursor()
#print("\nMerging statistics into OCI_COST_REFERENCE...")
# run merge to oci_update_stats
sql = "merge into OCI_COST_REFERENCE a "
sql += "using "
sql += "( "
sql += " select TENANT_NAME, REF_TYPE, REF_NAME "
sql += " from "
sql += " ( "
sql += " select distinct TENANT_NAME, 'PRD_SERVICE' as REF_TYPE, PRD_SERVICE as REF_NAME from OCI_COST "
sql += " union all "
sql += " select distinct TENANT_NAME, 'PRD_COMPARTMENT_PATH' as REF_TYPE, "
sql += " case when prd_compartment_path like '%/%' then substr(prd_compartment_path,1,instr(prd_compartment_path,' /')-1) "
sql += " else prd_compartment_path end as REF_NAME "
sql += " from OCI_COST "
sql += " union all "
sql += " select distinct TENANT_NAME, 'PRD_COMPARTMENT_NAME' as REF_TYPE, PRD_COMPARTMENT_NAME as ref_name from OCI_COST "
sql += " union all "
sql += " select distinct TENANT_NAME, 'PRD_REGION' as REF_TYPE, PRD_REGION as ref_name from OCI_COST "
sql += " union all "
sql += " select distinct TENANT_NAME, 'COST_SUBSCRIPTION_ID' as REF_TYPE, to_char(COST_SUBSCRIPTION_ID) as ref_name from OCI_COST "
sql += " union all "
sql += " select distinct TENANT_NAME, 'COST_PRODUCT_SKU' as REF_TYPE, COST_PRODUCT_SKU || ' '||min(PRD_DESCRIPTION) as ref_name from OCI_COST "
sql += " group by TENANT_NAME, COST_PRODUCT_SKU "
sql += " ) where ref_name is not null "
sql += ") b "
sql += "on (a.TENANT_NAME=b.TENANT_NAME and a.REF_TYPE=b.REF_TYPE and a.REF_NAME=b.REF_NAME) "
sql += "when not matched then insert (TENANT_NAME,REF_TYPE,REF_NAME) "
sql += "values (b.TENANT_NAME,b.REF_TYPE,b.REF_NAME)"
cursor.execute(sql)
connection.commit()
#print(" Merge Completed, " + str(cursor.rowcount) + " rows merged")
cursor.close()
except cx_Oracle.DatabaseError as e:
print("\nError manipulating database at update_cost_reference() - " + str(e) + "\n")
raise SystemExit
except Exception as e:
raise Exception("\nError manipulating database at update_cost_reference() - " + str(e))
##########################################################################
# update_public_rates
##########################################################################
def update_public_rates(connection, tenant_name):
try:
# open cursor
num_rows = 0
cursor = connection.cursor()
api_url = "https://itra.oraclecloud.com/itas/.anon/myservices/api/v1/products?partNumber="
#print("\nMerging Public Rates into OCI_RATE_CARD...")
# retrieve the SKUS to query
sql = "select COST_PRODUCT_SKU, COST_CURRENCY_CODE from OCI_PRICE_LIST where tenant_name=:tenant_name"
cursor.execute(sql, {"tenant_name": tenant_name})
rows = cursor.fetchall()
if rows:
for row in rows:
rate_description = ""
rate_price = None
resp = None
#######################################
# Call API to fetch the SKU Data
#######################################
try:
cost_product_sku = str(row[0])
country_code = str(row[1])
resp = requests.get(api_url + cost_product_sku, headers={'X-Oracle-Accept-CurrencyCode': country_code})
time.sleep(0.5)
except Exception as e:
print("\nWarning Calling REST API for Public Rate at update_public_rates() - " + str(e))
time.sleep(2)
continue
if not resp:
continue
for item in resp.json()['items']:
rate_description = item["displayName"]
for price in item['prices']:
if price['model'] == 'PAY_AS_YOU_GO':
rate_price = price['value']
# update database
sql = "update OCI_PRICE_LIST set "
sql += "RATE_DESCRIPTION=:rate_description, "
sql += "RATE_PAYGO_PRICE=:rate_price, "
sql += "RATE_MONTHLY_FLEX_PRICE=:rate_price, "
sql += "RATE_UPDATE_DATE=sysdate "
sql += "where TENANT_NAME=:tenant_name and COST_PRODUCT_SKU=:cost_product_sku "
# only apply paygo cost after 7/13 oracle change rate
sql_variables = {
"rate_description": rate_description,
"rate_price": rate_price,
"tenant_name": tenant_name,
"cost_product_sku": cost_product_sku
}
cursor.execute(sql, sql_variables)
num_rows += 1
# Commit
connection.commit()
#print(" Update Completed, " + str(num_rows) + " rows updated.")
cursor.close()
except cx_Oracle.DatabaseError as e:
print("\nError manipulating database at update_public_rates() - " + str(e) + "\n")
raise SystemExit
except requests.exceptions.ConnectionError as e:
print("\nError connecting to billing metering API at update_public_rates() - " + str(e))
except Exception as e:
raise Exception("\nError manipulating database at update_public_rates() - " + str(e))
##########################################################################
# update_usage_stats
##########################################################################
def update_usage_stats(connection):
try:
# open cursor
cursor = connection.cursor()
#print("\nMerging statistics into OCI_USAGE_STATS...")
# run merge to oci_update_stats
sql = "merge into OCI_USAGE_STATS a "
sql += "using "
sql += "( "
sql += " select "
sql += " tenant_name, "
sql += " file_id, "
sql += " USAGE_INTERVAL_START, "
sql += " count(*) NUM_ROWS "
sql += " from "
sql += " oci_usage "
sql += " group by "
sql += " tenant_name, "
sql += " file_id, "
sql += " USAGE_INTERVAL_START "
sql += ") b "
sql += "on (a.tenant_name=b.tenant_name and a.file_id=b.file_id and a.USAGE_INTERVAL_START=b.USAGE_INTERVAL_START) "
sql += "when matched then update set a.num_rows=b.num_rows, a.UPDATE_DATE=sysdate, a.AGENT_VERSION=:version "
sql += "where a.num_rows <> b.num_rows "
sql += "when not matched then insert (TENANT_NAME,FILE_ID,USAGE_INTERVAL_START,NUM_ROWS,UPDATE_DATE,AGENT_VERSION) "
sql += " values (b.TENANT_NAME,b.FILE_ID,b.USAGE_INTERVAL_START,b.NUM_ROWS,sysdate,:version) "
cursor.execute(sql, {"version": version})
connection.commit()
#print(" Merge Completed, " + str(cursor.rowcount) + " rows merged")
cursor.close()
except cx_Oracle.DatabaseError as e:
print("\nError manipulating database at update_usage_stats() - " + str(e) + "\n")
raise SystemExit
except Exception as e:
raise Exception("\nError manipulating database at update_usage_stats() - " + str(e))
#########################################################################
# Load Cost File
##########################################################################
def load_cost_file(object_storage, object_file, max_file_id, cmd, tenancy, compartments):
num_files = 0
num_rows = 0
try:
o = object_file
# keep tag keys per file
tags_keys = []
# get file name
filename = o.name.rsplit('/', 1)[-1]
file_id = filename[:-7]
file_time = str(o.time_created)[0:16]
# if file already loaded, skip (check if < max_file_id
if str(max_file_id) != "None":
if file_id <= str(max_file_id):
return num_files
# if file id enabled, check
if cmd.fileid:
if file_id != cmd.fileid:
return num_files
# check file date
if cmd.filedate:
if file_time <= cmd.filedate:
return num_files
path_filename = work_report_dir + '/' + filename
#print(" Processing file " + o.name + " - " + str(o.size) + " bytes, " + file_time)
# download file
object_details = object_storage.get_object(usage_report_namespace, str(tenancy.id), o.name)
with open(path_filename, 'wb') as f:
for chunk in object_details.data.raw.stream(1024 * 1024, decode_content=False):
f.write(chunk)
# Read file to variable
with gzip.open(path_filename, 'rt') as file_in:
csv_reader = csv.DictReader(file_in)
#incluir código de conversão para json
f = open(path_filename[:-3], "w")
f.write(file_in.read())
f.close()
df = pd.read_csv (path_filename[:-3])
df.to_json (path_filename[:-3][:-3] + "json")
f = open(path_filename[:-3][:-3] + "json", "r")
dado = f.read()
url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/insertjson'
myobj = {'id_arquivo': filename[:-3][:-3], 'tenant_name': tenancy.name, 'tp_arquivo': 'cost', 'json': dado}
f.close()
x = requests.post(url, data = myobj)
# Read file to variable
with gzip.open(path_filename, 'rt') as file_in:
csv_reader = csv.DictReader(file_in)
# Adjust the batch size to meet memory and performance requirements for cx_oracle
batch_size = 5000
array_size = 1000
data = []
for row in csv_reader:
# find compartment path
compartment_path = ""
for c in compartments:
if c['id'] == row['product/compartmentId']:
compartment_path = c['path']
# Handle Tags up to 4000 chars with # seperator
tags_data = ""
for (key, value) in row.items():
if 'tags' in key and len(value) > 0:
# remove # and = from the tags keys and value
keyadj = str(key).replace("tags/", "").replace("#", "").replace("=", "")
valueadj = str(value).replace("#", "").replace("=", "")
# check if length < 4000 to avoid overflow database column
if len(tags_data) + len(keyadj) + len(valueadj) + 2 < 4000:
tags_data += ("#" if tags_data == "" else "") + keyadj + "=" + valueadj + "#"
# add tag key to tag_keys array
if keyadj not in tags_keys:
tags_keys.append(keyadj)
# Assign each column to variable to avoid error if column missing from the file
lineItem_intervalUsageStart = get_column_value_from_array('lineItem/intervalUsageStart', row)
lineItem_intervalUsageEnd = get_column_value_from_array('lineItem/intervalUsageEnd', row)
product_service = get_column_value_from_array('product/service', row)
product_compartmentId = get_column_value_from_array('product/compartmentId', row)
product_compartmentName = get_column_value_from_array('product/compartmentName', row)
product_region = get_column_value_from_array('product/region', row)
product_availabilityDomain = get_column_value_from_array('product/availabilityDomain', row)
product_resourceId = get_column_value_from_array('product/resourceId', row)
usage_billedQuantity = get_column_value_from_array('usage/billedQuantity', row)
usage_billedQuantityOverage = get_column_value_from_array('usage/billedQuantityOverage', row)
cost_subscriptionId = get_column_value_from_array('cost/subscriptionId', row)
cost_productSku = get_column_value_from_array('cost/productSku', row)
product_Description = get_column_value_from_array('product/Description', row)
cost_unitPrice = get_column_value_from_array('cost/unitPrice', row)
cost_unitPriceOverage = get_column_value_from_array('cost/unitPriceOverage', row)
cost_myCost = get_column_value_from_array('cost/myCost', row)
cost_myCostOverage = get_column_value_from_array('cost/myCostOverage', row)
cost_currencyCode = get_column_value_from_array('cost/currencyCode', row)
cost_overageFlag = get_column_value_from_array('cost/overageFlag', row)
lineItem_isCorrection = get_column_value_from_array('lineItem/isCorrection', row)
# OCI changed the column billingUnitReadable to skuUnitDescription
if 'cost/skuUnitDescription' in row:
cost_billingUnitReadable = get_column_value_from_array('cost/skuUnitDescription', row)
else:
cost_billingUnitReadable = get_column_value_from_array('cost/billingUnitReadable', row)
# Fix OCI Data for missing product description
if cost_productSku == "B88285" and product_Description == "":
product_Description = "Object Storage Classic"
cost_billingUnitReadable = "Gigabyte Storage Capacity per Month"
elif cost_productSku == "B88272" and product_Description == "":
product_Description = "Compute Classic - Unassociated Static IP"
cost_billingUnitReadable = "IPs"
elif cost_productSku == "B88166" and product_Description == "":
product_Description = "Oracle Identity Cloud - Standard"
cost_billingUnitReadable = "Active User per Hour"
elif cost_productSku == "B88167" and product_Description == "":
product_Description = "Oracle Identity Cloud - Basic"
cost_billingUnitReadable = "Active User per Hour"
elif cost_productSku == "B88168" and product_Description == "":
product_Description = "Oracle Identity Cloud - Basic - Consumer User"
cost_billingUnitReadable = "Active User per Hour"
elif cost_productSku == "B88274" and product_Description == "":
product_Description = "Block Storage Classic"
cost_billingUnitReadable = "Gigabyte Storage Capacity per Month"
elif cost_productSku == "B89164" and product_Description == "":
product_Description = "Oracle Security Monitoring and Compliance Edition"
cost_billingUnitReadable = "100 Entities Per Hour"
elif cost_productSku == "B88269" and product_Description == "":
product_Description = "Compute Classic"
cost_billingUnitReadable = "OCPU Per Hour "
elif cost_productSku == "B88269" and product_Description == "":
product_Description = "Compute Classic"
cost_billingUnitReadable = "OCPU Per Hour"
elif cost_productSku == "B88275" and product_Description == "":
product_Description = "Block Storage Classic - High I/O"
cost_billingUnitReadable = "Gigabyte Storage Per Month"
elif cost_productSku == "B88283" and product_Description == "":
product_Description = "Object Storage Classic - GET and all other Requests"
cost_billingUnitReadable = "10,000 Requests Per Month"
elif cost_productSku == "B88284" and product_Description == "":
product_Description = "Object Storage Classic - PUT, COPY, POST or LIST Requests"
cost_billingUnitReadable = "10,000 Requests Per Month"
num_rows += 1
url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/cost/' + str(tenancy.name)
myobj = {
'a1': str(tenancy.name),
'a2': file_id,
'a3': lineItem_intervalUsageStart[0:10] + " " + lineItem_intervalUsageStart[11:16],
'a4': lineItem_intervalUsageEnd[0:10] + " " + lineItem_intervalUsageEnd[11:16],
'a5': product_service,
'a6': product_compartmentId,
'a7': product_compartmentName,
'a8': compartment_path,
'a9': product_region,
'a10': product_availabilityDomain,
'a11': product_resourceId,
'a12': usage_billedQuantity,
'a13': usage_billedQuantityOverage,
'a14': cost_subscriptionId,
'a15': cost_productSku,
'a16': product_Description,
'a17': cost_unitPrice,
'a18': cost_unitPriceOverage,
'a19': cost_myCost,
'a20': cost_myCostOverage,
'a21': cost_currencyCode,
'a22': cost_billingUnitReadable,
'a23': cost_overageFlag,
'a24': lineItem_isCorrection,
'a25': tags_data
}
x = requests.post(url, data = myobj)
#print(" Completed file " + o.name + " - " + str(num_rows) + " Rows Inserted")
num_files += 1
# remove file
os.remove(path_filename)
os.remove(path_filename[:-3])
os.remove(path_filename[:-3][:-3] + "json")
#######################################
# insert bulk tags to the database
#######################################
data = []
for tag in tags_keys:
row_data = (str(tenancy.name), tag, str(tenancy.name), tag)
data.append(row_data)
url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/costtags/' + str(tenancy.name)
myobj = {'tag': tag}
x = requests.post(url, data = myobj)
return num_files
except Exception as e:
print("\nload_cost_file() - Error Download Usage and insert to database 01 - " + str(e))
raise SystemExit
#########################################################################
# Load Usage File
##########################################################################
def load_usage_file(object_storage, object_file, max_file_id, cmd, tenancy, compartments):
num_files = 0
num_rows = 0
try:
o = object_file
# keep tag keys per file
tags_keys = []
# get file name
filename = o.name.rsplit('/', 1)[-1]
file_id = filename[:-7]
file_time = str(o.time_created)[0:16]
# if file already loaded, skip (check if < max_usage_file_id)
if str(max_file_id) != "None":
if file_id <= str(max_file_id):
return num_files
# if file id enabled, check
if cmd.fileid:
if file_id != cmd.file_id:
return num_files
# check file date
if cmd.filedate:
if file_time <= cmd.filedate:
return num_files
path_filename = work_report_dir + '/' + filename
#print(" Processing file " + o.name + " - " + str(o.size) + " bytes, " + file_time)
# download file
object_details = object_storage.get_object(usage_report_namespace, str(tenancy.id), o.name)
with open(path_filename, 'wb') as f:
for chunk in object_details.data.raw.stream(1024 * 1024, decode_content=False):
f.write(chunk)
# Read file to variable
with gzip.open(path_filename, 'rt') as file_in:
csv_reader = csv.DictReader(file_in)
# Adjust the batch size to meet memory and performance requirements
batch_size = 5000
array_size = 1000
data = []
for row in csv_reader:
# find compartment path
compartment_path = ""
for c in compartments:
if c['id'] == row['product/compartmentId']:
compartment_path = c['path']
# Handle Tags up to 3500 chars with # seperator
tags_data = ""
for (key, value) in row.items():
if 'tags' in key and len(value) > 0:
# remove # and = from the tags keys and value
keyadj = str(key).replace("tags/", "").replace("#", "").replace("=", "")
valueadj = str(value).replace("#", "").replace("=", "")
# check if length < 3500 to avoid overflow database column
if len(tags_data) + len(keyadj) + len(valueadj) + 2 < 3500:
tags_data += ("#" if tags_data == "" else "") + keyadj + "=" + valueadj + "#"
# add tag key to tag_keys array
if keyadj not in tags_keys:
tags_keys.append(keyadj)
# Assign each column to variable to avoid error if column missing from the file
lineItem_intervalUsageStart = get_column_value_from_array('lineItem/intervalUsageStart', row)
lineItem_intervalUsageEnd = get_column_value_from_array('lineItem/intervalUsageEnd', row)
product_service = get_column_value_from_array('product/service', row)
product_resource = get_column_value_from_array('product/resource', row)
product_compartmentId = get_column_value_from_array('product/compartmentId', row)
product_compartmentName = get_column_value_from_array('product/compartmentName', row)
product_region = get_column_value_from_array('product/region', row)
product_availabilityDomain = get_column_value_from_array('product/availabilityDomain', row)
product_resourceId = get_column_value_from_array('product/resourceId', row)
usage_billedQuantity = get_column_value_from_array('usage/billedQuantity', row)
usage_consumedQuantity = get_column_value_from_array('usage/consumedQuantity', row)
usage_consumedQuantityUnits = get_column_value_from_array('usage/consumedQuantityUnits', row)
usage_consumedQuantityMeasure = get_column_value_from_array('usage/consumedQuantityMeasure', row)
lineItem_isCorrection = get_column_value_from_array('lineItem/isCorrection', row)
num_rows += 1
url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/usage/' + str(tenancy.name)
myobj = {
'a1': str(tenancy.name),
'a2': file_id,
'a3': lineItem_intervalUsageStart[0:10] + " " + lineItem_intervalUsageStart[11:16],
'a4': lineItem_intervalUsageEnd[0:10] + " " + lineItem_intervalUsageEnd[11:16],
'a5': product_service,
'a6': product_resource,
'a7': product_compartmentId,
'a8': product_compartmentName,
'a9': compartment_path,
'a10': product_region,
'a11': product_availabilityDomain,
'a12': product_resourceId,
'a13': usage_billedQuantity,
'a14': usage_consumedQuantity,
'a15': usage_consumedQuantityUnits,
'a16': usage_consumedQuantityMeasure,
'a17': lineItem_isCorrection,
'a18': tags_data
}
x = requests.post(url, data = myobj)
#print(" Completed file " + o.name + " - " + str(num_rows) + " Rows Inserted")
num_files += 1
# remove file
os.remove(path_filename)
#######################################
# insert bulk tags to the database
#######################################
data = []
for tag in tags_keys:
row_data = (str(tenancy.name), tag, str(tenancy.name), tag)
url = 'https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/usagetags/' + str(tenancy.name)
myobj = {'tag': tag}
x = requests.post(url, data = myobj)
return num_files
except Exception as e:
print("\nload_usage_file() - Error Download Usage and insert to database 02 - " + str(e))
raise SystemExit
##########################################################################
# Main
##########################################################################
def main_process():
cmd = set_parser_arguments()
if cmd is None:
exit()
config, signer = create_signer(cmd)
############################################
# Start
############################################
#print_header("Running Usage Load to ADW", 0)
#print("Starts at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
#print("Command Line : " + ' '.join(x for x in sys.argv[1:]))
############################################
# Identity extract compartments
############################################
compartments = []
tenancy = None
try:
#print("\nConnecting to Identity Service...")
identity = oci.identity.IdentityClient(config, signer=signer)
if cmd.proxy:
identity.base_client.session.proxies = {'https': cmd.proxy}
tenancy = identity.get_tenancy(config["tenancy"]).data
tenancy_home_region = ""
# find home region full name
subscribed_regions = identity.list_region_subscriptions(tenancy.id).data
for reg in subscribed_regions:
if reg.is_home_region:
tenancy_home_region = str(reg.region_name)
#print(" Tenant Name : " + str(tenancy.name))
#print(" Tenant Id : " + tenancy.id)
#print(" App Version : " + version)
#print(" Home Region : " + tenancy_home_region)
#print("")
# set signer home region
signer.region = tenancy_home_region
config['region'] = tenancy_home_region
# Extract compartments
compartments = identity_read_compartments(identity, tenancy)
except Exception as e:
print("\nError extracting compartments section - " + str(e) + "\n")
raise SystemExit
############################################
# connect to database
############################################
max_usage_file_id = ""
max_cost_file_id = ""
poc_status = ""
try:
#print('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/pocstatus/' + str(tenancy.name))
x = requests.get('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/pocstatus/' + str(tenancy.name))
response = json.loads(x.text)
#print(response['status'])
poc_status = response['status']
#print(poc_status)
if (poc_status==3):
#print('if')
sys.exit()
###############################
# fetch max file id processed
# for usage and cost
###############################
#print("\nChecking Last Loaded File...")
#sql = "select /*+ full(a) parallel(a,4) */ nvl(max(file_id),'0') as file_id from OCI_USAGE a where TENANT_NAME=:tenant_name"
#cursor.execute(sql, {"tenant_name": str(tenancy.name)})
#max_usage_file_id, = cursor.fetchone()
x = requests.get('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/usage/' + str(tenancy.name))
response = json.loads(x.text)
#print(response['file_id'])
max_usage_file_id = response['file_id']
x = requests.get('https://qhs3h6j0buxd9es-p2p.adb.sa-saopaulo-1.oraclecloudapps.com/ords/usage/poccontrol/cost/' + str(tenancy.name))
response = json.loads(x.text)
#print(response['file_id'])
max_cost_file_id = response['file_id']
#print(" Max Usage File Id Processed = " + str(max_usage_file_id))
#print(" Max Cost File Id Processed = " + str(max_cost_file_id))
except Exception as e:
raise Exception("\nError manipulating database - " + str(e))
############################################
# Download Usage, cost and insert to database
############################################
try:
#print("\nConnecting to Object Storage Service...")
object_storage = oci.object_storage.ObjectStorageClient(config, signer=signer)
if cmd.proxy:
object_storage.base_client.session.proxies = {'https': cmd.proxy}
#print(" Connected")
#############################
# Handle Report Usage
#############################
usage_num = 0
if not cmd.skip_usage:
#print("\nHandling Usage Report...")
objects = object_storage.list_objects(usage_report_namespace, str(tenancy.id), fields="timeCreated,size", limit=999, prefix="reports/usage-csv/", start="reports/usage-csv/" + max_usage_file_id).data
for object_file in objects.objects:
usage_num += load_usage_file(object_storage, object_file, max_usage_file_id, cmd, tenancy, compartments)
#print("\n Total " + str(usage_num) + " Usage Files Loaded")
#############################
# Handle Cost Usage
#############################
cost_num = 0
if not cmd.skip_cost:
#print("\nHandling Cost Report...")
objects = object_storage.list_objects(usage_report_namespace, str(tenancy.id), fields="timeCreated,size", limit=999, prefix="reports/cost-csv/", start="reports/cost-csv/" + max_cost_file_id).data
for object_file in objects.objects:
cost_num += load_cost_file(object_storage, object_file, max_cost_file_id, cmd, tenancy, compartments)
#print("\n Total " + str(cost_num) + " Cost Files Loaded")
# Handle Index structure if not exist
#check_database_index_structure_usage(connection)
#check_database_index_structure_cost(connection)
# Update oci_usage_stats and oci_cost_stats if there were files
#if usage_num > 0:
# update_usage_stats(connection)
#if cost_num > 0:
# update_cost_stats(connection)
# update_cost_reference(connection)
# update_price_list(connection)
# update_public_rates(connection, tenancy.name)
except Exception as e:
print("\nError Download Usage and insert to database 03 - " + str(e))
############################################
# print completed
############################################
#print("\nCompleted at " + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
##########################################################################
# Execute Main Process
##########################################################################
main_process()
|
[
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"pandas.read_csv",
"oci.config.get_config_value_or_default",
"requests.post",
"json.loads",
"os.path.exists",
"requests.get",
"oci.auth.signers.InstancePrincipalsSecurityTokenSigner",
"argparse.FileType",
"oci.config.from_file",
"csv.DictReader",
"oci.identity.IdentityClient",
"time.sleep",
"sys.exit",
"gzip.open",
"oci.object_storage.ObjectStorageClient",
"oci.pagination.list_call_get_all_results"
] |
[((1080, 1111), 'os.path.exists', 'os.path.exists', (['work_report_dir'], {}), '(work_report_dir)\n', (1094, 1111), False, 'import os\n'), ((1117, 1142), 'os.mkdir', 'os.mkdir', (['work_report_dir'], {}), '(work_report_dir)\n', (1125, 1142), False, 'import os\n'), ((5794, 5819), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5817, 5819), False, 'import argparse\n'), ((2709, 2758), 'oci.config.from_file', 'oci.config.from_file', (['config_file', 'config_section'], {}), '(config_file, config_section)\n', (2729, 2758), False, 'import oci\n'), ((29902, 29926), 'os.remove', 'os.remove', (['path_filename'], {}), '(path_filename)\n', (29911, 29926), False, 'import os\n'), ((29935, 29964), 'os.remove', 'os.remove', (['path_filename[:-3]'], {}), '(path_filename[:-3])\n', (29944, 29964), False, 'import os\n'), ((29973, 30016), 'os.remove', 'os.remove', (["(path_filename[:-3][:-3] + 'json')"], {}), "(path_filename[:-3][:-3] + 'json')\n", (29982, 30016), False, 'import os\n'), ((36468, 36492), 'os.remove', 'os.remove', (['path_filename'], {}), '(path_filename)\n', (36477, 36492), False, 'import os\n'), ((38009, 38059), 'oci.identity.IdentityClient', 'oci.identity.IdentityClient', (['config'], {'signer': 'signer'}), '(config, signer=signer)\n', (38036, 38059), False, 'import oci\n'), ((39598, 39616), 'json.loads', 'json.loads', (['x.text'], {}), '(x.text)\n', (39608, 39616), False, 'import json\n'), ((40404, 40422), 'json.loads', 'json.loads', (['x.text'], {}), '(x.text)\n', (40414, 40422), False, 'import json\n'), ((40669, 40687), 'json.loads', 'json.loads', (['x.text'], {}), '(x.text)\n', (40679, 40687), False, 'import json\n'), ((41266, 41327), 'oci.object_storage.ObjectStorageClient', 'oci.object_storage.ObjectStorageClient', (['config'], {'signer': 'signer'}), '(config, signer=signer)\n', (41304, 41327), False, 'import oci\n'), ((2370, 2426), 'oci.auth.signers.InstancePrincipalsSecurityTokenSigner', 'oci.auth.signers.InstancePrincipalsSecurityTokenSigner', ([], {}), '()\n', (2424, 2426), False, 'import oci\n'), ((5856, 5878), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (5873, 5878), False, 'import argparse\n'), ((20773, 20803), 'gzip.open', 'gzip.open', (['path_filename', '"""rt"""'], {}), "(path_filename, 'rt')\n", (20782, 20803), False, 'import gzip\n'), ((20841, 20864), 'csv.DictReader', 'csv.DictReader', (['file_in'], {}), '(file_in)\n', (20855, 20864), False, 'import csv\n'), ((21037, 21068), 'pandas.read_csv', 'pd.read_csv', (['path_filename[:-3]'], {}), '(path_filename[:-3])\n', (21048, 21068), True, 'import pandas as pd\n'), ((21497, 21527), 'requests.post', 'requests.post', (['url'], {'data': 'myobj'}), '(url, data=myobj)\n', (21510, 21527), False, 'import requests\n'), ((21576, 21606), 'gzip.open', 'gzip.open', (['path_filename', '"""rt"""'], {}), "(path_filename, 'rt')\n", (21585, 21606), False, 'import gzip\n'), ((21644, 21667), 'csv.DictReader', 'csv.DictReader', (['file_in'], {}), '(file_in)\n', (21658, 21667), False, 'import csv\n'), ((30500, 30530), 'requests.post', 'requests.post', (['url'], {'data': 'myobj'}), '(url, data=myobj)\n', (30513, 30530), False, 'import requests\n'), ((32179, 32209), 'gzip.open', 'gzip.open', (['path_filename', '"""rt"""'], {}), "(path_filename, 'rt')\n", (32188, 32209), False, 'import gzip\n'), ((32247, 32270), 'csv.DictReader', 'csv.DictReader', (['file_in'], {}), '(file_in)\n', (32261, 32270), False, 'import csv\n'), ((36941, 36971), 'requests.post', 'requests.post', (['url'], {'data': 'myobj'}), '(url, data=myobj)\n', (36954, 36971), False, 'import requests\n'), ((39787, 39797), 'sys.exit', 'sys.exit', ([], {}), '()\n', (39795, 39797), False, 'import sys\n'), ((3000, 3061), 'oci.config.get_config_value_or_default', 'oci.config.get_config_value_or_default', (['config', '"""pass_phrase"""'], {}), "(config, 'pass_phrase')\n", (3038, 3061), False, 'import oci\n'), ((3573, 3689), 'oci.pagination.list_call_get_all_results', 'oci.pagination.list_call_get_all_results', (['identity.list_compartments', 'tenancy.id'], {'compartment_id_in_subtree': '(True)'}), '(identity.list_compartments,\n tenancy.id, compartment_id_in_subtree=True)\n', (3613, 3689), False, 'import oci\n'), ((29719, 29749), 'requests.post', 'requests.post', (['url'], {'data': 'myobj'}), '(url, data=myobj)\n', (29732, 29749), False, 'import requests\n'), ((36285, 36315), 'requests.post', 'requests.post', (['url'], {'data': 'myobj'}), '(url, data=myobj)\n', (36298, 36315), False, 'import requests\n'), ((15330, 15431), 'requests.get', 'requests.get', (['(api_url + cost_product_sku)'], {'headers': "{'X-Oracle-Accept-CurrencyCode': country_code}"}), "(api_url + cost_product_sku, headers={\n 'X-Oracle-Accept-CurrencyCode': country_code})\n", (15342, 15431), False, 'import requests\n'), ((15447, 15462), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (15457, 15462), False, 'import time\n'), ((15633, 15646), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (15643, 15646), False, 'import time\n')]
|
"""
SUSOD dev config.
currently not very useful
"""
import os
APPLICATION_ROOT = '/'
SECRET_KEY = b'tobegenerated'
SESSION_COOKIE_NAME = 'login_name'
# Directory for file uploads
# currently not used
UPLOAD_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'var',
'uploads',
)
# Database configuration
DATABASE_HOSTNAME = 'localhost'
DATABASE_NAME = 'dbSUSOD'
DATABASE_USERNAME = 'susod'
DATABASE_PASSWORD = 'password'
|
[
"os.path.realpath"
] |
[((267, 293), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (283, 293), False, 'import os\n')]
|
import pytest
#from project0 import project0
from project0 import main
def test_fetchincidents():
url = "https://www.normanok.gov/sites/default/files/documents/2021-03/2021-03-03_daily_incident_summary.pdf"
data = main.fetchincidents(url)
assert type(data) == bytes
def test_extractincidents():
url = "https://www.normanok.gov/sites/default/files/documents/2021-03/2021-03-03_daily_incident_summary.pdf"
data = main.fetchincidents(url)
incidentdata = main.extractincidents(data)
assert type(incidentdata)== list
def test_createdb():
db = main.createdb()
assert db == 'normanpd.db'
def test_populatedb():
assert True
def test_status():
assert True
|
[
"project0.main.extractincidents",
"project0.main.fetchincidents",
"project0.main.createdb"
] |
[((223, 247), 'project0.main.fetchincidents', 'main.fetchincidents', (['url'], {}), '(url)\n', (242, 247), False, 'from project0 import main\n'), ((434, 458), 'project0.main.fetchincidents', 'main.fetchincidents', (['url'], {}), '(url)\n', (453, 458), False, 'from project0 import main\n'), ((478, 505), 'project0.main.extractincidents', 'main.extractincidents', (['data'], {}), '(data)\n', (499, 505), False, 'from project0 import main\n'), ((574, 589), 'project0.main.createdb', 'main.createdb', ([], {}), '()\n', (587, 589), False, 'from project0 import main\n')]
|
from rest_framework import serializers
from users.models import User, Permission, Role
class RoleRelatedField(serializers.RelatedField):
def to_representation(self, instance):
return RoleSerializer(instance).data
def to_internal_value(self, data):
return self.queryset.get(pk=data)
class UserSerializer(serializers.ModelSerializer):
role = RoleRelatedField(many=False, queryset=Role.objects.all())
class Meta:
model = User
fields = ['id', 'first_name', 'last_name', 'email', 'password', 'role']
extra_kwargs = {
'password': {'write_only': True}
}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password is not None:
instance.set_password(password)
instance.save()
return instance
def update(self, instance, validated_data):
password = validated_data.pop('password', None)
if password is not None:
instance.set_password(password)
instance.save()
return instance
class PermissionRelatedField(serializers.StringRelatedField):
def to_representation(self, value):
return PermissionSerializer(value).data
def to_internal_value(self, data):
return data
class PermissionSerializer(serializers.ModelSerializer):
class Meta:
model = Permission
fields = '__all__'
class RoleSerializer(serializers.ModelSerializer):
permissions = PermissionRelatedField(many=True)
class Meta:
model = Role
fields = '__all__'
def create(self, validated_data):
permissions = validated_data.pop('permissions', None)
instance = self.Meta.model(**validated_data)
instance.save()
instance.permissions.add(*permissions)
instance.save()
return instance
|
[
"users.models.Role.objects.all"
] |
[((413, 431), 'users.models.Role.objects.all', 'Role.objects.all', ([], {}), '()\n', (429, 431), False, 'from users.models import User, Permission, Role\n')]
|
#!/usr/bin/env python
"""Configuration loader for benchy benchmark harness.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
def _load():
"""Initializes and returns a singleton config dictionary.
"""
config = _load.config
if config is not None:
return _load.config
benchy_dir = os.path.dirname(os.path.realpath(__file__))
tools_dir = os.path.dirname(benchy_dir)
base_dir = os.path.dirname(tools_dir)
fbcode_dir = os.path.dirname(base_dir)
benchmark_dir = os.path.join(base_dir, 'benchmarks', 'php-octane')
_load.config = {
'ANYMEAN_PATH': os.path.join(benchy_dir, 'any_mean.py'),
'BENCHMARK_DIR': benchmark_dir,
'BENCH_ENTRY_PATH': os.path.join(benchmark_dir, 'harness-run.php'),
'BUILD_INTERNAL_PATH': os.path.join(fbcode_dir[1:], '_build',
'opt', 'hphp'),
'HARNESS_PATH': os.path.join(benchy_dir, 'benchy_harness.py'),
'INCLUDE_PATH': os.path.join(benchmark_dir, 'include.php'),
'SIGNIFICANCE_PATH': os.path.join(benchy_dir, 'significance.py'),
'SUITES_PATH': os.path.join(benchmark_dir, 'suites.json'),
'VERSION': 1,
'WRAPPER_PATH': os.path.join(tools_dir, 'hhvm_wrapper.php'),
}
home_dir = os.path.expanduser('~')
config_path = os.path.join(home_dir, '.benchy')
with open(config_path, 'r') as config_file:
tmp = json.load(config_file)
work_dir = _load.config['WORK_DIR'] = tmp['work_dir']
_load.config['BUILD_ROOT'] = tmp['build_dir']
_load.config['RUNSCRIPT_PATH'] = os.path.join(work_dir, 'runscript')
_load.config['RUNLOG_PATH'] = os.path.join(work_dir, 'runlog')
_load.config['PERF_PATH'] = os.path.join(work_dir, 'perf')
_load.config['TMP_PATH'] = os.path.join(work_dir, 'tmp')
_load.config['PLATFORM'] = "%s_platform" % tmp['platform']
return _load.config
_load.config = None
def _get(key):
"""Looks up the given key in the config singleton.
"""
config = _load()
if key in config:
return config[key]
return None
ANYMEAN_PATH = _get('ANYMEAN_PATH')
BENCHMARK_DIR = _get('BENCHMARK_DIR')
BENCH_ENTRY_PATH = _get('BENCH_ENTRY_PATH')
BUILD_ROOT = _get('BUILD_ROOT')
BUILD_INTERNAL_PATH = _get('BUILD_INTERNAL_PATH')
HARNESS_PATH = _get('HARNESS_PATH')
INCLUDE_PATH = _get('INCLUDE_PATH')
PERF_PATH = _get('PERF_PATH')
PLATFORM = _get('PLATFORM')
RUNLOG_PATH = _get('RUNLOG_PATH')
RUNSCRIPT_PATH = _get('RUNSCRIPT_PATH')
SIGNIFICANCE_PATH = _get('SIGNIFICANCE_PATH')
SUITES_PATH = _get('SUITES_PATH')
TMP_PATH = _get('TMP_PATH')
VERSION = _get('VERSION')
WORK_DIR = _get('WORK_DIR')
WRAPPER_PATH = _get('WRAPPER_PATH')
|
[
"json.load",
"os.path.join",
"os.path.dirname",
"os.path.realpath",
"os.path.expanduser"
] |
[((497, 524), 'os.path.dirname', 'os.path.dirname', (['benchy_dir'], {}), '(benchy_dir)\n', (512, 524), False, 'import os\n'), ((540, 566), 'os.path.dirname', 'os.path.dirname', (['tools_dir'], {}), '(tools_dir)\n', (555, 566), False, 'import os\n'), ((584, 609), 'os.path.dirname', 'os.path.dirname', (['base_dir'], {}), '(base_dir)\n', (599, 609), False, 'import os\n'), ((630, 680), 'os.path.join', 'os.path.join', (['base_dir', '"""benchmarks"""', '"""php-octane"""'], {}), "(base_dir, 'benchmarks', 'php-octane')\n", (642, 680), False, 'import os\n'), ((1406, 1429), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1424, 1429), False, 'import os\n'), ((1448, 1481), 'os.path.join', 'os.path.join', (['home_dir', '""".benchy"""'], {}), "(home_dir, '.benchy')\n", (1460, 1481), False, 'import os\n'), ((453, 479), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (469, 479), False, 'import os\n'), ((726, 765), 'os.path.join', 'os.path.join', (['benchy_dir', '"""any_mean.py"""'], {}), "(benchy_dir, 'any_mean.py')\n", (738, 765), False, 'import os\n'), ((835, 881), 'os.path.join', 'os.path.join', (['benchmark_dir', '"""harness-run.php"""'], {}), "(benchmark_dir, 'harness-run.php')\n", (847, 881), False, 'import os\n'), ((914, 967), 'os.path.join', 'os.path.join', (['fbcode_dir[1:]', '"""_build"""', '"""opt"""', '"""hphp"""'], {}), "(fbcode_dir[1:], '_build', 'opt', 'hphp')\n", (926, 967), False, 'import os\n'), ((1037, 1082), 'os.path.join', 'os.path.join', (['benchy_dir', '"""benchy_harness.py"""'], {}), "(benchy_dir, 'benchy_harness.py')\n", (1049, 1082), False, 'import os\n'), ((1108, 1150), 'os.path.join', 'os.path.join', (['benchmark_dir', '"""include.php"""'], {}), "(benchmark_dir, 'include.php')\n", (1120, 1150), False, 'import os\n'), ((1181, 1224), 'os.path.join', 'os.path.join', (['benchy_dir', '"""significance.py"""'], {}), "(benchy_dir, 'significance.py')\n", (1193, 1224), False, 'import os\n'), ((1249, 1291), 'os.path.join', 'os.path.join', (['benchmark_dir', '"""suites.json"""'], {}), "(benchmark_dir, 'suites.json')\n", (1261, 1291), False, 'import os\n'), ((1339, 1382), 'os.path.join', 'os.path.join', (['tools_dir', '"""hhvm_wrapper.php"""'], {}), "(tools_dir, 'hhvm_wrapper.php')\n", (1351, 1382), False, 'import os\n'), ((1544, 1566), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (1553, 1566), False, 'import json\n'), ((1724, 1759), 'os.path.join', 'os.path.join', (['work_dir', '"""runscript"""'], {}), "(work_dir, 'runscript')\n", (1736, 1759), False, 'import os\n'), ((1798, 1830), 'os.path.join', 'os.path.join', (['work_dir', '"""runlog"""'], {}), "(work_dir, 'runlog')\n", (1810, 1830), False, 'import os\n'), ((1867, 1897), 'os.path.join', 'os.path.join', (['work_dir', '"""perf"""'], {}), "(work_dir, 'perf')\n", (1879, 1897), False, 'import os\n'), ((1933, 1962), 'os.path.join', 'os.path.join', (['work_dir', '"""tmp"""'], {}), "(work_dir, 'tmp')\n", (1945, 1962), False, 'import os\n')]
|
import json
def read_json_file(filename):
f = open(filename, 'r')
data = json.load(f)
f.close()
return data
def find_nth(haystack, needle, n):
start = haystack.find(needle)
while start >= 0 and n > 1:
start = haystack.find(needle, start + len(needle))
n -= 1
return start
def find_base_url(url):
end_pos = find_nth(url, '/', 4)
return url[0:end_pos]
|
[
"json.load"
] |
[((83, 95), 'json.load', 'json.load', (['f'], {}), '(f)\n', (92, 95), False, 'import json\n')]
|
import unittest
import numpy as np
from numpy.testing import assert_array_equal,\
assert_array_almost_equal, assert_almost_equal
from .image_generation import binary_circle_border
from ..spim import Spim, SpimStage
from ..process_opencv import ContourFinderSimple, FeatureFormFilter
class FeatureFilterTestCase(unittest.TestCase):
seed = 0
repetitions = 20
def test_binary_circle_left_border_filter(self):
h, w = [1000, 2000]
contour_finder = ContourFinderSimple()
feature_filter = FeatureFormFilter(size=0,
solidity=0.9,
remove_on_edge=True)
for i in range(self.repetitions):
# randomly select a border
j = np.random.randint(low=0, high=3)
border = ["left", "right", "top", "bottom"][j]
circ_im, exp_pos, exp_radius = binary_circle_border(
border,
shape=(h, w),
val_type=np.uint8,
seed=self.seed)
assert_array_equal(np.sort(np.unique(circ_im)), np.array([0, 255]))
# make spim, assuming image is already binary
bin_spim = Spim(image=circ_im,
metadata={},
stage=SpimStage.binarized,
cached=False,
predecessors=[])
cont_spim = bin_spim\
.extract_features(contour_finder)\
.filter_features(feature_filter)
blobs = cont_spim.metadata["contours"]
self.assertEqual(len(blobs), 0)
|
[
"numpy.random.randint",
"numpy.array",
"numpy.unique"
] |
[((802, 834), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(3)'}), '(low=0, high=3)\n', (819, 834), True, 'import numpy as np\n'), ((1149, 1167), 'numpy.array', 'np.array', (['[0, 255]'], {}), '([0, 255])\n', (1157, 1167), True, 'import numpy as np\n'), ((1128, 1146), 'numpy.unique', 'np.unique', (['circ_im'], {}), '(circ_im)\n', (1137, 1146), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import os
import yaml
rootDir = 'apis'
for dirName, subdirList, fileList in os.walk(rootDir):
if 'env.yaml' in fileList:
print(f"Found env.yaml in {dirName}")
with open(os.path.join(dirName, 'env.yaml'), 'r') as stream:
try:
env_yaml = yaml.safe_load(stream)
print(f"Loaded env.yaml in {dirName}")
except yaml.YAMLError as exc:
print(exc)
print(env_yaml)
try:
os.system(f"cd {dirName} && rm -rf .env Pipfile")
except:
print("Could not remove .env and Pipfile")
packages_to_install = ' '.join(env_yaml['packages']) + ' git+https://github.com/theunifai/unifai-api-utils.git'
os.system(f"cd {dirName} && echo Y | pipenv --python {env_yaml['python']['version']}")
os.system(f"cd {dirName} && pipenv run pip install {packages_to_install}")
|
[
"yaml.safe_load",
"os.walk",
"os.path.join",
"os.system"
] |
[((101, 117), 'os.walk', 'os.walk', (['rootDir'], {}), '(rootDir)\n', (108, 117), False, 'import os\n'), ((814, 910), 'os.system', 'os.system', (['f"""cd {dirName} && echo Y | pipenv --python {env_yaml[\'python\'][\'version\']}"""'], {}), '(\n f"cd {dirName} && echo Y | pipenv --python {env_yaml[\'python\'][\'version\']}"\n )\n', (823, 910), False, 'import os\n'), ((913, 987), 'os.system', 'os.system', (['f"""cd {dirName} && pipenv run pip install {packages_to_install}"""'], {}), "(f'cd {dirName} && pipenv run pip install {packages_to_install}')\n", (922, 987), False, 'import os\n'), ((219, 252), 'os.path.join', 'os.path.join', (['dirName', '"""env.yaml"""'], {}), "(dirName, 'env.yaml')\n", (231, 252), False, 'import os\n'), ((314, 336), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (328, 336), False, 'import yaml\n'), ((536, 585), 'os.system', 'os.system', (['f"""cd {dirName} && rm -rf .env Pipfile"""'], {}), "(f'cd {dirName} && rm -rf .env Pipfile')\n", (545, 585), False, 'import os\n')]
|
# -*-coding:utf-8 -*-
u"""
:创建时间: 2021/12/5 1:40
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
from __future__ import unicode_literals, print_function
import imp
import init
imp.reload(init)
import regex_match_dialog
print(regex_match_dialog.exec_())
|
[
"imp.reload",
"regex_match_dialog.exec_"
] |
[((332, 348), 'imp.reload', 'imp.reload', (['init'], {}), '(init)\n', (342, 348), False, 'import imp\n'), ((382, 408), 'regex_match_dialog.exec_', 'regex_match_dialog.exec_', ([], {}), '()\n', (406, 408), False, 'import regex_match_dialog\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-10 22:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prodavnica', '0006_slika'),
]
operations = [
migrations.RemoveField(
model_name='slika',
name='lokacija',
),
migrations.AddField(
model_name='slika',
name='slika',
field=models.FileField(blank=True, upload_to='C:\\Python34\\Scripts\\env_site1\\slike'),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.FileField"
] |
[((290, 349), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""slika"""', 'name': '"""lokacija"""'}), "(model_name='slika', name='lokacija')\n", (312, 349), False, 'from django.db import migrations, models\n'), ((491, 577), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'upload_to': '"""C:\\\\Python34\\\\Scripts\\\\env_site1\\\\slike"""'}), "(blank=True, upload_to=\n 'C:\\\\Python34\\\\Scripts\\\\env_site1\\\\slike')\n", (507, 577), False, 'from django.db import migrations, models\n')]
|
import numpy as np
import modeling.collision_model as cm
import visualization.panda.world as wd
if __name__ == '__main__':
base = wd.World(cam_pos=np.array([.7, .05, .3]), lookat_pos=np.zeros(3))
# object
object_ref = cm.CollisionModel(initor="./objects/bunnysim.stl",
cdprimit_type="box",
cdmesh_type="triangles")
object_ref.set_rgba([.9, .75, .35, 1])
# object 1
object1 = object_ref.copy()
object1.set_pos(np.array([0, -.18, 0]))
# object 2
object2 = object_ref.copy()
object2.set_pos(np.array([0, -.09, 0]))
# object 3
object3 = object_ref.copy()
object3.change_cdprimitive_type(cdprimitive_type="surface_balls")
object3.set_pos(np.array([0, .0, 0]))
# object 4
object4 = object_ref.copy()
object4.set_pos(np.array([0, .09, 0]))
# object 5
object5 = object_ref.copy()
object5.change_cdmesh_type(cdmesh_type="convex_hull")
object5.set_pos(np.array([0, .18, 0]))
# object 1 show
object1.attach_to(base)
# object 2 show
object2.attach_to(base)
object2.show_cdprimit()
# object 3 show
object3.attach_to(base)
object3.show_cdprimit()
# object 4 show
object4.attach_to(base)
object4.show_cdmesh()
# object 5 show
object5.attach_to(base)
object5.show_cdmesh()
base.run()
|
[
"numpy.array",
"numpy.zeros",
"modeling.collision_model.CollisionModel"
] |
[((231, 331), 'modeling.collision_model.CollisionModel', 'cm.CollisionModel', ([], {'initor': '"""./objects/bunnysim.stl"""', 'cdprimit_type': '"""box"""', 'cdmesh_type': '"""triangles"""'}), "(initor='./objects/bunnysim.stl', cdprimit_type='box',\n cdmesh_type='triangles')\n", (248, 331), True, 'import modeling.collision_model as cm\n'), ((508, 531), 'numpy.array', 'np.array', (['[0, -0.18, 0]'], {}), '([0, -0.18, 0])\n', (516, 531), True, 'import numpy as np\n'), ((599, 622), 'numpy.array', 'np.array', (['[0, -0.09, 0]'], {}), '([0, -0.09, 0])\n', (607, 622), True, 'import numpy as np\n'), ((760, 781), 'numpy.array', 'np.array', (['[0, 0.0, 0]'], {}), '([0, 0.0, 0])\n', (768, 781), True, 'import numpy as np\n'), ((849, 871), 'numpy.array', 'np.array', (['[0, 0.09, 0]'], {}), '([0, 0.09, 0])\n', (857, 871), True, 'import numpy as np\n'), ((997, 1019), 'numpy.array', 'np.array', (['[0, 0.18, 0]'], {}), '([0, 0.18, 0])\n', (1005, 1019), True, 'import numpy as np\n'), ((152, 178), 'numpy.array', 'np.array', (['[0.7, 0.05, 0.3]'], {}), '([0.7, 0.05, 0.3])\n', (160, 178), True, 'import numpy as np\n'), ((188, 199), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (196, 199), True, 'import numpy as np\n')]
|
import FWCore.ParameterSet.Config as cms
hltESPChi2MeasurementEstimator100 = cms.ESProducer("Chi2MeasurementEstimatorESProducer",
ComponentName = cms.string('hltESPChi2MeasurementEstimator100'),
MaxChi2 = cms.double(40.0),
MaxDisplacement = cms.double(0.5),
MaxSagitta = cms.double(2.0),
MinPtForHitRecoveryInGluedDet = cms.double(1e+12),
MinimalTolerance = cms.double(0.5),
appendToDataLabel = cms.string(''),
nSigma = cms.double(4.0)
)
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.double"
] |
[((151, 198), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""hltESPChi2MeasurementEstimator100"""'], {}), "('hltESPChi2MeasurementEstimator100')\n", (161, 198), True, 'import FWCore.ParameterSet.Config as cms\n'), ((214, 230), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(40.0)'], {}), '(40.0)\n', (224, 230), True, 'import FWCore.ParameterSet.Config as cms\n'), ((254, 269), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (264, 269), True, 'import FWCore.ParameterSet.Config as cms\n'), ((288, 303), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(2.0)'], {}), '(2.0)\n', (298, 303), True, 'import FWCore.ParameterSet.Config as cms\n'), ((341, 368), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(1000000000000.0)'], {}), '(1000000000000.0)\n', (351, 368), True, 'import FWCore.ParameterSet.Config as cms\n'), ((383, 398), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.5)'], {}), '(0.5)\n', (393, 398), True, 'import FWCore.ParameterSet.Config as cms\n'), ((424, 438), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (434, 438), True, 'import FWCore.ParameterSet.Config as cms\n'), ((453, 468), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(4.0)'], {}), '(4.0)\n', (463, 468), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import unittest
import zserio
from testutils import getZserioApi
class StructTemplateInTemplateTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "templates.zs").struct_template_in_template
def testReadWrite(self):
structTemplateInTemplate = self.api.StructTemplateInTemplate(
self.api.Field_uint32(self.api.Compound_uint32(42)),
self.api.Field_string(self.api.Compound_string("string"))
)
writer = zserio.BitStreamWriter()
structTemplateInTemplate.write(writer)
reader = zserio.BitStreamReader(writer.byte_array, writer.bitposition)
readStructTemplateInTemplate = self.api.StructTemplateInTemplate()
readStructTemplateInTemplate.read(reader)
self.assertEqual(structTemplateInTemplate, readStructTemplateInTemplate)
|
[
"zserio.BitStreamReader",
"testutils.getZserioApi",
"zserio.BitStreamWriter"
] |
[((512, 536), 'zserio.BitStreamWriter', 'zserio.BitStreamWriter', ([], {}), '()\n', (534, 536), False, 'import zserio\n'), ((601, 662), 'zserio.BitStreamReader', 'zserio.BitStreamReader', (['writer.byte_array', 'writer.bitposition'], {}), '(writer.byte_array, writer.bitposition)\n', (623, 662), False, 'import zserio\n'), ((182, 220), 'testutils.getZserioApi', 'getZserioApi', (['__file__', '"""templates.zs"""'], {}), "(__file__, 'templates.zs')\n", (194, 220), False, 'from testutils import getZserioApi\n')]
|
# Original Source:
# https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
#
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment and RandAugment policies for enhanced image preprocessing.
AutoAugment Reference: https://arxiv.org/abs/1805.09501
RandAugment Reference: https://arxiv.org/abs/1909.13719
"""
from functools import partial
import jax
from jax import random
import jax.numpy as jnp
from imax import color_transforms
from imax import transforms
DEBUG = False
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
DEFAULT_RANDAUGMENT_VALUES = {
# function_name -> probability
# ORDER NEEDS TO BE KEPT THE SAME AS IN level_to_arg
'AutoContrast': 1., # 0
'Equalize': 1., # 1
'Invert': 0., # 2
'Posterize': 1., # 3
'Solarize': 0., # 4
'SolarizeAdd': 0., # 5
'Color': 1., # 6
'Contrast': 1., # 7
'Brightness': 1., # 8
'Sharpness': 1., # 9
'Rotate': 1., # 10
'ShearX': 1., # 11
'ShearY': 1., # 12
'TranslateX': 1., # 13
'TranslateY': 1., # 14
'FlipX': 1., # 15
'FlipY': 1., # 16
'Cutout': 1., # 17
}
DEFAULT_OPS = jnp.array(list(range(len(DEFAULT_RANDAUGMENT_VALUES.keys()))))
DEFAULT_PROBS = jnp.array(list(DEFAULT_RANDAUGMENT_VALUES.values())) / \
sum(list(DEFAULT_RANDAUGMENT_VALUES.values()))
def level_to_arg(cutout_val, translate_val, negate, level, mask_value):
"""
Translates the level to args for various functions.
Args:
cutout_val: value for cutout size of cutout function
translate_val: value for
negate: negate level
level: input level
Returns:
"""
return tuple({
'AutoContrast': (),
'Equalize': (),
'Invert': (),
'Posterize': (5 - jnp.min(jnp.array(
[4, (level / _MAX_LEVEL * 4).astype('uint8')])),),
'Solarize': (((level / _MAX_LEVEL) * 256).astype('uint8'),),
'SolarizeAdd': (((level / _MAX_LEVEL) * 110).astype('uint8'),),
'Color': _enhance_level_to_arg(level),
'Contrast': _enhance_level_to_arg(level),
'Brightness': _enhance_level_to_arg(level),
'Sharpness': _enhance_level_to_arg(level),
'Rotate': (_rotate_level_to_arg(level, negate),),
'ShearX': (_shear_level_to_arg(level, negate), 0),
'ShearY': (0, _shear_level_to_arg(level, negate)),
'TranslateX': (_translate_level_to_arg(translate_val, negate)[0], 0.),
'TranslateY': (0., _translate_level_to_arg(translate_val, negate)[1]),
'FlipX': (True, False),
'FlipY': (False, True),
'Cutout': (cutout_val, mask_value),
}.values())
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return 1.0 # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return level
def _enhance_level_to_arg(level):
return [(level / _MAX_LEVEL) * 1.8 + 0.1]
def _rotate_level_to_arg(level, negate):
level = (level / _MAX_LEVEL) * jnp.pi
level = jax.lax.cond(
negate,
lambda l: -l,
lambda l: l,
level
)
return level
def _shear_level_to_arg(level, negate):
level = (level / _MAX_LEVEL)
# Flip level to negative with 50% chance.
level = jax.lax.cond(
negate,
lambda l: -l,
lambda l: l,
level
)
return level
def _translate_level_to_arg(translate_val, negate):
# Flip level to negative with 50% chance.
level = jax.lax.cond(
negate,
lambda t: (-t[0], -t[1]),
lambda t: t,
translate_val
)
return level
def _apply_ops(image, args, selected_op):
"""
An abomination of a function to apply a chosen operation to an image.
Args:
image:
args:
selected_op:
Returns:
"""
geometric_transform = jnp.identity(4)
image, geometric_transform = jax.lax.switch(selected_op, [
lambda op: (color_transforms.autocontrast(op[0], *op[1][0]),
geometric_transform), # 0
lambda op: (color_transforms.equalize(op[0], *op[1][1]),
geometric_transform), # 1
lambda op: (color_transforms.invert(op[0], *op[1][2]),
geometric_transform), # 2
lambda op: (color_transforms.posterize(op[0], *op[1][3]),
geometric_transform), # 3
lambda op: (color_transforms.solarize(op[0], *op[1][4]),
geometric_transform), # 4
lambda op: (color_transforms.solarize_add(op[0], *op[1][5]),
geometric_transform), # 5
lambda op: (color_transforms.color(op[0], *op[1][6]),
geometric_transform), # 6
lambda op: (color_transforms.contrast(op[0], *op[1][7]),
geometric_transform), # 7
lambda op: (color_transforms.brightness(op[0], *op[1][8]),
geometric_transform), # 8
lambda op: (color_transforms.sharpness(op[0], *op[1][9]),
geometric_transform), # 9
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.rotate(*op[1][10]))), # 10
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.shear(*op[1][11]))), # 11
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.shear(*op[1][12]))), # 12
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.translate(*op[1][13]))), # 13
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.translate(*op[1][14]))), # 14
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.flip(*op[1][15]))), # 15
lambda op: (op[0], jnp.matmul(geometric_transform,
transforms.flip(*op[1][16]))), # 16
lambda op: (color_transforms.cutout(op[0], *op[1][17]),
geometric_transform), # 17
], (image, args))
return image, geometric_transform
# @jax.jit
def _randaugment_inner_for_loop(_, in_args):
"""
Loop body for for randougment.
Args:
i: loop iteration
in_args: loop body arguments
Returns:
updated loop arguments
"""
(image, geometric_transforms, random_key, available_ops, op_probs,
magnitude, cutout_const, translate_const, join_transforms,
default_replace_value) = in_args
random_keys = random.split(random_key, num=8)
random_key = random_keys[0] # keep for next iteration
op_to_select = random.choice(random_keys[1], available_ops, p=op_probs)
mask_value = jnp.where(default_replace_value > 0,
jnp.ones([image.shape[-1]]) * default_replace_value,
random.randint(random_keys[2],
[image.shape[-1]],
minval=-1, maxval=256))
random_magnitude = random.uniform(random_keys[3], [], minval=0.,
maxval=magnitude)
cutout_mask = color_transforms.get_random_cutout_mask(
random_keys[4],
image.shape,
cutout_const)
translate_vals = (random.uniform(random_keys[5], [], minval=0.0,
maxval=1.0) * translate_const,
random.uniform(random_keys[6], [], minval=0.0,
maxval=1.0) * translate_const)
negate = random.randint(random_keys[7], [], minval=0,
maxval=2).astype('bool')
args = level_to_arg(cutout_mask, translate_vals, negate,
random_magnitude, mask_value)
if DEBUG:
print(op_to_select, args[op_to_select])
image, geometric_transform = _apply_ops(image, args, op_to_select)
image, geometric_transform = jax.lax.cond(
jnp.logical_or(join_transforms, jnp.all(
jnp.not_equal(geometric_transform, jnp.identity(4)))),
lambda op: (op[0], op[1]),
lambda op: (transforms.apply_transform(op[0],
op[1],
mask_value=mask_value),
jnp.identity(4)),
(image, geometric_transform)
)
geometric_transforms = jnp.matmul(geometric_transforms, geometric_transform)
return(image, geometric_transforms, random_key, available_ops, op_probs,
magnitude, cutout_const, translate_const, join_transforms,
default_replace_value)
def distort_image_with_randaugment(image,
num_layers,
magnitude,
random_key,
cutout_const=40,
translate_const=50.0,
default_replace_value=-1,
available_ops=DEFAULT_OPS,
op_probs=DEFAULT_PROBS,
join_transforms=False):
"""Applies the RandAugment policy to `image`.
RandAugment is from the paper https://arxiv.org/abs/1909.13719,
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper.
Usually best values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Usually best values are in the range
[5, 30].
random_key: random key to do random stuff
join_transforms: reduce multiple transforms to one.
Much more efficient but simpler.
cutout_const: max cutout size int
translate_const: maximum translation amount int
default_replace_value: default replacement value for pixels outside
of the image
available_ops: available operations
op_probs: probabilities of operations
join_transforms: apply transformations immediately or join them
Returns:
The augmented version of `image`.
"""
geometric_transforms = jnp.identity(4)
for_i_args = (image, geometric_transforms, random_key, available_ops,
op_probs, magnitude, cutout_const, translate_const,
join_transforms, default_replace_value)
if DEBUG: # un-jitted
for i in range(num_layers):
for_i_args = _randaugment_inner_for_loop(i, for_i_args)
else: # jitted
for_i_args = jax.lax.fori_loop(0, num_layers,
_randaugment_inner_for_loop, for_i_args)
image, geometric_transforms = for_i_args[0], for_i_args[1]
if join_transforms:
replace_value = jnp.where(default_replace_value > 0,
jnp.ones([image.shape[-1]]) * default_replace_value,
random.randint(random_key,
[image.shape[-1]],
minval=0,
maxval=256))
image = transforms.apply_transform(image, geometric_transforms,
mask_value=replace_value)
return image
#
# if not DEBUG:
# distort_image_with_randaugment = jax.jit(distort_image_with_randaugment, static_argnames=('default_replace_value', ))
|
[
"imax.color_transforms.invert",
"imax.color_transforms.cutout",
"imax.color_transforms.sharpness",
"imax.transforms.shear",
"jax.lax.cond",
"jax.random.uniform",
"imax.color_transforms.contrast",
"jax.numpy.matmul",
"jax.random.randint",
"imax.transforms.flip",
"imax.color_transforms.color",
"imax.color_transforms.autocontrast",
"imax.color_transforms.solarize",
"imax.transforms.rotate",
"imax.transforms.translate",
"imax.color_transforms.brightness",
"imax.color_transforms.equalize",
"imax.color_transforms.get_random_cutout_mask",
"jax.numpy.ones",
"jax.lax.fori_loop",
"imax.color_transforms.posterize",
"jax.random.split",
"jax.numpy.identity",
"imax.color_transforms.solarize_add",
"jax.random.choice",
"imax.transforms.apply_transform"
] |
[((4138, 4192), 'jax.lax.cond', 'jax.lax.cond', (['negate', '(lambda l: -l)', '(lambda l: l)', 'level'], {}), '(negate, lambda l: -l, lambda l: l, level)\n', (4150, 4192), False, 'import jax\n'), ((4381, 4435), 'jax.lax.cond', 'jax.lax.cond', (['negate', '(lambda l: -l)', '(lambda l: l)', 'level'], {}), '(negate, lambda l: -l, lambda l: l, level)\n', (4393, 4435), False, 'import jax\n'), ((4603, 4677), 'jax.lax.cond', 'jax.lax.cond', (['negate', '(lambda t: (-t[0], -t[1]))', '(lambda t: t)', 'translate_val'], {}), '(negate, lambda t: (-t[0], -t[1]), lambda t: t, translate_val)\n', (4615, 4677), False, 'import jax\n'), ((4968, 4983), 'jax.numpy.identity', 'jnp.identity', (['(4)'], {}), '(4)\n', (4980, 4983), True, 'import jax.numpy as jnp\n'), ((7717, 7748), 'jax.random.split', 'random.split', (['random_key'], {'num': '(8)'}), '(random_key, num=8)\n', (7729, 7748), False, 'from jax import random\n'), ((7827, 7883), 'jax.random.choice', 'random.choice', (['random_keys[1]', 'available_ops'], {'p': 'op_probs'}), '(random_keys[1], available_ops, p=op_probs)\n', (7840, 7883), False, 'from jax import random\n'), ((8226, 8290), 'jax.random.uniform', 'random.uniform', (['random_keys[3]', '[]'], {'minval': '(0.0)', 'maxval': 'magnitude'}), '(random_keys[3], [], minval=0.0, maxval=magnitude)\n', (8240, 8290), False, 'from jax import random\n'), ((8346, 8432), 'imax.color_transforms.get_random_cutout_mask', 'color_transforms.get_random_cutout_mask', (['random_keys[4]', 'image.shape', 'cutout_const'], {}), '(random_keys[4], image.shape,\n cutout_const)\n', (8385, 8432), False, 'from imax import color_transforms\n'), ((9578, 9631), 'jax.numpy.matmul', 'jnp.matmul', (['geometric_transforms', 'geometric_transform'], {}), '(geometric_transforms, geometric_transform)\n', (9588, 9631), True, 'import jax.numpy as jnp\n'), ((11549, 11564), 'jax.numpy.identity', 'jnp.identity', (['(4)'], {}), '(4)\n', (11561, 11564), True, 'import jax.numpy as jnp\n'), ((8045, 8117), 'jax.random.randint', 'random.randint', (['random_keys[2]', '[image.shape[-1]]'], {'minval': '(-1)', 'maxval': '(256)'}), '(random_keys[2], [image.shape[-1]], minval=-1, maxval=256)\n', (8059, 8117), False, 'from jax import random\n'), ((11941, 12014), 'jax.lax.fori_loop', 'jax.lax.fori_loop', (['(0)', 'num_layers', '_randaugment_inner_for_loop', 'for_i_args'], {}), '(0, num_layers, _randaugment_inner_for_loop, for_i_args)\n', (11958, 12014), False, 'import jax\n'), ((12557, 12643), 'imax.transforms.apply_transform', 'transforms.apply_transform', (['image', 'geometric_transforms'], {'mask_value': 'replace_value'}), '(image, geometric_transforms, mask_value=\n replace_value)\n', (12583, 12643), False, 'from imax import transforms\n'), ((7965, 7992), 'jax.numpy.ones', 'jnp.ones', (['[image.shape[-1]]'], {}), '([image.shape[-1]])\n', (7973, 7992), True, 'import jax.numpy as jnp\n'), ((8477, 8535), 'jax.random.uniform', 'random.uniform', (['random_keys[5]', '[]'], {'minval': '(0.0)', 'maxval': '(1.0)'}), '(random_keys[5], [], minval=0.0, maxval=1.0)\n', (8491, 8535), False, 'from jax import random\n'), ((8614, 8672), 'jax.random.uniform', 'random.uniform', (['random_keys[6]', '[]'], {'minval': '(0.0)', 'maxval': '(1.0)'}), '(random_keys[6], [], minval=0.0, maxval=1.0)\n', (8628, 8672), False, 'from jax import random\n'), ((8742, 8796), 'jax.random.randint', 'random.randint', (['random_keys[7]', '[]'], {'minval': '(0)', 'maxval': '(2)'}), '(random_keys[7], [], minval=0, maxval=2)\n', (8756, 8796), False, 'from jax import random\n'), ((12325, 12392), 'jax.random.randint', 'random.randint', (['random_key', '[image.shape[-1]]'], {'minval': '(0)', 'maxval': '(256)'}), '(random_key, [image.shape[-1]], minval=0, maxval=256)\n', (12339, 12392), False, 'from jax import random\n'), ((9310, 9373), 'imax.transforms.apply_transform', 'transforms.apply_transform', (['op[0]', 'op[1]'], {'mask_value': 'mask_value'}), '(op[0], op[1], mask_value=mask_value)\n', (9336, 9373), False, 'from imax import transforms\n'), ((9489, 9504), 'jax.numpy.identity', 'jnp.identity', (['(4)'], {}), '(4)\n', (9501, 9504), True, 'import jax.numpy as jnp\n'), ((12238, 12265), 'jax.numpy.ones', 'jnp.ones', (['[image.shape[-1]]'], {}), '([image.shape[-1]])\n', (12246, 12265), True, 'import jax.numpy as jnp\n'), ((5067, 5114), 'imax.color_transforms.autocontrast', 'color_transforms.autocontrast', (['op[0]', '*op[1][0]'], {}), '(op[0], *op[1][0])\n', (5096, 5114), False, 'from imax import color_transforms\n'), ((5183, 5226), 'imax.color_transforms.equalize', 'color_transforms.equalize', (['op[0]', '*op[1][1]'], {}), '(op[0], *op[1][1])\n', (5208, 5226), False, 'from imax import color_transforms\n'), ((5295, 5336), 'imax.color_transforms.invert', 'color_transforms.invert', (['op[0]', '*op[1][2]'], {}), '(op[0], *op[1][2])\n', (5318, 5336), False, 'from imax import color_transforms\n'), ((5405, 5449), 'imax.color_transforms.posterize', 'color_transforms.posterize', (['op[0]', '*op[1][3]'], {}), '(op[0], *op[1][3])\n', (5431, 5449), False, 'from imax import color_transforms\n'), ((5518, 5561), 'imax.color_transforms.solarize', 'color_transforms.solarize', (['op[0]', '*op[1][4]'], {}), '(op[0], *op[1][4])\n', (5543, 5561), False, 'from imax import color_transforms\n'), ((5630, 5677), 'imax.color_transforms.solarize_add', 'color_transforms.solarize_add', (['op[0]', '*op[1][5]'], {}), '(op[0], *op[1][5])\n', (5659, 5677), False, 'from imax import color_transforms\n'), ((5746, 5786), 'imax.color_transforms.color', 'color_transforms.color', (['op[0]', '*op[1][6]'], {}), '(op[0], *op[1][6])\n', (5768, 5786), False, 'from imax import color_transforms\n'), ((5855, 5898), 'imax.color_transforms.contrast', 'color_transforms.contrast', (['op[0]', '*op[1][7]'], {}), '(op[0], *op[1][7])\n', (5880, 5898), False, 'from imax import color_transforms\n'), ((5967, 6012), 'imax.color_transforms.brightness', 'color_transforms.brightness', (['op[0]', '*op[1][8]'], {}), '(op[0], *op[1][8])\n', (5994, 6012), False, 'from imax import color_transforms\n'), ((6081, 6125), 'imax.color_transforms.sharpness', 'color_transforms.sharpness', (['op[0]', '*op[1][9]'], {}), '(op[0], *op[1][9])\n', (6107, 6125), False, 'from imax import color_transforms\n'), ((7146, 7188), 'imax.color_transforms.cutout', 'color_transforms.cutout', (['op[0]', '*op[1][17]'], {}), '(op[0], *op[1][17])\n', (7169, 7188), False, 'from imax import color_transforms\n'), ((9235, 9250), 'jax.numpy.identity', 'jnp.identity', (['(4)'], {}), '(4)\n', (9247, 9250), True, 'import jax.numpy as jnp\n'), ((6271, 6300), 'imax.transforms.rotate', 'transforms.rotate', (['*op[1][10]'], {}), '(*op[1][10])\n', (6288, 6300), False, 'from imax import transforms\n'), ((6407, 6435), 'imax.transforms.shear', 'transforms.shear', (['*op[1][11]'], {}), '(*op[1][11])\n', (6423, 6435), False, 'from imax import transforms\n'), ((6542, 6570), 'imax.transforms.shear', 'transforms.shear', (['*op[1][12]'], {}), '(*op[1][12])\n', (6558, 6570), False, 'from imax import transforms\n'), ((6677, 6709), 'imax.transforms.translate', 'transforms.translate', (['*op[1][13]'], {}), '(*op[1][13])\n', (6697, 6709), False, 'from imax import transforms\n'), ((6816, 6848), 'imax.transforms.translate', 'transforms.translate', (['*op[1][14]'], {}), '(*op[1][14])\n', (6836, 6848), False, 'from imax import transforms\n'), ((6955, 6982), 'imax.transforms.flip', 'transforms.flip', (['*op[1][15]'], {}), '(*op[1][15])\n', (6970, 6982), False, 'from imax import transforms\n'), ((7089, 7116), 'imax.transforms.flip', 'transforms.flip', (['*op[1][16]'], {}), '(*op[1][16])\n', (7104, 7116), False, 'from imax import transforms\n')]
|
from __future__ import division
from resippy.image_objects.earth_overhead.geotiff.geotiff_image_factory import GeotiffImageFactory
from resippy.photogrammetry.dem.geotiff_dem import GeotiffDem
from resippy.photogrammetry.dem.constant_elevation_dem import ConstantElevationDem
class DemFactory:
@staticmethod
def from_gtiff_file(fname, # type: str
nodata_value=None, # type: float
interpolation_method='bilinear', # type: str
): # type (...) -> GeotiffDem
# type: (str) -> GeotiffDem
gtiff = GeotiffImageFactory.from_file(fname)
gtiff_dem = GeotiffDem()
gtiff_dem.set_geotiff_image(gtiff)
# attempt to get nodata value from the gtiff file itself
if nodata_value is None:
nodata_value = gtiff.get_metadata().get_nodata_val()
# TODO: optimization needed here, takes a very long time for large datasets
gtiff_dem.remove_nodata_values(nodata_value)
if interpolation_method == 'bilinear':
gtiff_dem.set_interpolation_to_bilinear()
elif interpolation_method == 'nearest':
gtiff_dem.set_interpolation_to_nearest()
else:
TypeError("interpolation method should either be 'bilinear' or 'nearest'")
return gtiff_dem
@staticmethod
def constant_elevation(
elevation=0 # type: float
): # type: (...) -> ConstantElevationDem
return ConstantElevationDem(elevation=elevation)
|
[
"resippy.photogrammetry.dem.geotiff_dem.GeotiffDem",
"resippy.image_objects.earth_overhead.geotiff.geotiff_image_factory.GeotiffImageFactory.from_file",
"resippy.photogrammetry.dem.constant_elevation_dem.ConstantElevationDem"
] |
[((593, 629), 'resippy.image_objects.earth_overhead.geotiff.geotiff_image_factory.GeotiffImageFactory.from_file', 'GeotiffImageFactory.from_file', (['fname'], {}), '(fname)\n', (622, 629), False, 'from resippy.image_objects.earth_overhead.geotiff.geotiff_image_factory import GeotiffImageFactory\n'), ((650, 662), 'resippy.photogrammetry.dem.geotiff_dem.GeotiffDem', 'GeotiffDem', ([], {}), '()\n', (660, 662), False, 'from resippy.photogrammetry.dem.geotiff_dem import GeotiffDem\n'), ((1481, 1522), 'resippy.photogrammetry.dem.constant_elevation_dem.ConstantElevationDem', 'ConstantElevationDem', ([], {'elevation': 'elevation'}), '(elevation=elevation)\n', (1501, 1522), False, 'from resippy.photogrammetry.dem.constant_elevation_dem import ConstantElevationDem\n')]
|
import asyncio
import time
################################################################################
async def main():
reader, writer = await asyncio.open_connection(
'127.0.0.1', 8888
)
while not reader.at_eof():
data = await reader.readline()
print('[{}] Received: {}'.format(time.strftime('%X'), data))
################################################################################
if __name__ == '__main__':
asyncio.run(main())
|
[
"asyncio.open_connection",
"time.strftime"
] |
[((153, 195), 'asyncio.open_connection', 'asyncio.open_connection', (['"""127.0.0.1"""', '(8888)'], {}), "('127.0.0.1', 8888)\n", (176, 195), False, 'import asyncio\n'), ((321, 340), 'time.strftime', 'time.strftime', (['"""%X"""'], {}), "('%X')\n", (334, 340), False, 'import time\n')]
|
#!/usr/bin/env python3
import numpy as np
import csv
import json
import sys
import argparse
import multiprocessing as mp
import glob
import os
from functools import partial
from sofa_print import *
import subprocess
from time import sleep, time
def sofa_record(command, logdir, cfg):
p_tcpdump = None
p_mpstat = None
p_vmstat = None
p_nvsmi = None
p_nvtopo = None
print_info('SOFA_COMMAND: %s' % command)
sample_freq = 99
if int(open("/proc/sys/kernel/kptr_restrict").read()) != 0:
print_error(
"/proc/kallsyms permission is restricted, please try the command below:")
print_error("sudo sysctl -w kernel.kptr_restrict=0")
quit()
if int(open("/proc/sys/kernel/perf_event_paranoid").read()) != -1:
print_error('PerfEvent is not avaiable, please try the command below:')
print_error('sudo sysctl -w kernel.perf_event_paranoid=-1')
quit()
if subprocess.call(['mkdir', '-p', logdir]):
quit()
subprocess.call('rm %s/perf.data > /dev/null 2> /dev/null' % logdir, shell=True )
subprocess.call('rm %s/sofa.pcap > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/gputrace*.nvvp > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/gputrace.tmp > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/*.csv > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/*.txt > /dev/null 2> /dev/null' % logdir, shell=True)
try:
print_info("Prolog of Recording...")
with open(os.devnull, 'w') as FNULL:
p_tcpdump = subprocess.Popen(["tcpdump",
'-i',
'any',
'-v',
'tcp',
'-w',
'%s/sofa.pcap' % logdir],
stderr=FNULL)
with open('%s/mpstat.txt' % logdir, 'w') as logfile:
p_mpstat = subprocess.Popen(
['mpstat', '-P', 'ALL', '1', '600'], stdout=logfile)
with open('%s/vmstat.txt' % logdir, 'w') as logfile:
p_vmstat = subprocess.Popen(['vmstat', '-w', '1', '600'], stdout=logfile)
if int(os.system('command -v nvprof')) == 0:
with open('%s/nvsmi.txt' % logdir, 'w') as logfile:
p_nvsmi = subprocess.Popen(['nvidia-smi', 'dmon', '-s', 'u'], stdout=logfile)
with open('%s/nvlink_topo.txt' % logdir, 'w') as logfile:
p_nvtopo = subprocess.Popen(['nvidia-smi', 'topo', '-m'], stdout=logfile)
with open('%s/sofa_time.txt' % logdir, 'w') as logfile:
logfile.write(str(int(time()))+'\n')
print_info("Recording...")
if cfg.profile_all_cpus == True:
perf_options = '-a'
else:
perf_options = ''
subprocess.call('cp /proc/kallsyms %s/' % (logdir), shell=True )
subprocess.call('chmod +w %s/kallsyms' % (logdir), shell=True )
if int(os.system('command -v nvprof')) == 0:
profile_command = 'nvprof --profile-child-processes -o %s/gputrace%%p.nvvp perf record -e cycles,bus-cycles -o %s/perf.data -F %s %s -- %s ' % (logdir, logdir, sample_freq, perf_options, command)
else:
print_warning('Profile without NVPROF')
profile_command = 'perf record -o %s/perf.data -e cycles,bus-cycles -F %s %s -- %s' % (logdir, sample_freq, perf_options, command)
print_info( profile_command)
subprocess.call(profile_command.split())
print_info("Epilog of Recording...")
if p_tcpdump != None:
p_tcpdump.terminate()
print_info("tried terminating tcpdump")
if p_vmstat != None:
p_vmstat.terminate()
print_info("tried terminating vmstat")
if p_mpstat != None:
p_mpstat.terminate()
print_info("tried terminating mpstat")
if p_nvtopo != None:
p_nvtopo.terminate()
print_info("tried terminating nvidia-smi topo")
if p_nvsmi != None:
p_nvsmi.terminate()
print_info("tried terminating nvidia-smi dmon")
#os.system('pkill tcpdump')
#os.system('pkill mpstat')
#os.system('pkill vmstat')
#os.system('pkill nvidia-smi')
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
if p_tcpdump != None:
p_tcpdump.kill()
print_info("tried killing tcpdump")
if p_vmstat != None:
p_vmstat.kill()
print_info("tried killing vmstat")
if p_mpstat != None:
p_mpstat.kill()
print_info("tried killing mpstat")
if p_nvtopo != None:
p_nvtopo.kill()
print_info("tried killing nvidia-smi topo")
if p_nvsmi != None:
p_nvsmi.kill()
print_info("tried killing nvidia-smi dmon")
raise
print_info("End of Recording")
|
[
"subprocess.Popen",
"os.system",
"time.time",
"subprocess.call",
"sys.exc_info"
] |
[((950, 990), 'subprocess.call', 'subprocess.call', (["['mkdir', '-p', logdir]"], {}), "(['mkdir', '-p', logdir])\n", (965, 990), False, 'import subprocess\n'), ((1011, 1096), 'subprocess.call', 'subprocess.call', (["('rm %s/perf.data > /dev/null 2> /dev/null' % logdir)"], {'shell': '(True)'}), "('rm %s/perf.data > /dev/null 2> /dev/null' % logdir, shell=True\n )\n", (1026, 1096), False, 'import subprocess\n'), ((1097, 1182), 'subprocess.call', 'subprocess.call', (["('rm %s/sofa.pcap > /dev/null 2> /dev/null' % logdir)"], {'shell': '(True)'}), "('rm %s/sofa.pcap > /dev/null 2> /dev/null' % logdir, shell=True\n )\n", (1112, 1182), False, 'import subprocess\n'), ((1182, 1271), 'subprocess.call', 'subprocess.call', (["('rm %s/gputrace*.nvvp > /dev/null 2> /dev/null' % logdir)"], {'shell': '(True)'}), "('rm %s/gputrace*.nvvp > /dev/null 2> /dev/null' % logdir,\n shell=True)\n", (1197, 1271), False, 'import subprocess\n'), ((1272, 1359), 'subprocess.call', 'subprocess.call', (["('rm %s/gputrace.tmp > /dev/null 2> /dev/null' % logdir)"], {'shell': '(True)'}), "('rm %s/gputrace.tmp > /dev/null 2> /dev/null' % logdir,\n shell=True)\n", (1287, 1359), False, 'import subprocess\n'), ((1360, 1436), 'subprocess.call', 'subprocess.call', (["('rm %s/*.csv > /dev/null 2> /dev/null' % logdir)"], {'shell': '(True)'}), "('rm %s/*.csv > /dev/null 2> /dev/null' % logdir, shell=True)\n", (1375, 1436), False, 'import subprocess\n'), ((1441, 1517), 'subprocess.call', 'subprocess.call', (["('rm %s/*.txt > /dev/null 2> /dev/null' % logdir)"], {'shell': '(True)'}), "('rm %s/*.txt > /dev/null 2> /dev/null' % logdir, shell=True)\n", (1456, 1517), False, 'import subprocess\n'), ((2931, 2992), 'subprocess.call', 'subprocess.call', (["('cp /proc/kallsyms %s/' % logdir)"], {'shell': '(True)'}), "('cp /proc/kallsyms %s/' % logdir, shell=True)\n", (2946, 2992), False, 'import subprocess\n'), ((3004, 3064), 'subprocess.call', 'subprocess.call', (["('chmod +w %s/kallsyms' % logdir)"], {'shell': '(True)'}), "('chmod +w %s/kallsyms' % logdir, shell=True)\n", (3019, 3064), False, 'import subprocess\n'), ((1641, 1745), 'subprocess.Popen', 'subprocess.Popen', (["['tcpdump', '-i', 'any', '-v', 'tcp', '-w', '%s/sofa.pcap' % logdir]"], {'stderr': 'FNULL'}), "(['tcpdump', '-i', 'any', '-v', 'tcp', '-w', '%s/sofa.pcap' %\n logdir], stderr=FNULL)\n", (1657, 1745), False, 'import subprocess\n'), ((2035, 2104), 'subprocess.Popen', 'subprocess.Popen', (["['mpstat', '-P', 'ALL', '1', '600']"], {'stdout': 'logfile'}), "(['mpstat', '-P', 'ALL', '1', '600'], stdout=logfile)\n", (2051, 2104), False, 'import subprocess\n'), ((2206, 2268), 'subprocess.Popen', 'subprocess.Popen', (["['vmstat', '-w', '1', '600']"], {'stdout': 'logfile'}), "(['vmstat', '-w', '1', '600'], stdout=logfile)\n", (2222, 2268), False, 'import subprocess\n'), ((2284, 2314), 'os.system', 'os.system', (['"""command -v nvprof"""'], {}), "('command -v nvprof')\n", (2293, 2314), False, 'import os\n'), ((2412, 2479), 'subprocess.Popen', 'subprocess.Popen', (["['nvidia-smi', 'dmon', '-s', 'u']"], {'stdout': 'logfile'}), "(['nvidia-smi', 'dmon', '-s', 'u'], stdout=logfile)\n", (2428, 2479), False, 'import subprocess\n'), ((2577, 2639), 'subprocess.Popen', 'subprocess.Popen', (["['nvidia-smi', 'topo', '-m']"], {'stdout': 'logfile'}), "(['nvidia-smi', 'topo', '-m'], stdout=logfile)\n", (2593, 2639), False, 'import subprocess\n'), ((3083, 3113), 'os.system', 'os.system', (['"""command -v nvprof"""'], {}), "('command -v nvprof')\n", (3092, 3113), False, 'import os\n'), ((4459, 4473), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4471, 4473), False, 'import sys\n'), ((2740, 2746), 'time.time', 'time', ([], {}), '()\n', (2744, 2746), False, 'from time import sleep, time\n')]
|
#!/usr/bin/env python
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A utility script which sends test messages to a queue.
"""
from __future__ import absolute_import
import argparse
import eventlet
from kombu import Exchange
from st2common import config
from st2common.transport.publishers import PoolPublisher
def main(exchange, routing_key, payload):
exchange = Exchange(exchange, type='topic')
publisher = PoolPublisher()
publisher.publish(payload=payload, exchange=exchange, routing_key=routing_key)
eventlet.sleep(0.5)
if __name__ == '__main__':
config.parse_args(args={})
parser = argparse.ArgumentParser(description='Queue producer')
parser.add_argument('--exchange', required=True,
help='Exchange to publish the message to')
parser.add_argument('--routing-key', required=True,
help='Routing key to use')
parser.add_argument('--payload', required=True,
help='Message payload')
args = parser.parse_args()
main(exchange=args.exchange, routing_key=args.routing_key,
payload=args.payload)
|
[
"argparse.ArgumentParser",
"st2common.config.parse_args",
"st2common.transport.publishers.PoolPublisher",
"kombu.Exchange",
"eventlet.sleep"
] |
[((919, 951), 'kombu.Exchange', 'Exchange', (['exchange'], {'type': '"""topic"""'}), "(exchange, type='topic')\n", (927, 951), False, 'from kombu import Exchange\n'), ((968, 983), 'st2common.transport.publishers.PoolPublisher', 'PoolPublisher', ([], {}), '()\n', (981, 983), False, 'from st2common.transport.publishers import PoolPublisher\n'), ((1071, 1090), 'eventlet.sleep', 'eventlet.sleep', (['(0.5)'], {}), '(0.5)\n', (1085, 1090), False, 'import eventlet\n'), ((1124, 1150), 'st2common.config.parse_args', 'config.parse_args', ([], {'args': '{}'}), '(args={})\n', (1141, 1150), False, 'from st2common import config\n'), ((1164, 1217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Queue producer"""'}), "(description='Queue producer')\n", (1187, 1217), False, 'import argparse\n')]
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_integration_benchmark."""
import unittest
from perfkitbenchmarker import beam_pipeline_options
class BeamArgsOptionsTestCase(unittest.TestCase):
def testNoFlagsPassed(self):
options_list = beam_pipeline_options.GenerateAllPipelineOptions(
None, None, [], [])
self.assertListEqual(options_list, [])
def testAllFlagsPassed(self):
options_list = beam_pipeline_options.GenerateAllPipelineOptions(
"--itargone=anarg,--itargtwo=anotherarg",
"[\"--project=testProj\","
"\"--gcpTempLocation=gs://test-bucket/staging\"]",
[{"postgresUsername": "postgres"}, {"postgresPassword": "<PASSWORD>"}],
[{"name": "aTestVal", "type": "TestValue", "value": "this_is_a_test"},
{"name": "testier", "type": "TestValue", "value": "another_test"}]
)
self.assertListEqual(options_list,
["\"--itargone=anarg\"",
"\"--itargtwo=anotherarg\"",
"\"--project=testProj\"",
"\"--gcpTempLocation=gs://test-bucket/staging\"",
"\"--aTestVal=this_is_a_test\"",
"\"--testier=another_test\"",
"\"--postgresUsername=postgres\"",
"\"--postgresPassword=<PASSWORD>\""])
def testItOptionsWithSpaces(self):
options_list = beam_pipeline_options.GenerateAllPipelineOptions(
None,
"[\"--project=testProj\", "
"\"--gcpTempLocation=gs://test-bucket/staging\"]",
[],
[])
self.assertListEqual(options_list,
["\"--project=testProj\"",
"\"--gcpTempLocation=gs://test-bucket/staging\""])
def testDynamicPipelineOpionsWithFormat(self):
dynamic_options = [
{
"name": "test_value_A",
"type": "TestValue",
"value": "a_value",
"format": "other representation of {{TestValue}}",
},
{
"name": "test_value_B",
"type": "TestValue",
"value": "b_value"
}
]
self.assertListEqual(
beam_pipeline_options.EvaluateDynamicPipelineOptions(dynamic_options),
[
("test_value_A", "other representation of a_value"),
("test_value_B", "b_value"),
]
)
def dynamicPipelineOptions(self):
beam_pipeline_options.EvaluateDynamicPipelineOptions()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"perfkitbenchmarker.beam_pipeline_options.GenerateAllPipelineOptions",
"perfkitbenchmarker.beam_pipeline_options.EvaluateDynamicPipelineOptions"
] |
[((3148, 3163), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3161, 3163), False, 'import unittest\n'), ((828, 896), 'perfkitbenchmarker.beam_pipeline_options.GenerateAllPipelineOptions', 'beam_pipeline_options.GenerateAllPipelineOptions', (['None', 'None', '[]', '[]'], {}), '(None, None, [], [])\n', (876, 896), False, 'from perfkitbenchmarker import beam_pipeline_options\n'), ((1001, 1394), 'perfkitbenchmarker.beam_pipeline_options.GenerateAllPipelineOptions', 'beam_pipeline_options.GenerateAllPipelineOptions', (['"""--itargone=anarg,--itargtwo=anotherarg"""', '"""["--project=testProj","--gcpTempLocation=gs://test-bucket/staging"]"""', "[{'postgresUsername': 'postgres'}, {'postgresPassword': '<PASSWORD>'}]", "[{'name': 'aTestVal', 'type': 'TestValue', 'value': 'this_is_a_test'}, {\n 'name': 'testier', 'type': 'TestValue', 'value': 'another_test'}]"], {}), '(\n \'--itargone=anarg,--itargtwo=anotherarg\',\n \'["--project=testProj","--gcpTempLocation=gs://test-bucket/staging"]\',\n [{\'postgresUsername\': \'postgres\'}, {\'postgresPassword\': \'<PASSWORD>\'}],\n [{\'name\': \'aTestVal\', \'type\': \'TestValue\', \'value\': \'this_is_a_test\'},\n {\'name\': \'testier\', \'type\': \'TestValue\', \'value\': \'another_test\'}])\n', (1049, 1394), False, 'from perfkitbenchmarker import beam_pipeline_options\n'), ((2006, 2148), 'perfkitbenchmarker.beam_pipeline_options.GenerateAllPipelineOptions', 'beam_pipeline_options.GenerateAllPipelineOptions', (['None', '"""["--project=testProj", "--gcpTempLocation=gs://test-bucket/staging"]"""', '[]', '[]'], {}), '(None,\n \'["--project=testProj", "--gcpTempLocation=gs://test-bucket/staging"]\',\n [], [])\n', (2054, 2148), False, 'from perfkitbenchmarker import beam_pipeline_options\n'), ((3062, 3116), 'perfkitbenchmarker.beam_pipeline_options.EvaluateDynamicPipelineOptions', 'beam_pipeline_options.EvaluateDynamicPipelineOptions', ([], {}), '()\n', (3114, 3116), False, 'from perfkitbenchmarker import beam_pipeline_options\n'), ((2808, 2877), 'perfkitbenchmarker.beam_pipeline_options.EvaluateDynamicPipelineOptions', 'beam_pipeline_options.EvaluateDynamicPipelineOptions', (['dynamic_options'], {}), '(dynamic_options)\n', (2860, 2877), False, 'from perfkitbenchmarker import beam_pipeline_options\n')]
|
# Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=import-outside-toplevel
from typing import List
import argparse
from argparse import Namespace
from pcluster.cli.commands.common import CliCommand
class ConfigureCommand(CliCommand):
"""Implement pcluster configure command."""
# CLI
name = "configure"
help = "Start the AWS ParallelCluster configuration."
description = help
def __init__(self, subparsers):
super().__init__(subparsers, name=self.name, help=self.help, description=self.description)
def register_command_args(self, parser: argparse.ArgumentParser) -> None: # noqa: D102
parser.add_argument("-c", "--config", help="Path to output the generated config file.", required=True)
def execute(self, args: Namespace, extra_args: List[str]) -> None: # noqa: D102 #pylint: disable=unused-argument
from pcluster.cli.commands.configure.easyconfig import configure
configure(args)
|
[
"pcluster.cli.commands.configure.easyconfig.configure"
] |
[((1470, 1485), 'pcluster.cli.commands.configure.easyconfig.configure', 'configure', (['args'], {}), '(args)\n', (1479, 1485), False, 'from pcluster.cli.commands.configure.easyconfig import configure\n')]
|
import pandas as pd
import numpy as np
test_submission_guardians_may_24 = pd.read_csv('data/processed/test_submission_guardians_may_24.csv')
two_sub_combined_submission_may_23 = pd.read_csv('data/processed/two_sub_combined_submission_may_23.csv')
two_sub_combined_submission_may_23_y_pred_may_24 = two_sub_combined_submission_may_23
two_sub_combined_submission_may_23_y_pred_may_24['pred'] = test_submission_guardians_may_24['pred']
two_sub_combined_submission_may_23_y_pred_may_24.to_csv('data/processed/two_sub_combined_submission_may_23_y_pred_may_24.csv',index=False)
|
[
"pandas.read_csv"
] |
[((77, 143), 'pandas.read_csv', 'pd.read_csv', (['"""data/processed/test_submission_guardians_may_24.csv"""'], {}), "('data/processed/test_submission_guardians_may_24.csv')\n", (88, 143), True, 'import pandas as pd\n'), ((181, 249), 'pandas.read_csv', 'pd.read_csv', (['"""data/processed/two_sub_combined_submission_may_23.csv"""'], {}), "('data/processed/two_sub_combined_submission_may_23.csv')\n", (192, 249), True, 'import pandas as pd\n')]
|
import silbot
from silbot.helper import InlineKBMarkup, inlineKBRow, inlineKBData
"""
This is an example of how to use new methods added with silbot 1.1
This is a simple bot that will check if a user is in a channel or is admin of that channel
"""
token = "<KEY>" # Put bot token here
channelid = -1001086416281 # Change the channel ID, the but must be admin of the channel
bot = silbot.botapi.BotApi(token, "HTML")
r, response = bot.getMe()
if not response.ok:
print("Error, wrong bot Token")
exit()
else:
print("Bot @" + r.username + " started")
def updateH(update: silbot.types.Update, bot: silbot.botapi.BotApi):
if update.message is not None:
message = update.message
chat = message.chat
if message.text == "/start":
kb = InlineKBMarkup(
inlineKBRow(
inlineKBData("Join Check", "/join"),
inlineKBData("Admin Check", "/admin")
)
)
bot.sendMessage(chat.id,
"<b>Silbot Py Example</b>\n\nClick the button to check if you are admin/member of the channel defined in the config",
kb)
elif update.callback_query is not None:
callback = update.callback_query
user = callback.user
if callback.data == "/join":
r = user.isMember(bot, channelid)
if r:
callback.answer(bot, "You joined the channel")
elif not r:
callback.answer(bot, "You have not joined the channel")
elif callback.data == "/admin":
r = user.isAdmin(bot, channelid)
if r:
callback.answer(bot, "You are an admin of the channel")
elif not r:
callback.answer(bot, "You are not an admin of the channel")
silbot.GetUpdatesLoop(bot, updateH)
|
[
"silbot.botapi.BotApi",
"silbot.GetUpdatesLoop",
"silbot.helper.inlineKBData"
] |
[((384, 419), 'silbot.botapi.BotApi', 'silbot.botapi.BotApi', (['token', '"""HTML"""'], {}), "(token, 'HTML')\n", (404, 419), False, 'import silbot\n'), ((1849, 1884), 'silbot.GetUpdatesLoop', 'silbot.GetUpdatesLoop', (['bot', 'updateH'], {}), '(bot, updateH)\n', (1870, 1884), False, 'import silbot\n'), ((853, 888), 'silbot.helper.inlineKBData', 'inlineKBData', (['"""Join Check"""', '"""/join"""'], {}), "('Join Check', '/join')\n", (865, 888), False, 'from silbot.helper import InlineKBMarkup, inlineKBRow, inlineKBData\n'), ((910, 947), 'silbot.helper.inlineKBData', 'inlineKBData', (['"""Admin Check"""', '"""/admin"""'], {}), "('Admin Check', '/admin')\n", (922, 947), False, 'from silbot.helper import InlineKBMarkup, inlineKBRow, inlineKBData\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='project',
field=models.ForeignKey(
on_delete=models.CASCADE, default=0, to='orchestra.Project'),
preserve_default=False,
),
migrations.AlterField(
model_name='process',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='depends_on',
field=models.ManyToManyField(
related_name='depends_on_rel_+', to='orchestra.Step', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='required_certifications',
field=models.ManyToManyField(
to='orchestra.Certification', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='review_policy',
field=jsonfield.fields.JSONField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='step',
name='user_interface',
field=jsonfield.fields.JSONField(blank=True),
preserve_default=True,
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.TextField",
"django.db.models.ManyToManyField"
] |
[((364, 442), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'models.CASCADE', 'default': '(0)', 'to': '"""orchestra.Project"""'}), "(on_delete=models.CASCADE, default=0, to='orchestra.Project')\n", (381, 442), False, 'from django.db import models, migrations\n'), ((623, 651), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (639, 651), False, 'from django.db import models, migrations\n'), ((810, 902), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""depends_on_rel_+"""', 'to': '"""orchestra.Step"""', 'blank': '(True)'}), "(related_name='depends_on_rel_+', to='orchestra.Step',\n blank=True)\n", (832, 902), False, 'from django.db import models, migrations\n'), ((1075, 1103), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (1091, 1103), False, 'from django.db import models, migrations\n'), ((1275, 1339), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""orchestra.Certification"""', 'blank': '(True)'}), "(to='orchestra.Certification', blank=True)\n", (1297, 1339), False, 'from django.db import models, migrations\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright 2013-2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from mock import Mock
from mock import patch
from fuelclient.tests import base
class TestHandlers(base.BaseTestCase):
def test_env_action(self):
#check env help
help_msgs = ["usage: fuel environment [-h]",
"[--list | --set | --delete | --create | --update]",
"optional arguments:", "--help", "--list", "--set",
"--delete", "--rel", "--env-create",
"--create", "--name", "--env-name", "--mode", "--net",
"--network-mode", "--nst", "--net-segment-type",
"--deployment-mode", "--update", "--env-update"]
self.check_all_in_msg("env --help", help_msgs)
#no clusters
self.check_for_rows_in_table("env")
for action in ("set", "create", "delete"):
self.check_if_required("env {0}".format(action))
#list of tuples (<fuel CLI command>, <expected output of a command>)
expected_stdout = \
[(
"env --create --name=TestEnv --release=1",
"Environment 'TestEnv' with id=1, mode=ha_compact and "
"network-mode=nova_network was created!\n"
), (
"--env-id=1 env set --name=NewEnv",
("Following attributes are changed for "
"the environment: name=NewEnv\n")
), (
"--env-id=1 env set --mode=multinode",
("Following attributes are changed for "
"the environment: mode=multinode\n")
)]
for cmd, msg in expected_stdout:
self.check_for_stdout(cmd, msg)
def test_node_action(self):
help_msg = ["fuel node [-h] [--env ENV]",
"[--list | --set | --delete | --network | --disk |"
" --deploy | --delete-from-db | --provision]", "-h",
"--help", " -s", "--default", " -d", "--download", " -u",
"--upload", "--dir", "--node", "--node-id", " -r",
"--role", "--net"]
self.check_all_in_msg("node --help", help_msg)
self.check_for_rows_in_table("node")
for action in ("set", "remove", "--network", "--disk"):
self.check_if_required("node {0}".format(action))
self.load_data_to_nailgun_server()
self.check_number_of_rows_in_table("node --node 9f:b7,9d:24,ab:aa", 3)
def test_selected_node_deploy_or_provision(self):
self.load_data_to_nailgun_server()
self.run_cli_commands((
"env create --name=NewEnv --release=1",
"--env-id=1 node set --node 1 --role=controller"
))
commands = ("--provision", "--deploy")
for action in commands:
self.check_if_required("--env-id=1 node {0}".format(action))
messages = (
"Started provisioning nodes [1].\n",
"Started deploying nodes [1].\n"
)
for cmd, msg in zip(commands, messages):
self.check_for_stdout(
"--env-id=1 node {0} --node=1".format(cmd),
msg
)
def test_check_wrong_server(self):
os.environ["SERVER_ADDRESS"] = "0"
result = self.run_cli_command("-h", check_errors=True)
self.assertEqual(result.stderr, '')
del os.environ["SERVER_ADDRESS"]
def test_destroy_node(self):
self.load_data_to_nailgun_server()
self.run_cli_commands((
"env create --name=NewEnv --release=1",
"--env-id=1 node set --node 1 --role=controller"
))
msg = ("Nodes with id [1] has been deleted from fuel db.\n"
"You should still delete node from cobbler\n")
self.check_for_stdout(
"node --node 1 --delete-from-db",
msg
)
def test_for_examples_in_action_help(self):
actions = (
"node", "stop", "deployment", "reset", "task", "network",
"settings", "provisioning", "environment", "deploy-changes",
"role", "release", "snapshot", "health"
)
for action in actions:
self.check_all_in_msg("{0} -h".format(action), ("Examples",))
def test_task_action_urls(self):
self.check_all_in_msg(
"task --task-id 1 --debug",
[
"GET http://127.0.0.1",
"/api/v1/tasks/1/"
],
check_errors=True
)
self.check_all_in_msg(
"task --task-id 1 --delete --debug",
[
"DELETE http://127.0.0.1",
"/api/v1/tasks/1/?force=0"
],
check_errors=True
)
self.check_all_in_msg(
"task --task-id 1 --delete --force --debug",
[
"DELETE http://127.0.0.1",
"/api/v1/tasks/1/?force=1"
],
check_errors=True
)
self.check_all_in_msg(
"task --tid 1 --delete --debug",
[
"DELETE http://127.0.0.1",
"/api/v1/tasks/1/?force=0"
],
check_errors=True
)
def test_get_release_list_without_errors(self):
cmd = 'release --list'
self.run_cli_command(cmd)
class TestUserActions(base.BaseTestCase):
def test_change_password_params(self):
cmd = "user change-password"
msg = "Expect password [--newpass NEWPASS]"
result = self.run_cli_command(cmd, check_errors=True)
self.assertTrue(msg, result)
class TestCharset(base.BaseTestCase):
def test_charset_problem(self):
self.load_data_to_nailgun_server()
self.run_cli_commands((
"env create --name=привет --release=1",
"--env-id=1 node set --node 1 --role=controller",
"env"
))
class TestFiles(base.BaseTestCase):
def test_file_creation(self):
self.load_data_to_nailgun_server()
self.run_cli_commands((
"env create --name=NewEnv --release=1",
"--env-id=1 node set --node 1 --role=controller",
"--env-id=1 node set --node 2,3 --role=compute"
))
for action in ("network", "settings"):
for format_ in ("yaml", "json"):
self.check_if_files_created(
"--env 1 {0} --download --{1}".format(action, format_),
("{0}_1.{1}".format(action, format_),)
)
command_to_files_map = (
(
"--env 1 deployment --default",
(
"deployment_1",
"deployment_1/primary-controller_1.yaml",
"deployment_1/compute_2.yaml",
"deployment_1/compute_3.yaml"
)
),
(
"--env 1 provisioning --default",
(
"provisioning_1",
"provisioning_1/engine.yaml",
"provisioning_1/node-1.yaml",
"provisioning_1/node-2.yaml",
"provisioning_1/node-3.yaml"
)
),
(
"--env 1 deployment --default --json",
(
"deployment_1/primary-controller_1.json",
"deployment_1/compute_2.json",
"deployment_1/compute_3.json"
)
),
(
"--env 1 provisioning --default --json",
(
"provisioning_1/engine.json",
"provisioning_1/node-1.json",
"provisioning_1/node-2.json",
"provisioning_1/node-3.json"
)
),
(
"node --node 1 --disk --default",
(
"node_1",
"node_1/disks.yaml"
)
),
(
"node --node 1 --network --default",
(
"node_1",
"node_1/interfaces.yaml"
)
),
(
"node --node 1 --disk --default --json",
(
"node_1/disks.json",
)
),
(
"node --node 1 --network --default --json",
(
"node_1/interfaces.json",
)
)
)
for command, files in command_to_files_map:
self.check_if_files_created(command, files)
def check_if_files_created(self, command, paths):
command_in_dir = "{0} --dir={1}".format(command, self.temp_directory)
self.run_cli_command(command_in_dir)
for path in paths:
self.assertTrue(os.path.exists(
os.path.join(self.temp_directory, path)
))
class TestDownloadUploadNodeAttributes(base.BaseTestCase):
def test_upload_download_interfaces(self):
self.load_data_to_nailgun_server()
cmd = "node --node-id 1 --network"
self.run_cli_commands((self.download_command(cmd),
self.upload_command(cmd)))
def test_upload_download_disks(self):
self.load_data_to_nailgun_server()
cmd = "node --node-id 1 --disk"
self.run_cli_commands((self.download_command(cmd),
self.upload_command(cmd)))
class TestDeployChanges(base.BaseTestCase):
def test_deploy_changes_no_failure(self):
self.load_data_to_nailgun_server()
env_create = "env create --name=test --release=1"
add_node = "--env-id=1 node set --node 1 --role=controller"
deploy_changes = "deploy-changes --env 1"
self.run_cli_commands((env_create, add_node, deploy_changes))
class TestAuthentication(base.UnitTestCase):
@patch('fuelclient.client.requests')
@patch('fuelclient.client.auth_client')
def test_wrong_credentials(self, mkeystone_cli, mrequests):
mkeystone_cli.return_value = Mock(auth_token='')
mrequests.get_request.return_value = Mock(status_code=200)
self.execute(
['fuel', '--user=a', '--password=a', 'node'])
mkeystone_cli.Client.assert_called_with(
username='a',
tenant_name='admin',
password='a',
auth_url='http://127.0.0.1:8003/keystone/v2.0')
self.execute(
['fuel', '--user=a', '--password', 'a', 'node'])
mkeystone_cli.Client.assert_called_with(
username='a',
tenant_name='admin',
password='a',
auth_url='http://1192.168.3.11:8003/keystone/v2.0')
|
[
"os.path.join",
"mock.patch",
"mock.Mock"
] |
[((10573, 10608), 'mock.patch', 'patch', (['"""fuelclient.client.requests"""'], {}), "('fuelclient.client.requests')\n", (10578, 10608), False, 'from mock import patch\n'), ((10614, 10652), 'mock.patch', 'patch', (['"""fuelclient.client.auth_client"""'], {}), "('fuelclient.client.auth_client')\n", (10619, 10652), False, 'from mock import patch\n'), ((10754, 10773), 'mock.Mock', 'Mock', ([], {'auth_token': '""""""'}), "(auth_token='')\n", (10758, 10773), False, 'from mock import Mock\n'), ((10819, 10840), 'mock.Mock', 'Mock', ([], {'status_code': '(200)'}), '(status_code=200)\n', (10823, 10840), False, 'from mock import Mock\n'), ((9530, 9569), 'os.path.join', 'os.path.join', (['self.temp_directory', 'path'], {}), '(self.temp_directory, path)\n', (9542, 9569), False, 'import os\n')]
|
from __future__ import print_function
from __future__ import division
from . import _C
import numpy as np
import fuzzytools.files as ftfiles
import fuzzytools.strings as ftstrings
from fuzzytools.datascience.cms import ConfusionMatrix
from fuzzytools.matplotlib.cm_plots import plot_custom_confusion_matrix
import matplotlib.pyplot as plt
from fuzzytools.datascience.xerror import XError
from IPython.display import display
from fuzzytools.strings import latex_bf_alphabet_count
from fuzzytools.latex.latex_tables import LatexTable
from fuzzytools.matplotlib.utils import save_fig
import fuzzytools.strings as strings
import lcfeatures.results.utils as utils
FIGSIZE = (6,5)
DPI = 200
RANDOM_STATE = None
NEW_ORDER_CLASS_NAMES = ['SNIa', 'SNIbc', 'SNII*', 'SLSN']
DICT_NAME = 'thdays_class_metrics'
###################################################################################################################################################
def plot_cm(rootdir, cfilename, kf, lcset_name, model_names,
figsize=FIGSIZE,
dpi=DPI,
new_order_class_names=NEW_ORDER_CLASS_NAMES,
dict_name=DICT_NAME,
alphabet_count=0,
verbose=0,
):
for model_name in model_names:
fmodel_name, mn_dict = utils.get_fmodel_name(model_name, returns_mn_dict=True)
method = mn_dict['method']
load_roodir = f'../save/{model_name}/performance/survey=alerceZTFv7.1~bands=gr~mode=onlySNe~method={method}'
print(load_roodir)
files, files_ids, kfs = ftfiles.gather_files_by_kfold(load_roodir, kf, lcset_name,
fext='d',
imbalanced_kf_mode='oversampling', # error oversampling
random_state=RANDOM_STATE,
)
print(f'{files_ids}({len(files_ids)}#)')
if len(files)==0:
continue
class_names = files[0]()['class_names']
features = files[0]()['features']
thdays = files[0]()['thdays']
rank = files[0]()['rank']
for f in features:
#print(f)
pass
thday = files[0]()['thdays'][-1]
xe_dict = {}
for metric_name in ['recall', 'f1score']:
xe_metric = XError([f()['thdays_class_metrics_df'].loc[f()['thdays_class_metrics_df']['_thday']==thday][f'b-{metric_name}'].item() for f in files])
xe_dict[f'b-{metric_name}'] = xe_metric
brecall_xe = xe_dict['b-recall']
bf1score_xe = xe_dict['b-f1score']
new_order_class_names = ['SNIa', 'SNIbc', 'SNIIbn', 'SLSN']
new_order_class_names = ['SNIa', 'SNIbc', 'SNII*', 'SLSN']
cm = ConfusionMatrix([f()['thdays_cm'][thday] for f in files], class_names)
cm.reorder_classes(new_order_class_names)
for c in new_order_class_names:
print(cm.get_diagonal_dict()[c].get_raw_repr(f'brf_{c}_tp'))
pass
true_label_d = {c:f'({k}#)' for c,k in zip(class_names, np.sum(files[0]()['thdays_cm'][thday], axis=1))}
rank = files[0]()['rank'] # just show one
rank.names = ['Feature name=\\verb+'+n+'+' for n in rank.names]
rank.values = [v*100 for v in rank.values]
rank_df = rank.get_df()
latex_table = LatexTable(rank_df,
label='tab:brf_ranking',
)
if verbose:
display(rank_df)
print(latex_table)
title = ''
title += f'{latex_bf_alphabet_count(alphabet_count)}{fmodel_name}'+'\n'
title += f'b-Recall={brecall_xe}; b-$F_1$score={bf1score_xe}'+'\n'
title += f'th-day={thday:.0f} [days]'+'\n'
fig, ax = plot_custom_confusion_matrix(cm,
title=title[:-1],
figsize=figsize,
dpi=dpi,
true_label_d=true_label_d,
lambda_c=lambda c:c.replace('*', ''),
)
save_fig(fig, f'../temp/exp=cm/{model_name}.pdf', closes_fig=0)
plt.show()
|
[
"matplotlib.pyplot.show",
"fuzzytools.latex.latex_tables.LatexTable",
"fuzzytools.files.gather_files_by_kfold",
"IPython.display.display",
"lcfeatures.results.utils.get_fmodel_name",
"fuzzytools.matplotlib.utils.save_fig",
"fuzzytools.strings.latex_bf_alphabet_count"
] |
[((1200, 1255), 'lcfeatures.results.utils.get_fmodel_name', 'utils.get_fmodel_name', (['model_name'], {'returns_mn_dict': '(True)'}), '(model_name, returns_mn_dict=True)\n', (1221, 1255), True, 'import lcfeatures.results.utils as utils\n'), ((1443, 1577), 'fuzzytools.files.gather_files_by_kfold', 'ftfiles.gather_files_by_kfold', (['load_roodir', 'kf', 'lcset_name'], {'fext': '"""d"""', 'imbalanced_kf_mode': '"""oversampling"""', 'random_state': 'RANDOM_STATE'}), "(load_roodir, kf, lcset_name, fext='d',\n imbalanced_kf_mode='oversampling', random_state=RANDOM_STATE)\n", (1472, 1577), True, 'import fuzzytools.files as ftfiles\n'), ((2886, 2930), 'fuzzytools.latex.latex_tables.LatexTable', 'LatexTable', (['rank_df'], {'label': '"""tab:brf_ranking"""'}), "(rank_df, label='tab:brf_ranking')\n", (2896, 2930), False, 'from fuzzytools.latex.latex_tables import LatexTable\n'), ((3373, 3436), 'fuzzytools.matplotlib.utils.save_fig', 'save_fig', (['fig', 'f"""../temp/exp=cm/{model_name}.pdf"""'], {'closes_fig': '(0)'}), "(fig, f'../temp/exp=cm/{model_name}.pdf', closes_fig=0)\n", (3381, 3436), False, 'from fuzzytools.matplotlib.utils import save_fig\n'), ((3439, 3449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3447, 3449), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2972), 'IPython.display.display', 'display', (['rank_df'], {}), '(rank_df)\n', (2963, 2972), False, 'from IPython.display import display\n'), ((3023, 3062), 'fuzzytools.strings.latex_bf_alphabet_count', 'latex_bf_alphabet_count', (['alphabet_count'], {}), '(alphabet_count)\n', (3046, 3062), False, 'from fuzzytools.strings import latex_bf_alphabet_count\n')]
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from .forms import fill_me_form
def fill_view2(request):
if request.method == 'POST':
print('watashi ga kitta 2 ')
form = fill_me_form(request.POST)
if form.is_valid():
data = form.cleaned_data
form.save()
print(data)
else:
form = fill_me_form()
return render(request, "Page2.html", {'form': form})
|
[
"django.shortcuts.render"
] |
[((436, 481), 'django.shortcuts.render', 'render', (['request', '"""Page2.html"""', "{'form': form}"], {}), "(request, 'Page2.html', {'form': form})\n", (442, 481), False, 'from django.shortcuts import render\n')]
|
#coding: utf8
import matplotlib.pyplot as plt
import math
import numpy as np
xList = range(100)
y1 = [x*x for x in xList]
y2 = [math.sin(x) for x in xList]
y3 = [math.sqrt(x) for x in xList]
def draw():
# plt.plot(y1, 'b-', label='y=x*x')
plt.plot(y2, label='y=sin(x)')
plt.plot(y3, 'r*', label='y=sqrt(x)')
plt.grid()
plt.legend()
plt.show()
def drawWithTicks():
# plt.plot(y1, 'b-', label='y=x*x')
plt.plot(y2, label='y=sin(x)')
plt.plot(y3, 'r*', label='y=sqrt(x)')
yticks = np.arange(min(y2), max(y2), 1) # 设置坐标轴刻度
plt.yticks(yticks)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) # 坐标轴用科学计数法
plt.grid()
plt.legend()
plt.show()
draw()
xList = range(1, 23)
y = [math.sqrt((x**3 + 7*x + 11) % 23) for x in xList]
y2 = [-math.sqrt((x**3 + 7*x + 11) % 23) for x in xList]
def drawElliptic():
plt.plot(xList, y)
plt.plot(xList, y2)
plt.grid()
plt.show()
drawElliptic()
|
[
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"math.sin",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.grid"
] |
[((130, 141), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (138, 141), False, 'import math\n'), ((164, 176), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (173, 176), False, 'import math\n'), ((252, 282), 'matplotlib.pyplot.plot', 'plt.plot', (['y2'], {'label': '"""y=sin(x)"""'}), "(y2, label='y=sin(x)')\n", (260, 282), True, 'import matplotlib.pyplot as plt\n'), ((287, 324), 'matplotlib.pyplot.plot', 'plt.plot', (['y3', '"""r*"""'], {'label': '"""y=sqrt(x)"""'}), "(y3, 'r*', label='y=sqrt(x)')\n", (295, 324), True, 'import matplotlib.pyplot as plt\n'), ((329, 339), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (337, 339), True, 'import matplotlib.pyplot as plt\n'), ((344, 356), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (354, 356), True, 'import matplotlib.pyplot as plt\n'), ((361, 371), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (369, 371), True, 'import matplotlib.pyplot as plt\n'), ((439, 469), 'matplotlib.pyplot.plot', 'plt.plot', (['y2'], {'label': '"""y=sin(x)"""'}), "(y2, label='y=sin(x)')\n", (447, 469), True, 'import matplotlib.pyplot as plt\n'), ((474, 511), 'matplotlib.pyplot.plot', 'plt.plot', (['y3', '"""r*"""'], {'label': '"""y=sqrt(x)"""'}), "(y3, 'r*', label='y=sqrt(x)')\n", (482, 511), True, 'import matplotlib.pyplot as plt\n'), ((570, 588), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yticks'], {}), '(yticks)\n', (580, 588), True, 'import matplotlib.pyplot as plt\n'), ((593, 654), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (613, 654), True, 'import matplotlib.pyplot as plt\n'), ((670, 680), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (678, 680), True, 'import matplotlib.pyplot as plt\n'), ((685, 697), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (695, 697), True, 'import matplotlib.pyplot as plt\n'), ((702, 712), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (710, 712), True, 'import matplotlib.pyplot as plt\n'), ((749, 786), 'math.sqrt', 'math.sqrt', (['((x ** 3 + 7 * x + 11) % 23)'], {}), '((x ** 3 + 7 * x + 11) % 23)\n', (758, 786), False, 'import math\n'), ((880, 898), 'matplotlib.pyplot.plot', 'plt.plot', (['xList', 'y'], {}), '(xList, y)\n', (888, 898), True, 'import matplotlib.pyplot as plt\n'), ((903, 922), 'matplotlib.pyplot.plot', 'plt.plot', (['xList', 'y2'], {}), '(xList, y2)\n', (911, 922), True, 'import matplotlib.pyplot as plt\n'), ((927, 937), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (935, 937), True, 'import matplotlib.pyplot as plt\n'), ((942, 952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (950, 952), True, 'import matplotlib.pyplot as plt\n'), ((806, 843), 'math.sqrt', 'math.sqrt', (['((x ** 3 + 7 * x + 11) % 23)'], {}), '((x ** 3 + 7 * x + 11) % 23)\n', (815, 843), False, 'import math\n')]
|
# Generated by Django 3.0.2 on 2020-01-16 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alacode', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='code',
name='q12',
field=models.CharField(default=0, help_text='Notes', max_length=500, verbose_name='q12'),
preserve_default=False,
),
migrations.AlterField(
model_name='code',
name='q1',
field=models.BooleanField(help_text='ERROR 1: the tweet has nothing to do with the societal discussion around vaccines (tick box & continue to next tweet)', verbose_name='q1'),
),
migrations.AlterField(
model_name='code',
name='q10',
field=models.IntegerField(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=0, help_text='On a scale from 1 to 5, to what extent does the tweet express feelings of anger? (1 = Not at all; 5 = Extremely)', verbose_name='q10'),
),
migrations.AlterField(
model_name='code',
name='q11',
field=models.IntegerField(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=0, help_text='On a scale from 1 to 5, to what extent does the tweet express feelings of fear? (1 = Not at all; 5 = Extremely)', verbose_name='q11'),
),
migrations.AlterField(
model_name='code',
name='q2',
field=models.BooleanField(help_text='ERROR 2: the link is not working / does not refer to a news article or blog (tick box & continue to next tweet', verbose_name='q2'),
),
migrations.AlterField(
model_name='code',
name='q3',
field=models.BooleanField(help_text="Tick the box if the tweet doesn't contain any text next to the link", verbose_name='q3'),
),
migrations.AlterField(
model_name='code',
name='q4',
field=models.BooleanField(help_text='Tick the box if the tweet only contains the title/header of the shared article', verbose_name='q4'),
),
migrations.AlterField(
model_name='code',
name='q5',
field=models.IntegerField(choices=[(0, 'The source does not contain a discernible opinion on vaccines'), (1, 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (5, 'Strongly In Favor')], default=0, help_text='To what extent would you describe the shared article as in favor or against the use of vaccines?', verbose_name='q5'),
),
migrations.AlterField(
model_name='code',
name='q6',
field=models.IntegerField(choices=[(0, 'The tweet does not contain a discernible opinion on vaccines'), (1, 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (5, 'Strongly In Favor')], default=0, help_text='To what extent would you describe the text in the tweet as in favor or against the use of vaccines?', verbose_name='q6'),
),
migrations.AlterField(
model_name='code',
name='q7',
field=models.IntegerField(choices=[(0, 'The tweet does not contain a discernible opinion towards the source'), (1, 'Strongly disagrees'), (2, 'disagrees'), (3, 'Neutral'), (4, 'Agrees'), (5, 'Strongly agrees')], default=0, help_text='To what extent does the text in the tweet (dis)agree withqi the source?', verbose_name='q7'),
),
migrations.AlterField(
model_name='code',
name='q8',
field=models.IntegerField(choices=[(0, 'The tweet does not contain a discernible opinion towards the source'), (1, 'Very Negative'), (2, 'Negative'), (3, 'Neutral'), (4, 'Positive'), (5, 'Very positive')], default=0, help_text='To what extent would you describe the text in the tweet as positive or negative towards the source?', verbose_name='q8'),
),
migrations.AlterField(
model_name='code',
name='q9',
field=models.IntegerField(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=0, help_text='On a scale from 1 to 5, to what extent does the tweet express feelings of enthusiasm? (1 = Not at all; 5 = Extremely)', verbose_name='q9'),
),
]
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.BooleanField"
] |
[((318, 405), 'django.db.models.CharField', 'models.CharField', ([], {'default': '(0)', 'help_text': '"""Notes"""', 'max_length': '(500)', 'verbose_name': '"""q12"""'}), "(default=0, help_text='Notes', max_length=500, verbose_name\n ='q12')\n", (334, 405), False, 'from django.db import migrations, models\n'), ((552, 731), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'help_text': '"""ERROR 1: the tweet has nothing to do with the societal discussion around vaccines (tick box & continue to next tweet)"""', 'verbose_name': '"""q1"""'}), "(help_text=\n 'ERROR 1: the tweet has nothing to do with the societal discussion around vaccines (tick box & continue to next tweet)'\n , verbose_name='q1')\n", (571, 731), False, 'from django.db import migrations, models\n'), ((838, 1099), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')]", 'default': '(0)', 'help_text': '"""On a scale from 1 to 5, to what extent does the tweet express feelings of anger? (1 = Not at all; 5 = Extremely)"""', 'verbose_name': '"""q10"""'}), "(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4,\n '4'), (5, '5')], default=0, help_text=\n 'On a scale from 1 to 5, to what extent does the tweet express feelings of anger? (1 = Not at all; 5 = Extremely)'\n , verbose_name='q10')\n", (857, 1099), False, 'from django.db import migrations, models\n'), ((1202, 1462), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')]", 'default': '(0)', 'help_text': '"""On a scale from 1 to 5, to what extent does the tweet express feelings of fear? (1 = Not at all; 5 = Extremely)"""', 'verbose_name': '"""q11"""'}), "(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4,\n '4'), (5, '5')], default=0, help_text=\n 'On a scale from 1 to 5, to what extent does the tweet express feelings of fear? (1 = Not at all; 5 = Extremely)'\n , verbose_name='q11')\n", (1221, 1462), False, 'from django.db import migrations, models\n'), ((1564, 1736), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'help_text': '"""ERROR 2: the link is not working / does not refer to a news article or blog (tick box & continue to next tweet"""', 'verbose_name': '"""q2"""'}), "(help_text=\n 'ERROR 2: the link is not working / does not refer to a news article or blog (tick box & continue to next tweet'\n , verbose_name='q2')\n", (1583, 1736), False, 'from django.db import migrations, models\n'), ((1842, 1970), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'help_text': '"""Tick the box if the tweet doesn\'t contain any text next to the link"""', 'verbose_name': '"""q3"""'}), '(help_text=\n "Tick the box if the tweet doesn\'t contain any text next to the link",\n verbose_name=\'q3\')\n', (1861, 1970), False, 'from django.db import migrations, models\n'), ((2077, 2217), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'help_text': '"""Tick the box if the tweet only contains the title/header of the shared article"""', 'verbose_name': '"""q4"""'}), "(help_text=\n 'Tick the box if the tweet only contains the title/header of the shared article'\n , verbose_name='q4')\n", (2096, 2217), False, 'from django.db import migrations, models\n'), ((2323, 2685), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'The source does not contain a discernible opinion on vaccines'), (1,\n 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (\n 5, 'Strongly In Favor')]", 'default': '(0)', 'help_text': '"""To what extent would you describe the shared article as in favor or against the use of vaccines?"""', 'verbose_name': '"""q5"""'}), "(choices=[(0,\n 'The source does not contain a discernible opinion on vaccines'), (1,\n 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (\n 5, 'Strongly In Favor')], default=0, help_text=\n 'To what extent would you describe the shared article as in favor or against the use of vaccines?'\n , verbose_name='q5')\n", (2342, 2685), False, 'from django.db import migrations, models\n'), ((2778, 3142), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'The tweet does not contain a discernible opinion on vaccines'), (1,\n 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (\n 5, 'Strongly In Favor')]", 'default': '(0)', 'help_text': '"""To what extent would you describe the text in the tweet as in favor or against the use of vaccines?"""', 'verbose_name': '"""q6"""'}), "(choices=[(0,\n 'The tweet does not contain a discernible opinion on vaccines'), (1,\n 'Strongly Against'), (2, 'Against'), (3, 'Neutral'), (4, 'In Favor'), (\n 5, 'Strongly In Favor')], default=0, help_text=\n 'To what extent would you describe the text in the tweet as in favor or against the use of vaccines?'\n , verbose_name='q6')\n", (2797, 3142), False, 'from django.db import migrations, models\n'), ((3235, 3576), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'The tweet does not contain a discernible opinion towards the source'),\n (1, 'Strongly disagrees'), (2, 'disagrees'), (3, 'Neutral'), (4,\n 'Agrees'), (5, 'Strongly agrees')]", 'default': '(0)', 'help_text': '"""To what extent does the text in the tweet (dis)agree withqi the source?"""', 'verbose_name': '"""q7"""'}), "(choices=[(0,\n 'The tweet does not contain a discernible opinion towards the source'),\n (1, 'Strongly disagrees'), (2, 'disagrees'), (3, 'Neutral'), (4,\n 'Agrees'), (5, 'Strongly agrees')], default=0, help_text=\n 'To what extent does the text in the tweet (dis)agree withqi the source?',\n verbose_name='q7')\n", (3254, 3576), False, 'from django.db import migrations, models\n'), ((3671, 4035), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'The tweet does not contain a discernible opinion towards the source'),\n (1, 'Very Negative'), (2, 'Negative'), (3, 'Neutral'), (4, 'Positive'),\n (5, 'Very positive')]", 'default': '(0)', 'help_text': '"""To what extent would you describe the text in the tweet as positive or negative towards the source?"""', 'verbose_name': '"""q8"""'}), "(choices=[(0,\n 'The tweet does not contain a discernible opinion towards the source'),\n (1, 'Very Negative'), (2, 'Negative'), (3, 'Neutral'), (4, 'Positive'),\n (5, 'Very positive')], default=0, help_text=\n 'To what extent would you describe the text in the tweet as positive or negative towards the source?'\n , verbose_name='q8')\n", (3690, 4035), False, 'from django.db import migrations, models\n'), ((4129, 4394), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')]", 'default': '(0)', 'help_text': '"""On a scale from 1 to 5, to what extent does the tweet express feelings of enthusiasm? (1 = Not at all; 5 = Extremely)"""', 'verbose_name': '"""q9"""'}), "(choices=[(0, 'NA'), (1, '1'), (2, '2'), (3, '3'), (4,\n '4'), (5, '5')], default=0, help_text=\n 'On a scale from 1 to 5, to what extent does the tweet express feelings of enthusiasm? (1 = Not at all; 5 = Extremely)'\n , verbose_name='q9')\n", (4148, 4394), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from wikiapi import WikiApi
import requests, pprint
# This is suitable for extracting content that is organized by categories and sub-categories
# This code requires the wiki-api python library created by <NAME> of UK
# https://github.com/richardasaurus/wiki-api
# Note that the Wikipedia categories and sub-categories are not in a tree structure. There are circular references.
wiki = WikiApi()
wiki = WikiApi({ 'locale' : 'ta'}) # to specify your locale, 'en' is default
# Get the page text of the article with the given title
def getArticleParagraphs(title):
articleFull = wiki.get_article(title)
fullText = articleFull.content
chapter = ""
paragraphs = fullText.split('\n\n')
# print(paragraphs)
# We want only whole paragraphs that end in a ".", "!", "?" or '"' not fragments
for paragraph in paragraphs:
if len(paragraph) > 30:
end = paragraph[-1]
if end == '.' or end == '!' or end == '?' or end == '"':
chapter = chapter + "\n\n" + paragraph
return chapter
def __getTitlesForCategory(title,f):
# url = 'https://ta.wikipedia.org/w/api.php?action=query&list=categorymembers&cmnamespace=14&cmlimit=500&format=json&cmtitle=Category:வரலாறு'
# http://ta.wikipedia.org/w/api.php?action=query # Base Url
# &format=json # want data in JSON, default is XML
# &cmlimit=500 # முதல் 500 துணைப் பகுப்புகள் / கட்டுரைகள்
# &cmnamespace=14 # 14 - துணைப் பகுப்புகள்; 0 - கட்டுரைகள்
# &list=categorymembers
# &cmtitle=Category:வரலாறு # பகுப்பு = வரலாறு
articleTitles = []
baseUrl = 'https://ta.wikipedia.org/w/api.php?action=query&list=categorymembers&cmlimit=500&format=json'
# For extracting the Wikisource content
# In the wikiapi.py file change the following two lines
# api_uri = 'wikisource.org/w/api.php'
# article_uri = 'wikisource.org/wiki/'
# And change the baseUrl here as follows
# baseUrl = 'https://ta.wikisource.org/w/api.php?action=query&list=categorymembers&cmlimit=500&format=json'
namespaceUrl = '&cmnamespace='
categoryUrl = '&cmtitle=Category:'
articleNamespace = '0'
categoryNamespace = '14'
url = baseUrl + namespaceUrl + articleNamespace + categoryUrl + title
# print(url)
data = requests.get(url)
result = data.json()
pprint.pprint(result)
# Get all the article titles and write to the list
for item in result["query"]["categorymembers"]:
print(str(len(articleTitles)) + ": " + item['title'])
# Skip duplicate titles that are already in the list
if item['title'] not in articleTitles:
articleTitles.append(item['title'])
f.write(getArticleParagraphs(item['title']))
else:
break
# Safety check to avoid an infinite loop
if len(articleTitles) > 15000:
return
# Get all the sub-categories
url = baseUrl + namespaceUrl + categoryNamespace + categoryUrl + title
# print(url)
data = requests.get(url)
result = data.json()
# For each sub-category
for item in result["query"]["categorymembers"]:
print("Item title: " + item['title'])
# When we get the title of categories, we have to strip out the first 8 characters to get the title
cat = item['title'][8:]
getTitlesForCategory(cat)
def getTitlesForCategory(category='வரலாறு',outputfile='/your/folder/wikipedia_content.txt'):
f = open(outputfile, 'wt', encoding='utf-8')
articleTitles = __getTitlesForCategory(category,f)
print(len(articleTitles))
return articleTitles
|
[
"pprint.pprint",
"wikiapi.WikiApi",
"requests.get"
] |
[((432, 441), 'wikiapi.WikiApi', 'WikiApi', ([], {}), '()\n', (439, 441), False, 'from wikiapi import WikiApi\n'), ((449, 474), 'wikiapi.WikiApi', 'WikiApi', (["{'locale': 'ta'}"], {}), "({'locale': 'ta'})\n", (456, 474), False, 'from wikiapi import WikiApi\n'), ((2510, 2527), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2522, 2527), False, 'import requests, pprint\n'), ((2557, 2578), 'pprint.pprint', 'pprint.pprint', (['result'], {}), '(result)\n', (2570, 2578), False, 'import requests, pprint\n'), ((3238, 3255), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3250, 3255), False, 'import requests, pprint\n')]
|
import datetime
from google.auth import credentials
import json
class WorkloadIdentityCredentials(credentials.Scoped, credentials.Credentials):
def __init__(self, scopes):
super(WorkloadIdentityCredentials, self).__init__()
self._scopes = scopes
def with_scopes(self, scopes):
return WorkloadIdentityCredentials(scopes=scopes)
@property
def requires_scopes(self):
return False
def refresh(self, request):
url = ('http://metadata.google.internal/computeMetadata/'
'v1/instance/service-accounts/default/token')
if self._scopes:
url += '?scopes=' + ','.join(self._scopes)
response = request(url=url, method="GET", headers={
'Metadata-Flavor': 'Google'})
if response.status == 200:
response_json = json.loads(response.data)
else:
raise RuntimeError('bad status from metadata server')
self.token = response_json['access_token']
self.expiry = datetime.datetime.utcnow(
) + datetime.timedelta(seconds=response_json['expires_in'])
|
[
"datetime.datetime.utcnow",
"datetime.timedelta",
"json.loads"
] |
[((849, 874), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (859, 874), False, 'import json\n'), ((1028, 1054), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1052, 1054), False, 'import datetime\n'), ((1066, 1121), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': "response_json['expires_in']"}), "(seconds=response_json['expires_in'])\n", (1084, 1121), False, 'import datetime\n')]
|
# Generated by Django 3.2.5 on 2021-08-18 05:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0028_alter_participantrepayment_payment_1'),
]
operations = [
migrations.AddField(
model_name='participantrepayment',
name='pay_mount_1',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nominal Pembayaran Ke 1'),
),
migrations.AddField(
model_name='participantrepayment',
name='pay_mount_2',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nominal Pembayaran Ke 2'),
),
migrations.AddField(
model_name='participantrepayment',
name='pay_mount_3',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Nominal Pembayaran Ke 3'),
),
]
|
[
"django.db.models.CharField"
] |
[((373, 473), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""Nominal Pembayaran Ke 1"""'}), "(blank=True, max_length=100, null=True, verbose_name=\n 'Nominal Pembayaran Ke 1')\n", (389, 473), False, 'from django.db import migrations, models\n'), ((607, 707), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""Nominal Pembayaran Ke 2"""'}), "(blank=True, max_length=100, null=True, verbose_name=\n 'Nominal Pembayaran Ke 2')\n", (623, 707), False, 'from django.db import migrations, models\n'), ((841, 941), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""Nominal Pembayaran Ke 3"""'}), "(blank=True, max_length=100, null=True, verbose_name=\n 'Nominal Pembayaran Ke 3')\n", (857, 941), False, 'from django.db import migrations, models\n')]
|
# --------------------------------------------------------
# DenseFusion 6D Object Pose Estimation by Iterative Dense Fusion
# Licensed under The MIT License [see LICENSE for details]
# Written by Chen
# --------------------------------------------------------
import argparse
import os
import random
import time
import numpy as np
import torch
from pathlib import Path
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset
from DenseFusion.lib.network import PoseNet, PoseRefineNet
from DenseFusion.lib.loss import Loss
from DenseFusion.lib.loss_refiner import Loss_refine
#import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
import pc_reconstruction.open3d_utils as pc_utils
import json
from DenseFusion.tools.utils import *
from DenseFusion.lib.transformations import quaternion_matrix
def main(data_set_name, root, save_extra='', load_pretrained=True, load_trained=False, load_name='',
label_mode='new_pred', p_extra_data=0.0, p_viewpoints=1.0, show_sample=False, plot_train=False, device_num=0):
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8, help='batch size')
parser.add_argument('--workers', type=int, default=8, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help='learning rate')
parser.add_argument('--lr_rate', default=0.3, help='learning rate decay rate')
parser.add_argument('--w', default=0.015, help='learning rate')
parser.add_argument('--w_rate', default=0.3, help='learning rate decay rate')
parser.add_argument('--decay_margin', default=0.016, help='margin to decay lr & w')
parser.add_argument('--refine_margin', default=0.010, help='margin to start the training of iterative refinement')
parser.add_argument('--noise_trans', default=0.03,
help='range of the random noise of translation added to the training data')
parser.add_argument('--iteration', type=int, default=2, help='number of refinement iterations')
parser.add_argument('--nepoch', type=int, default=500, help='max number of epochs to train')
parser.add_argument('--refine_epoch_margin', type=int, default=400, help='max number of epochs to train')
parser.add_argument('--start_epoch', type=int, default=1, help='which epoch to start')
opt = parser.parse_args()
opt.manualSeed = random.randint(1, 10000)
torch.cuda.set_device(device_num)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
print('bs', opt.batch_size, 'it', opt.iteration)
opt.refine_start = False
opt.num_points = 1000 #number of points on the input pointcloud
opt.outf = os.path.join(root, 'DenseFusion/trained_models', data_set_name+save_extra) #folder to save trained models
if not os.path.exists(opt.outf):
os.makedirs(opt.outf)
opt.log_dir = os.path.join(root, 'DenseFusion/experiments/logs', data_set_name+save_extra) #folder to save logs
opt.log_dir_images = os.path.join(root, 'DenseFusion/experiments/logs', data_set_name+save_extra, 'images')
if not os.path.exists(opt.log_dir):
os.makedirs(opt.log_dir)
if not os.path.exists(opt.log_dir_images):
os.makedirs(opt.log_dir_images)
opt.repeat_epoch = 1 #number of repeat times for one epoch training
print('create datasets')
dataset = PoseDataset('train',
opt.num_points,
True,
0.0,
opt.refine_start,
data_set_name,
root,
show_sample=show_sample,
label_mode=label_mode,
p_extra_data=p_extra_data,
p_viewpoints=p_viewpoints)
test_dataset = PoseDataset('test',
opt.num_points,
False,
0.0,
opt.refine_start,
data_set_name,
root,
show_sample=show_sample,
label_mode=label_mode,
p_extra_data=p_extra_data,
p_viewpoints=p_viewpoints)
opt.num_objects = dataset.num_classes #number of object classes in the dataset
print('n classes: {}'.format(dataset.num_classes))
print('create models')
estimator = PoseNet(num_points=opt.num_points, num_obj=opt.num_objects)
estimator.cuda()
refiner = PoseRefineNet(num_points=opt.num_points, num_obj=opt.num_objects)
refiner.cuda()
if load_pretrained:
# load the pretrained estimator model on the ycb dataset, leave the last layer due to mismatch
init_state_dict = estimator.state_dict()
pretrained_dict = torch.load(os.path.join(root, 'DenseFusion/trained_models/pose_model.pth'))
pretrained_dict['conv4_r.weight'] = init_state_dict['conv4_r.weight']
pretrained_dict['conv4_r.bias'] = init_state_dict['conv4_r.bias']
pretrained_dict['conv4_t.weight'] = init_state_dict['conv4_t.weight']
pretrained_dict['conv4_t.bias'] = init_state_dict['conv4_t.bias']
pretrained_dict['conv4_c.weight'] = init_state_dict['conv4_c.weight']
pretrained_dict['conv4_c.bias'] = init_state_dict['conv4_c.bias']
estimator.load_state_dict(pretrained_dict)
del init_state_dict
del pretrained_dict
# load the pretrained refiner model on the ycb dataset, leave the last layer due to mismatch
init_state_dict = refiner.state_dict()
pretrained_dict = torch.load(os.path.join(root, 'DenseFusion/trained_models/pose_refine_model.pth'))
pretrained_dict['conv3_r.weight'] = init_state_dict['conv3_r.weight']
pretrained_dict['conv3_r.bias'] = init_state_dict['conv3_r.bias']
pretrained_dict['conv3_t.weight'] = init_state_dict['conv3_t.weight']
pretrained_dict['conv3_t.bias'] = init_state_dict['conv3_t.bias']
refiner.load_state_dict(pretrained_dict)
del init_state_dict
del pretrained_dict
elif load_trained:
loading_path = os.path.join(root, 'DenseFusion/trained_models/{}/pose_model.pth'.format(load_name))
pretrained_dict = torch.load(loading_path)
estimator.load_state_dict(pretrained_dict)
loading_path = os.path.join(root, 'DenseFusion/trained_models/{}/pose_refine_model.pth'.format(load_name))
pretrained_dict = torch.load(loading_path)
refiner.load_state_dict(pretrained_dict)
del pretrained_dict
print('create optimizer and dataloader')
#opt.refine_start = False
opt.decay_start = False
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=opt.workers,
# collate_fn=collate_fn)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
opt.sym_list = dataset.get_sym_list()
opt.num_points_mesh = dataset.get_num_points_mesh()
print('>>>>>>>>----------Dataset loaded!---------<<<<<<<<\nlength of the training set: {0}'
'\nlength of the testing set: {1}\nnumber of sample points on mesh: {2}\nsymmetry object list: {3}'.format(
len(dataset), len(test_dataset), opt.num_points_mesh, opt.sym_list))
criterion = Loss(opt.num_points_mesh, opt.sym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
best_test = np.Inf
best_test_epoch = 0
best_train = np.Inf
best_train_epoch = 0
if opt.start_epoch == 1:
for log in os.listdir(opt.log_dir):
if log !='images':
os.remove(os.path.join(opt.log_dir, log))
for img in os.listdir(opt.log_dir_images):
os.remove(os.path.join(opt.log_dir_images, img))
train_dists = []
test_dists = []
losses = []
refiner_losses = []
best_loss = np.inf
best_loss_epoch = 0
elapsed_times = 0.0
for epoch in range(opt.start_epoch, opt.nepoch):
start_time = time.time()
train_count = 0
train_dis_avg = 0.0
if opt.refine_start:
estimator.eval()
refiner.train()
else:
estimator.train()
optimizer.zero_grad()
epoch_losses = []
epoch_losses_refiner = []
for rep in range(opt.repeat_epoch):
#for batch in dataloader:
#points, choose, img, target, model_points, idx = batch
#print(points.shape, choose.shape, img.shape, target.shape, model_points.shape)
for i, data in enumerate(dataloader, 0):
points, choose, img, target, model_points, idx = data
#print(points.shape, choose.shape, img.shape, target.shape, model_points.shape)
points, choose, img, target, model_points, idx = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target).cuda(), \
Variable(model_points).cuda(), \
Variable(idx).cuda()
pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
loss, dis, new_points, new_target, pred = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
epoch_losses.append(loss.item())
if opt.refine_start:
for ite in range(0, opt.iteration):
pred_r, pred_t = refiner(new_points, emb, idx)
dis, new_points, new_target, pred = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)
dis.backward()
epoch_losses_refiner.append(dis.item())
else:
loss.backward()
epoch_losses_refiner.append(0)
train_dis_avg += dis.item()
train_count += 1
# make step after one epoch
if train_count % opt.batch_size == 0:
optimizer.step()
optimizer.zero_grad()
# make last step of epoch if something is remaining
if train_count % opt.batch_size != 0:
optimizer.step()
optimizer.zero_grad()
refiner_losses.append(np.mean(epoch_losses_refiner))
losses.append(np.mean(epoch_losses))
if losses[-1] < best_loss:
best_loss = losses[-1]
best_loss_epoch = epoch
train_dists.append(train_dis_avg/train_count)
if train_dists[-1] < best_train:
best_train_epoch = epoch
best_train = train_dists[-1]
test_dis = 0.0
test_count = 0
estimator.eval()
refiner.eval()
if plot_train:
# plot randomly selected validation preds
jj = 0
x_axis = 0
fig_x = 4
fig_y = 4
log_indexes = sorted(list(np.random.choice(list(range(len(testdataloader))), int(fig_x*(fig_y/2)), replace=False)))
plt.cla()
plt.close('all')
fig, axs = plt.subplots(fig_x, fig_y, constrained_layout=True, figsize=(25, 15))
for j, data in enumerate(testdataloader, 0):
points, choose, img, target, model_points, idx, intr, np_img = data
points, choose, img, target, model_points, idx = Variable(points).cuda(), \
Variable(choose).cuda(), \
Variable(img).cuda(), \
Variable(target).cuda(), \
Variable(model_points).cuda(), \
Variable(idx).cuda()
pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
if plot_train:
if j in log_indexes:
my_pred, my_r, my_t = my_estimator_prediction(pred_r, pred_t, pred_c, opt.num_points, 1, points)
_, dis, new_points, new_target, pred = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
if opt.refine_start:
for ite in range(0, opt.iteration):
pred_r, pred_t = refiner(new_points, emb, idx)
if plot_train:
if j in log_indexes:
my_pred, my_r, my_t = my_refined_prediction(pred_r, pred_t, my_r, my_t)
dis, new_points, new_target, pred = criterion_refine(pred_r, pred_t, new_target, model_points, idx, new_points)
if plot_train:
if j in log_indexes:
if jj == 4:
jj = 0
x_axis += 1
my_r = quaternion_matrix(my_r)[:3, :3]
np_pred = np.dot(model_points[0].data.cpu().numpy(), my_r.T)
np_pred = np.add(np_pred, my_t)
np_target = target[0].data.cpu().numpy()
np_img = np_img[0].data.numpy()
image_target = pc_utils.pointcloud2image(np_img.copy(), np_target, 3, intr)
image_prediction = pc_utils.pointcloud2image(np_img.copy(), np_pred, 3, intr)
axs[x_axis, jj].imshow(image_target)
axs[x_axis, jj].set_title('target {}'.format(j))
axs[x_axis, jj].set_axis_off()
jj += 1
axs[x_axis, jj].imshow(image_prediction)
axs[x_axis, jj].set_title('prediction {}'.format(j))
axs[x_axis, jj].set_axis_off()
jj += 1
test_dis += dis.item()
test_count += 1
test_dis = test_dis / test_count
test_dists.append(test_dis)
if plot_train:
fig.suptitle('epoch {}, with a average dist: {}'.format(epoch, test_dis), fontsize=16)
plt.savefig(os.path.join(opt.log_dir_images, 'test_images_epoch_{}.png'.format(epoch)))
if epoch > 1:
plt.close('all')
plt.cla()
fig, axs = plt.subplots(2, 2, constrained_layout=True, figsize=(30, 20))
axs[0, 0].plot(losses)
axs[0, 0].set_title('Training estimator loss')
axs[0, 0].set_xlabel('Epochs')
axs[0, 0].set_ylabel('Loss')
axs[0, 1].plot(refiner_losses)
axs[0, 1].set_title('Training refiner loss')
axs[0, 1].set_xlabel('Epochs')
axs[0, 1].set_ylabel('Loss')
axs[1, 0].plot(train_dists)
axs[1, 0].set_title('Training Avg. distance')
axs[1, 0].set_xlabel('Epochs')
axs[1, 0].set_ylabel('Avg. distance [m]')
axs[1, 1].plot(test_dists)
axs[1, 1].set_title('Test Avg. distance')
axs[1, 1].set_xlabel('Epochs')
axs[1, 1].set_ylabel('Avg. distance [m]')
plt.savefig(os.path.join(opt.log_dir_images, 'losses.png'))
out_dict = {
'losses': losses,
'refiner_losses': refiner_losses,
'train_dists': train_dists,
'test_dists': test_dists
}
with open(os.path.join(opt.log_dir, 'losses.json'), 'w') as outfile:
json.dump(out_dict, outfile)
del out_dict
print('>>>>>>>>----------Epoch {0} finished---------<<<<<<<<'.format(epoch))
if test_dis <= best_test:
best_test = test_dis
best_test_epoch = epoch
if opt.refine_start:
state_dict = refiner.state_dict()
torch.save(state_dict, '{0}/pose_refine_model.pth'.format(opt.outf))
del state_dict
else:
state_dict = estimator.state_dict()
torch.save(state_dict, '{0}/pose_model.pth'.format(opt.outf))
del state_dict
print('>>>>>>>>----------MODEL SAVED---------<<<<<<<<')
t_elapsed = time.time() - start_time
elapsed_times += t_elapsed/3600
print('elapsed time: {} min, total elapsed time: {} hours'.format(
np.round(t_elapsed/60, 2), np.round(elapsed_times), 2))
print('Train loss : {}'.format(losses[-1]))
print('Best train loss {} : {}'.format(best_loss_epoch, best_loss))
print('Train dist : {}'.format(train_dists[-1]))
print('Best train dist {} : {}'.format(best_train_epoch, best_train))
print('Test dist : {}'.format(test_dists[-1]))
print('Best test dist {} : {}'.format(best_test_epoch, best_test))
# changing stuff during training if...
if best_test < opt.decay_margin and not opt.decay_start:
print('decay lr')
opt.decay_start = True
opt.lr *= opt.lr_rate
opt.w *= opt.w_rate
optimizer = optim.Adam(estimator.parameters(), lr=opt.lr)
if (best_test < opt.refine_margin or epoch >= opt.refine_epoch_margin) and not opt.refine_start:
#print('train refiner')
opt.refine_start = True
print('bs', opt.batch_size, 'it', opt.iteration)
opt.batch_size = int(opt.batch_size / opt.iteration)
print('new bs', opt.batch_size)
optimizer = optim.Adam(refiner.parameters(), lr=opt.lr)
#dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=opt.workers)
#testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
#opt.sym_list = dataset.get_sym_list()
#opt.num_points_mesh = dataset.get_num_points_mesh()
print('>>>>>>>>----------train refiner!---------<<<<<<<<')
criterion = Loss(opt.num_points_mesh, opt.sym_list)
criterion_refine = Loss_refine(opt.num_points_mesh, opt.sym_list)
if __name__ == '__main__':
data_set_name = 'bluedude_solo'
save_extra = '_test4'
root = Path(__file__).resolve().parent.parent.parent
main(data_set_name, root, save_extra=save_extra)
|
[
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.mean",
"os.path.join",
"numpy.round",
"random.randint",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"torch.load",
"os.path.exists",
"DenseFusion.lib.network.PoseNet",
"random.seed",
"matplotlib.pyplot.cla",
"DenseFusion.datasets.myDatasetAugmented.dataset.PoseDataset",
"torch.cuda.set_device",
"numpy.add",
"matplotlib.pyplot.subplots",
"json.dump",
"DenseFusion.lib.loss.Loss",
"torch.manual_seed",
"torch.autograd.Variable",
"DenseFusion.lib.network.PoseRefineNet",
"DenseFusion.lib.transformations.quaternion_matrix",
"os.listdir",
"DenseFusion.lib.loss_refiner.Loss_refine",
"os.makedirs",
"time.time"
] |
[((1184, 1209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1207, 1209), False, 'import argparse\n'), ((2502, 2526), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2516, 2526), False, 'import random\n'), ((2532, 2565), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device_num'], {}), '(device_num)\n', (2553, 2565), False, 'import torch\n'), ((2571, 2598), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2582, 2598), False, 'import random\n'), ((2603, 2636), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2620, 2636), False, 'import torch\n'), ((2804, 2880), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/trained_models"""', '(data_set_name + save_extra)'], {}), "(root, 'DenseFusion/trained_models', data_set_name + save_extra)\n", (2816, 2880), False, 'import os\n'), ((2995, 3073), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/experiments/logs"""', '(data_set_name + save_extra)'], {}), "(root, 'DenseFusion/experiments/logs', data_set_name + save_extra)\n", (3007, 3073), False, 'import os\n'), ((3118, 3210), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/experiments/logs"""', '(data_set_name + save_extra)', '"""images"""'], {}), "(root, 'DenseFusion/experiments/logs', data_set_name +\n save_extra, 'images')\n", (3130, 3210), False, 'import os\n'), ((3482, 3678), 'DenseFusion.datasets.myDatasetAugmented.dataset.PoseDataset', 'PoseDataset', (['"""train"""', 'opt.num_points', '(True)', '(0.0)', 'opt.refine_start', 'data_set_name', 'root'], {'show_sample': 'show_sample', 'label_mode': 'label_mode', 'p_extra_data': 'p_extra_data', 'p_viewpoints': 'p_viewpoints'}), "('train', opt.num_points, True, 0.0, opt.refine_start,\n data_set_name, root, show_sample=show_sample, label_mode=label_mode,\n p_extra_data=p_extra_data, p_viewpoints=p_viewpoints)\n", (3493, 3678), False, 'from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset\n'), ((3951, 4147), 'DenseFusion.datasets.myDatasetAugmented.dataset.PoseDataset', 'PoseDataset', (['"""test"""', 'opt.num_points', '(False)', '(0.0)', 'opt.refine_start', 'data_set_name', 'root'], {'show_sample': 'show_sample', 'label_mode': 'label_mode', 'p_extra_data': 'p_extra_data', 'p_viewpoints': 'p_viewpoints'}), "('test', opt.num_points, False, 0.0, opt.refine_start,\n data_set_name, root, show_sample=show_sample, label_mode=label_mode,\n p_extra_data=p_extra_data, p_viewpoints=p_viewpoints)\n", (3962, 4147), False, 'from DenseFusion.datasets.myDatasetAugmented.dataset import PoseDataset\n'), ((4634, 4693), 'DenseFusion.lib.network.PoseNet', 'PoseNet', ([], {'num_points': 'opt.num_points', 'num_obj': 'opt.num_objects'}), '(num_points=opt.num_points, num_obj=opt.num_objects)\n', (4641, 4693), False, 'from DenseFusion.lib.network import PoseNet, PoseRefineNet\n'), ((4729, 4794), 'DenseFusion.lib.network.PoseRefineNet', 'PoseRefineNet', ([], {'num_points': 'opt.num_points', 'num_obj': 'opt.num_objects'}), '(num_points=opt.num_points, num_obj=opt.num_objects)\n', (4742, 4794), False, 'from DenseFusion.lib.network import PoseNet, PoseRefineNet\n'), ((7164, 7257), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': 'opt.workers'}), '(dataset, batch_size=1, shuffle=True,\n num_workers=opt.workers)\n', (7191, 7257), False, 'import torch\n'), ((7276, 7375), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'opt.workers'}), '(test_dataset, batch_size=1, shuffle=False,\n num_workers=opt.workers)\n', (7303, 7375), False, 'import torch\n'), ((7784, 7823), 'DenseFusion.lib.loss.Loss', 'Loss', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (7788, 7823), False, 'from DenseFusion.lib.loss import Loss\n'), ((7847, 7893), 'DenseFusion.lib.loss_refiner.Loss_refine', 'Loss_refine', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (7858, 7893), False, 'from DenseFusion.lib.loss_refiner import Loss_refine\n'), ((2921, 2945), 'os.path.exists', 'os.path.exists', (['opt.outf'], {}), '(opt.outf)\n', (2935, 2945), False, 'import os\n'), ((2955, 2976), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (2966, 2976), False, 'import os\n'), ((3216, 3243), 'os.path.exists', 'os.path.exists', (['opt.log_dir'], {}), '(opt.log_dir)\n', (3230, 3243), False, 'import os\n'), ((3253, 3277), 'os.makedirs', 'os.makedirs', (['opt.log_dir'], {}), '(opt.log_dir)\n', (3264, 3277), False, 'import os\n'), ((3289, 3323), 'os.path.exists', 'os.path.exists', (['opt.log_dir_images'], {}), '(opt.log_dir_images)\n', (3303, 3323), False, 'import os\n'), ((3333, 3364), 'os.makedirs', 'os.makedirs', (['opt.log_dir_images'], {}), '(opt.log_dir_images)\n', (3344, 3364), False, 'import os\n'), ((8039, 8062), 'os.listdir', 'os.listdir', (['opt.log_dir'], {}), '(opt.log_dir)\n', (8049, 8062), False, 'import os\n'), ((8172, 8202), 'os.listdir', 'os.listdir', (['opt.log_dir_images'], {}), '(opt.log_dir_images)\n', (8182, 8202), False, 'import os\n'), ((8493, 8504), 'time.time', 'time.time', ([], {}), '()\n', (8502, 8504), False, 'import time\n'), ((5028, 5091), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/trained_models/pose_model.pth"""'], {}), "(root, 'DenseFusion/trained_models/pose_model.pth')\n", (5040, 5091), False, 'import os\n'), ((5843, 5913), 'os.path.join', 'os.path.join', (['root', '"""DenseFusion/trained_models/pose_refine_model.pth"""'], {}), "(root, 'DenseFusion/trained_models/pose_refine_model.pth')\n", (5855, 5913), False, 'import os\n'), ((6482, 6506), 'torch.load', 'torch.load', (['loading_path'], {}), '(loading_path)\n', (6492, 6506), False, 'import torch\n'), ((6700, 6724), 'torch.load', 'torch.load', (['loading_path'], {}), '(loading_path)\n', (6710, 6724), False, 'import torch\n'), ((11062, 11091), 'numpy.mean', 'np.mean', (['epoch_losses_refiner'], {}), '(epoch_losses_refiner)\n', (11069, 11091), True, 'import numpy as np\n'), ((11115, 11136), 'numpy.mean', 'np.mean', (['epoch_losses'], {}), '(epoch_losses)\n', (11122, 11136), True, 'import numpy as np\n'), ((11818, 11827), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (11825, 11827), True, 'from matplotlib import pyplot as plt\n'), ((11840, 11856), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11849, 11856), True, 'from matplotlib import pyplot as plt\n'), ((11880, 11949), 'matplotlib.pyplot.subplots', 'plt.subplots', (['fig_x', 'fig_y'], {'constrained_layout': '(True)', 'figsize': '(25, 15)'}), '(fig_x, fig_y, constrained_layout=True, figsize=(25, 15))\n', (11892, 11949), True, 'from matplotlib import pyplot as plt\n'), ((16272, 16300), 'json.dump', 'json.dump', (['out_dict', 'outfile'], {}), '(out_dict, outfile)\n', (16281, 16300), False, 'import json\n'), ((16980, 16991), 'time.time', 'time.time', ([], {}), '()\n', (16989, 16991), False, 'import time\n'), ((18821, 18860), 'DenseFusion.lib.loss.Loss', 'Loss', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (18825, 18860), False, 'from DenseFusion.lib.loss import Loss\n'), ((18892, 18938), 'DenseFusion.lib.loss_refiner.Loss_refine', 'Loss_refine', (['opt.num_points_mesh', 'opt.sym_list'], {}), '(opt.num_points_mesh, opt.sym_list)\n', (18903, 18938), False, 'from DenseFusion.lib.loss_refiner import Loss_refine\n'), ((8226, 8263), 'os.path.join', 'os.path.join', (['opt.log_dir_images', 'img'], {}), '(opt.log_dir_images, img)\n', (8238, 8263), False, 'import os\n'), ((14975, 14991), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (14984, 14991), True, 'from matplotlib import pyplot as plt\n'), ((15008, 15017), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (15015, 15017), True, 'from matplotlib import pyplot as plt\n'), ((15045, 15106), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'constrained_layout': '(True)', 'figsize': '(30, 20)'}), '(2, 2, constrained_layout=True, figsize=(30, 20))\n', (15057, 15106), True, 'from matplotlib import pyplot as plt\n'), ((16201, 16241), 'os.path.join', 'os.path.join', (['opt.log_dir', '"""losses.json"""'], {}), "(opt.log_dir, 'losses.json')\n", (16213, 16241), False, 'import os\n'), ((17132, 17159), 'numpy.round', 'np.round', (['(t_elapsed / 60)', '(2)'], {}), '(t_elapsed / 60, 2)\n', (17140, 17159), True, 'import numpy as np\n'), ((17159, 17182), 'numpy.round', 'np.round', (['elapsed_times'], {}), '(elapsed_times)\n', (17167, 17182), True, 'import numpy as np\n'), ((8121, 8151), 'os.path.join', 'os.path.join', (['opt.log_dir', 'log'], {}), '(opt.log_dir, log)\n', (8133, 8151), False, 'import os\n'), ((13816, 13837), 'numpy.add', 'np.add', (['np_pred', 'my_t'], {}), '(np_pred, my_t)\n', (13822, 13837), True, 'import numpy as np\n'), ((15950, 15996), 'os.path.join', 'os.path.join', (['opt.log_dir_images', '"""losses.png"""'], {}), "(opt.log_dir_images, 'losses.png')\n", (15962, 15996), False, 'import os\n'), ((12145, 12161), 'torch.autograd.Variable', 'Variable', (['points'], {}), '(points)\n', (12153, 12161), False, 'from torch.autograd import Variable\n'), ((12233, 12249), 'torch.autograd.Variable', 'Variable', (['choose'], {}), '(choose)\n', (12241, 12249), False, 'from torch.autograd import Variable\n'), ((12321, 12334), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (12329, 12334), False, 'from torch.autograd import Variable\n'), ((12406, 12422), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (12414, 12422), False, 'from torch.autograd import Variable\n'), ((12494, 12516), 'torch.autograd.Variable', 'Variable', (['model_points'], {}), '(model_points)\n', (12502, 12516), False, 'from torch.autograd import Variable\n'), ((12588, 12601), 'torch.autograd.Variable', 'Variable', (['idx'], {}), '(idx)\n', (12596, 12601), False, 'from torch.autograd import Variable\n'), ((13673, 13696), 'DenseFusion.lib.transformations.quaternion_matrix', 'quaternion_matrix', (['my_r'], {}), '(my_r)\n', (13690, 13696), False, 'from DenseFusion.lib.transformations import quaternion_matrix\n'), ((19041, 19055), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (19045, 19055), False, 'from pathlib import Path\n'), ((9315, 9331), 'torch.autograd.Variable', 'Variable', (['points'], {}), '(points)\n', (9323, 9331), False, 'from torch.autograd import Variable\n'), ((9407, 9423), 'torch.autograd.Variable', 'Variable', (['choose'], {}), '(choose)\n', (9415, 9423), False, 'from torch.autograd import Variable\n'), ((9499, 9512), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (9507, 9512), False, 'from torch.autograd import Variable\n'), ((9588, 9604), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (9596, 9604), False, 'from torch.autograd import Variable\n'), ((9680, 9702), 'torch.autograd.Variable', 'Variable', (['model_points'], {}), '(model_points)\n', (9688, 9702), False, 'from torch.autograd import Variable\n'), ((9778, 9791), 'torch.autograd.Variable', 'Variable', (['idx'], {}), '(idx)\n', (9786, 9791), False, 'from torch.autograd import Variable\n')]
|
import csv
import os
import re
from operator import itemgetter
from typing import Tuple
from urllib import parse
import json
from itertools import groupby
from markdown.extensions.toc import slugify
from iir import xml_to_filt
def extract_from_repo(path1: str, path2: str, content_type: str):
'''
extracts beq_metadata of following format
<beq_metadata>
<beq_title>9</beq_title>
<beq_alt_title />
<beq_sortTitle>9</beq_sortTitle>
<beq_year>2009</beq_year>
<beq_spectrumURL>https://i.imgur.com/aRic6II.jpg</beq_spectrumURL>
<beq_pvaURL>https://i.imgur.com/4DReGr5.jpg</beq_pvaURL>
<beq_edition />
<beq_season />
<beq_note />
<beq_warning />
<beq_gain>-1 gain</beq_gain>
<beq_language>English</beq_language>
<beq_source>Disc</beq_source>
<beq_author>aron7awol</beq_author>
<beq_avs>https://www.avsforum.com/threads/bass-eq-for-filtered-movies.2995212/post-57282106</beq_avs>
<beq_theMovieDB>12244</beq_theMovieDB>
<beq_poster>/usfcQZRqdXTSSQ55esiPHJZKkIU.jpg</beq_poster>
<beq_runtime>79</beq_runtime>
<beq_audioTypes>
<audioType>DTS-HD MA 5.1</audioType>
</beq_audioTypes>
<beq_genres>
<genre id="878">Science Fiction</genre>
<genre id="16">Animation</genre>
<genre id="12">Adventure</genre>
<genre id="28">Action</genre>
<genre id="53">Thriller</genre>
</beq_genres>
</beq_metadata>
new season format replaces beq_season with
<beq_season id="92137">
<number>1</number>
<poster>/q1X7Ev3Hcr0Q7aUiWgw1ZUZf1QZ.jpg</poster>
<episodes count="8">1,2,3,4,5,6,7,8</episodes>
</beq_season>
:return:
'''
import xml.etree.ElementTree as ET
import glob
elements = []
for xml in glob.glob(f"{path1}{path2}/**/*.xml", recursive=True):
et_tree = ET.parse(str(xml))
root = et_tree.getroot()
file_name = xml[:-4]
meta = {
'repo_file': str(xml),
'file_name': file_name.split('/')[-1],
'file_path': '/'.join(file_name[len(path1):].split('/')[:-1]),
'content_type': content_type
}
for child in root:
if child.tag == 'beq_metadata':
for m in child:
if len(m) == 0:
txt = m.text
if txt:
meta[m.tag[4:]] = m.text
elif m.tag == 'beq_audioTypes':
audio_types = [c.text.strip() for c in m]
meta['audioType'] = [at for at in audio_types if at]
elif m.tag == 'beq_season':
parse_season(m, meta, xml)
elif m.tag == 'beq_genres':
genres = [c.text.strip() for c in m]
meta['genres'] = [at for at in genres if at]
filts = [f for f in xml_to_filt(xml, unroll=True)]
meta['jsonfilters'] = [f.to_map() for f in filts]
meta['filters'] = '^'.join([str(f) for f in filts])
elements.append(meta)
return elements
def parse_season(m, meta, xml):
try:
meta['season'] = {'id': m.attrib['id']}
for c in m:
if c.tag == 'episodes':
meta['season']['episode_count'] = c.attrib['count']
meta['season'][c.tag] = c.text
complete = True
if 'episode_count' in meta['season'] and 'episodes' in meta['season']:
count = int(meta['season']['episode_count'])
epi_txt = meta['season']['episodes']
if epi_txt:
episodes = [int(e) for e in meta['season']['episodes'].split(',')]
for c in range(count):
if c + 1 not in episodes:
complete = False
meta['season']['complete'] = complete
except:
print(f"Unable to parse season info from {xml}")
def group_mobe1969_film_content(content_meta):
by_title = {}
fallback_pattern = re.compile(r'(.*) \((\d{4})\)(?: *\(.*\))? (.*)')
for meta in content_meta:
if 'title' in meta:
title = meta['title']
if title in by_title:
by_title[title].append(meta)
else:
by_title[title] = [meta]
else:
json = {
'title': meta['file_name'],
'author': 'mobe1969',
'content_type': meta['content_type']
}
match = fallback_pattern.match(meta['file_name'])
if match:
json['title'] = match.group(1)
json['year'] = match.group(2)
json['audioTypes'] = match.group(3).split('+')
print(f"Missing title entry, extracted {json}")
json['filters'] = meta['jsonfilters']
json_catalogue.append(json)
return by_title
def group_mobe1969_tv_content(content_meta):
by_title = {}
fallback_pattern = re.compile(r'(.*) \((\d{4})\)(?: *\(.*\))? (.*)')
for meta in content_meta:
if 'title' in meta:
title = meta['title']
if title[-4:-2] == ' E' and title[-2:].isdigit():
meta['episode'] = title[-2:]
title = title[:-4]
meta['title'] = title
elif 'note' in meta:
note = meta['note']
if note[0] == 'E':
if note[1:].isdigit():
meta['episode'] = note[1:]
elif '-' in note[1:]:
vals = [int(i) for i in note[1:].split('-')]
if len(vals) == 2:
meta['episode'] = ','.join([str(e) for e in range(vals[0], vals[1] + 1)])
elif note[0] == 'S':
frags = note.split('-')
if len(frags) == 2:
if frags[1][0] == 'E':
if frags[1][1:].isdigit():
meta['episode'] = frags[1][1:]
if 'episode' not in meta:
print(f"Unknown note format in {meta}")
if title in by_title:
by_title[title].append(meta)
else:
by_title[title] = [meta]
else:
json = {
'title': meta['file_name'],
'author': 'mobe1969',
'content_type': meta['content_type']
}
match = fallback_pattern.match(meta['file_name'])
if match:
json['title'] = match.group(1)
json['year'] = match.group(2)
json['audioTypes'] = match.group(3).split('+')
print(f"Missing title entry, extracted {json}")
json['filters'] = meta['jsonfilters']
json_catalogue.append(json)
return by_title
def process_mobe1969_content_from_repo(content_meta, index_entries, content_type):
''' converts beq_metadata into md '''
if content_type == 'film':
by_title = group_mobe1969_film_content(content_meta)
else:
by_title = group_mobe1969_tv_content(content_meta)
for title, metas in by_title.items():
title_md = slugify(title, '-')
with open(f"docs/mobe1969/{title_md}.md", mode='w+') as content_md:
generate_content_page(title_md, metas, content_md, index_entries, 'mobe1969', content_type)
def process_aron7awol_content_from_repo(content_meta, index_entries, content_type):
''' converts beq_metadata into md '''
for post_id, metas in group_aron7awol_content(content_meta, content_type).items():
with open(f"docs/aron7awol/{post_id}.md", mode='w+') as content_md:
generate_content_page(post_id, metas, content_md, index_entries, 'aron7awol', content_type)
def group_aron7awol_content(content_meta, content_type) -> dict:
grouped_meta = {}
if content_type == 'film':
for meta in content_meta:
if 'avs' in meta:
avs = meta['avs']
idx = avs.find('post?id=')
avs_post_id = None
if idx == -1:
idx = avs.find('post-')
if idx == -1:
print(f"Unparsable post id {meta['repo_file']} - {avs}")
else:
avs_post_id = avs[idx + 5:]
else:
avs_post_id = avs[idx + 8:]
if avs_post_id:
if avs_post_id in grouped_meta:
grouped_meta[avs_post_id].append(meta)
else:
grouped_meta[avs_post_id] = [meta]
else:
print(f"Missing beq_avs entry for {meta['repo_file']}")
else:
for meta in content_meta:
if 'title' in meta:
title = slugify(meta['title'], '-')
if title in grouped_meta:
grouped_meta[title].append(meta)
else:
grouped_meta[title] = [meta]
return grouped_meta
def generate_content_page(page_name, metas, content_md, index_entries, author, content_type):
if content_type == 'film':
generate_film_content_page(page_name, metas, content_md, index_entries, author)
else:
generate_tv_content_page(page_name, metas, content_md, index_entries, author)
def generate_film_content_page(page_name, metas, content_md, index_entries, author):
''' prints the md content page '''
print(f"# {metas[0]['title']}", file=content_md)
print("", file=content_md)
print(f"* Author: {author}", file=content_md)
if 'avs' in metas[0]:
print(f"* [Forum Post]({metas[0]['avs']})", file=content_md)
production_years = {m['year'] for m in metas}
img_idx = 0
if len(production_years) == 1:
print(f"* Production Year: {production_years.pop()}", file=content_md)
print("", file=content_md)
for meta in sorted(metas, key=lambda m: ', '.join(m.get('audioType', ''))):
if 'pvaURL' not in meta and 'spectrumURL' not in meta:
print(f"No charts found in {meta}")
else:
audio_type = meta.get('audioType', '')
beq_catalogue_url = ''
actual_img_links = []
if 'pvaURL' in meta:
actual_img_links.append(meta['pvaURL'])
if 'spectrumURL' in meta:
actual_img_links.append(meta['spectrumURL'])
if audio_type:
linked_content_format = ', '.join(audio_type)
print(f"## {linked_content_format}", file=content_md)
print("", file=content_md)
if production_years:
print(f"* Production Year: {meta['year']}", file=content_md)
print("", file=content_md)
for img in actual_img_links:
print(f"", file=content_md)
print('', file=content_md)
bd_url = generate_index_entry(author, page_name, linked_content_format, meta['title'], meta['year'],
meta.get('avs', None), len(metas) > 1, index_entries)
prefix = 'https://beqcatalogue.readthedocs.io/en/latest'
beq_catalogue_url = f"{prefix}/{author}/{page_name}/#{slugify(linked_content_format, '-')}"
cols = [
meta['title'],
meta['year'],
linked_content_format,
author,
meta.get('avs', ''),
beq_catalogue_url,
bd_url,
meta['filters']
]
db_writer.writerow(cols + actual_img_links)
else:
print(f"No audioTypes in {metas[0]['title']}")
json_catalogue.append({
'title': meta['title'],
'year': meta['year'],
'audioTypes': meta.get('audioType', []),
'content_type': 'film',
'author': author,
'catalogue_url': beq_catalogue_url,
'filters': meta['jsonfilters'],
'images': actual_img_links,
'warning': meta.get('warning', ''),
'mv': meta.get('gain', '0'),
'avs': meta.get('avs', ''),
'sortTitle': meta.get('sortTitle', ''),
'edition': meta.get('edition', ''),
'note': meta.get('note', ''),
'language': meta.get('language', ''),
'source': meta.get('source', ''),
'overview': meta.get('overview', ''),
'theMovieDB': meta.get('theMovieDB', ''),
'rating': meta.get('rating', ''),
'runtime': meta.get('runtime', '0'),
'genres': meta.get('genres', [])
})
def format_season_episode(m) -> Tuple[str, str, str, str]:
long_season_episode = ''
short_season_episode = ''
season = ''
episodes = ''
if 'season' in m:
season_meta = m['season']
if isinstance(season_meta, str):
season = season_meta
long_season_episode = f"Season {season}"
short_season_episode = f"S{season}"
if 'episode' in m:
episodes = m['episode']
long_season_episode += f" Episode {episodes}"
short_season_episode += f"E{episodes}"
else:
season = season_meta['number']
long_season_episode = f"Season {season}"
short_season_episode = f"S{season}"
if not season_meta['complete']:
episodes = season_meta['episodes']
to_print = episodes
s = ''
if ',' in episodes:
epi_nums = [int(e) for e in episodes.split(',')]
if len(epi_nums) > 1:
ranges = []
for k, g in groupby(enumerate(epi_nums), lambda t: t[0] - t[1]):
group = list(map(itemgetter(1), g))
if group[0] == group[-1]:
ranges.append(f"{group[0]}")
else:
ranges.append(f"{group[0]}-{group[-1]}")
to_print = ', '.join(ranges)
s = 's'
long_season_episode += f" Episode{s} {to_print}"
short_season_episode += f"E{to_print}"
return long_season_episode, short_season_episode, season, episodes
def generate_tv_content_page(page_name, metas, content_md, index_entries, author):
''' prints the md content page '''
print(f"# {metas[0]['title']}", file=content_md)
print("", file=content_md)
print(f"* Author: {author}", file=content_md)
img_idx = 0
print("", file=content_md)
def sort_meta(m):
sort_key = ''
if 'season' in m:
season_meta = m['season']
if isinstance(season_meta, str):
sort_key = season_meta
if 'episode' in m:
sort_key += m['episode']
else:
sort_key = season_meta['number']
if not season_meta['complete']:
sort_key += season_meta['episodes']
return sort_key
for meta in sorted(metas, key=sort_meta):
audio_type = meta.get('audioType', '')
linked_content_format = ''
actual_img_links = []
long_season, short_season, season, episodes = format_season_episode(meta)
if 'pvaURL' in meta:
actual_img_links.append(meta['pvaURL'])
if 'spectrumURL' in meta:
actual_img_links.append(meta['spectrumURL'])
if long_season:
print(f"## {long_season}", file=content_md)
print("", file=content_md)
if audio_type:
linked_content_format = ', '.join(audio_type)
print(f"* {linked_content_format}", file=content_md)
print("", file=content_md)
if 'avs' in meta:
print(f"* [Forum Post]({meta['avs']})", file=content_md)
if 'year' in meta:
print(f"* Production Year: {meta['year']}", file=content_md)
print("", file=content_md)
for img in actual_img_links:
print(f"", file=content_md)
print('', file=content_md)
extra_slug = f"#{slugify(long_season, '-')}" if long_season else ''
bd_url = generate_index_entry(author, page_name, linked_content_format, f"{meta['title']} {short_season}",
meta['year'], meta.get('avs', None), len(metas) > 1, index_entries,
content_type='TV', extra_slug=extra_slug)
prefix = 'https://beqcatalogue.readthedocs.io/en/latest'
slugified_link = f"/{extra_slug}" if extra_slug else ''
beq_catalogue_url = f"{prefix}/{author}/{page_name}{slugified_link}"
cols = [
meta['title'],
meta['year'],
linked_content_format,
author,
meta.get('avs', ''),
beq_catalogue_url,
bd_url,
meta['filters']
]
db_writer.writerow(cols + actual_img_links)
# TODO remove once metadata is added
if author == 'mobe1969' and len(actual_img_links) == 0:
from urllib.parse import quote
print(f"Generating img link for missing meta in {meta}")
fp = meta['file_path'].replace('TV BEQs', 'TV Series')
img = f"https://gitlab.com/Mobe1969/beq-reports/-/raw/master/{quote(fp)}/{quote(meta['file_name'])}.jpg"
actual_img_links = [img]
print(f"", file=content_md)
print('', file=content_md)
json_catalogue.append({
'title': meta['title'],
'year': meta['year'],
'audioTypes': meta.get('audioType', []),
'content_type': 'TV',
'author': author,
'catalogue_url': beq_catalogue_url,
'filters': meta['jsonfilters'],
'images': actual_img_links,
'warning': meta.get('warning', ''),
'season': season,
'episode': episodes,
'mv': meta.get('gain', '0'),
'avs': meta.get('avs', ''),
'sortTitle': meta.get('sortTitle', ''),
'edition': meta.get('edition', ''),
'note': meta.get('note', ''),
'language': meta.get('language', ''),
'source': meta.get('source', ''),
'overview': meta.get('overview', ''),
'theMovieDB': meta.get('theMovieDB', ''),
'rating': meta.get('rating', ''),
'genres': meta.get('genres', [])
})
def generate_index_entry(author, page_name, content_format, content_name, year, avs_url, multiformat, index_entries,
content_type='film', extra_slug=None):
''' dumps the summary info to the index page '''
escaped = parse.quote(content_name)
mdb_url = f"https://www.themoviedb.org/search?query={escaped}"
rt_url = f"https://www.rottentomatoes.com/search?search={escaped}"
bd_url = f"https://www.blu-ray.com/movies/search.php?keyword={escaped}&submit=Search&action=search&"
if content_type == 'film':
extra_slug = f"#{slugify(content_format, '-')}" if multiformat is True else ''
avs_link = f"[avsforum]({avs_url})" if avs_url else ''
index_entries.append(
f"| [{content_name}](./{author}/{page_name}.md{extra_slug}) | {content_type} | {year} | {content_format} | {'Yes' if multiformat else 'No'} | {avs_link} [blu-ray]({bd_url}) [themoviedb]({mdb_url}) [rottentoms]({rt_url}) |")
return bd_url
if os.getcwd() == os.path.dirname(os.path.abspath(__file__)):
print(f"Switching CWD from {os.getcwd()}")
os.chdir('..')
else:
print(f"CWD: {os.getcwd()}")
if __name__ == '__main__':
aron7awol_films = extract_from_repo('.input/bmiller/miniDSPBEQ/', 'Movie BEQs', 'film')
print(f"Extracted {len(aron7awol_films)} aron7awol film catalogue entries")
aron7awol_tv = extract_from_repo('.input/bmiller/miniDSPBEQ/', 'TV Shows BEQ', 'TV')
print(f"Extracted {len(aron7awol_tv)} aron7awol TV catalogue entries")
mobe1969_films = extract_from_repo('.input/Mobe1969/miniDSPBEQ/', 'Movie BEQs', 'film')
print(f"Extracted {len(mobe1969_films)} mobe1969 film catalogue entries")
mobe1969_tv = extract_from_repo('.input/Mobe1969/miniDSPBEQ/', 'TV BEQs', 'TV')
print(f"Extracted {len(mobe1969_tv)} mobe1969 TV catalogue entries")
json_catalogue = []
with open('docs/database.csv', 'w+', newline='') as db_csv:
db_writer = csv.writer(db_csv)
db_writer.writerow(['Title', 'Year', 'Format', 'Author', 'AVS', 'Catalogue', 'blu-ray.com', 'filters'])
index_entries = []
process_aron7awol_content_from_repo(aron7awol_films, index_entries, 'film')
process_aron7awol_content_from_repo(aron7awol_tv, index_entries, 'TV')
with open('docs/aron7awol.md', mode='w+') as index_md:
print(f"# aron7awol", file=index_md)
print('', file=index_md)
print(f"| Title | Type | Year | Format | Multiformat? | Links |", file=index_md)
print(f"|-|-|-|-|-|-|", file=index_md)
for i in sorted(index_entries, key=str.casefold):
print(i, file=index_md)
index_entries = []
process_mobe1969_content_from_repo(mobe1969_films, index_entries, 'film')
process_mobe1969_content_from_repo(mobe1969_tv, index_entries, 'TV')
with open('docs/mobe1969.md', mode='w+') as index_md:
print(f"# Mobe1969", file=index_md)
print('', file=index_md)
print(f"| Title | Type | Year | Format | Multiformat? | Links |", file=index_md)
print(f"|-|-|-|-|-|-|", file=index_md)
for i in sorted(index_entries, key=str.casefold):
print(i, file=index_md)
print('', file=index_md)
with open('docs/database.json', 'w+') as db_json:
json.dump(json_catalogue, db_json, indent=0)
|
[
"json.dump",
"os.path.abspath",
"csv.writer",
"os.getcwd",
"markdown.extensions.toc.slugify",
"urllib.parse.quote",
"iir.xml_to_filt",
"glob.glob",
"operator.itemgetter",
"os.chdir",
"re.compile"
] |
[((2219, 2272), 'glob.glob', 'glob.glob', (['f"""{path1}{path2}/**/*.xml"""'], {'recursive': '(True)'}), "(f'{path1}{path2}/**/*.xml', recursive=True)\n", (2228, 2272), False, 'import glob\n'), ((4467, 4520), 're.compile', 're.compile', (['"""(.*) \\\\((\\\\d{4})\\\\)(?: *\\\\(.*\\\\))? (.*)"""'], {}), "('(.*) \\\\((\\\\d{4})\\\\)(?: *\\\\(.*\\\\))? (.*)')\n", (4477, 4520), False, 'import re\n'), ((5429, 5482), 're.compile', 're.compile', (['"""(.*) \\\\((\\\\d{4})\\\\)(?: *\\\\(.*\\\\))? (.*)"""'], {}), "('(.*) \\\\((\\\\d{4})\\\\)(?: *\\\\(.*\\\\))? (.*)')\n", (5439, 5482), False, 'import re\n'), ((19644, 19669), 'urllib.parse.quote', 'parse.quote', (['content_name'], {}), '(content_name)\n', (19655, 19669), False, 'from urllib import parse\n'), ((20371, 20382), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20380, 20382), False, 'import os\n'), ((20481, 20495), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (20489, 20495), False, 'import os\n'), ((7674, 7693), 'markdown.extensions.toc.slugify', 'slugify', (['title', '"""-"""'], {}), "(title, '-')\n", (7681, 7693), False, 'from markdown.extensions.toc import slugify\n'), ((20402, 20427), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (20417, 20427), False, 'import os\n'), ((21337, 21355), 'csv.writer', 'csv.writer', (['db_csv'], {}), '(db_csv)\n', (21347, 21355), False, 'import csv\n'), ((22733, 22777), 'json.dump', 'json.dump', (['json_catalogue', 'db_json'], {'indent': '(0)'}), '(json_catalogue, db_json, indent=0)\n', (22742, 22777), False, 'import json\n'), ((3363, 3392), 'iir.xml_to_filt', 'xml_to_filt', (['xml'], {'unroll': '(True)'}), '(xml, unroll=True)\n', (3374, 3392), False, 'from iir import xml_to_filt\n'), ((9324, 9351), 'markdown.extensions.toc.slugify', 'slugify', (["meta['title']", '"""-"""'], {}), "(meta['title'], '-')\n", (9331, 9351), False, 'from markdown.extensions.toc import slugify\n'), ((20462, 20473), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20471, 20473), False, 'import os\n'), ((20520, 20531), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20529, 20531), False, 'import os\n'), ((17004, 17029), 'markdown.extensions.toc.slugify', 'slugify', (['long_season', '"""-"""'], {}), "(long_season, '-')\n", (17011, 17029), False, 'from markdown.extensions.toc import slugify\n'), ((18224, 18233), 'urllib.parse.quote', 'quote', (['fp'], {}), '(fp)\n', (18229, 18233), False, 'from urllib.parse import quote\n'), ((18236, 18260), 'urllib.parse.quote', 'quote', (["meta['file_name']"], {}), "(meta['file_name'])\n", (18241, 18260), False, 'from urllib.parse import quote\n'), ((19969, 19997), 'markdown.extensions.toc.slugify', 'slugify', (['content_format', '"""-"""'], {}), "(content_format, '-')\n", (19976, 19997), False, 'from markdown.extensions.toc import slugify\n'), ((11823, 11858), 'markdown.extensions.toc.slugify', 'slugify', (['linked_content_format', '"""-"""'], {}), "(linked_content_format, '-')\n", (11830, 11858), False, 'from markdown.extensions.toc import slugify\n'), ((14598, 14611), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (14608, 14611), False, 'from operator import itemgetter\n')]
|
import os
from contextlib import contextmanager
from math import ceil
import pytest
import requests
import requests_mock
from apisports import _client_class
from apisports._client import ClientMeta, ClientInitError
from apisports.data import SingleData, NoneData, SimpleData, PagedData
from helpers import assert_response_ok
@contextmanager
def clientmeta_test_path():
"""
Get ClientMeta which loads from tests YAML location
"""
prev_dir = ClientMeta.data_dir
try:
ClientMeta.data_dir = os.path.join(os.path.dirname(__file__), 'data')
yield ClientMeta
finally:
ClientMeta.data_dir = prev_dir
def expect_client_init_error(name, version=None):
if version is None:
version = 1
expected_message = "Could not load API config for {name} from {path}"
with clientmeta_test_path():
with pytest.raises(ClientInitError) as excinfo:
_client_class(name, version)
assert str(excinfo.value) == expected_message.format(
name=name.lower(),
path=os.path.join(ClientMeta.data_dir, f'{name.lower()}-v{version}.yaml')
)
def clientmeta_test_class(name, version=None):
with clientmeta_test_path():
cls = _client_class(name, version)
return cls
@pytest.fixture
def test_v3():
return clientmeta_test_class('test', 3)
@pytest.fixture
def mock(session):
with requests_mock.mock() as mock:
return mock
@pytest.fixture
def session(adapter):
session = requests.Session()
session.mount('http+mock://', adapter)
return session
@pytest.fixture
def adapter():
return requests_mock.Adapter()
def register_mock_uri(adapter, *args, **kwargs):
def _(func):
def wrapped_func(request, context):
context.status_code = 200
context.headers['Content-Type'] = 'application/json'
return func(request, context)
adapter.register_uri(
'GET',
*args,
**kwargs,
json=wrapped_func
)
return _
def test_client_init_error():
expect_client_init_error('FileDoesNotExist')
expect_client_init_error('InvalidYAML')
def test_clientmeta(test_v3, session):
assert test_v3.default_host == 'http+mock://api-test1.server.local'
assert callable(test_v3.status)
assert callable(test_v3.ping)
assert callable(test_v3.null)
assert callable(test_v3.paginated_count)
assert callable(test_v3.import_)
def test_session(test_v3, session):
t = test_v3(session=session)
t2 = test_v3()
assert type(t._session) is requests.Session
assert t._session is session
assert type(t2._session) is requests.Session
assert t2._session is not session
def test_status(test_v3, session, mock, adapter):
@register_mock_uri(adapter, 'http+mock://api-test1.server.local/status')
def mock_status(request, context):
return {"response": {"status": "ok"}}
test = test_v3(session=session)
response = test.status()
assert_response_ok(response)
expected = dict(status="ok")
data = response.data
assert type(data) is SingleData
assert len(response) == 1
assert list(iter(data)) == [expected]
assert next(iter(response)) == expected
assert next(iter(data)) == expected
assert data.item() == expected
def test_null(test_v3, session, mock, adapter):
@register_mock_uri(adapter, 'http+mock://api-test1.server.local/null')
def mock_null(request, context):
return {"response": None}
test = test_v3(session=session)
response = test.null()
assert_response_ok(response)
assert response.data is NoneData
def test_python_keyword_import(test_v3, session, mock, adapter):
@register_mock_uri(adapter, 'http+mock://api-test1.server.local/import')
def mock_null(request, context):
return {"response": None}
test = test_v3(session=session)
response = test.import_()
assert_response_ok(response)
assert response.data is NoneData
def test_paginated_count(test_v3, session, mock, adapter):
@register_mock_uri(adapter, 'http+mock://api-test1.server.local/paginated-count')
def mock_paginated_count(request, context):
per_page = 3
params = {k: v[0] for k, v in request.qs.items()}
try:
start = int(params["from"]) if 'from' in params else 1
stop = int(params["to"]) + 1 if 'to' in params else 14
page = int(params['page']) if 'page' in params else 1
except ValueError as exc:
return {
"errors": [
{
"message": str(exc)
}
]
}
start = start + (page - 1) * per_page
if start > stop:
return {
"errors": [
{
"page": "value too high"
}
]
}
result = list(range(start, min(stop, start + per_page)))
return {
"get": "paginated-count",
"parameters": params,
"paging": {
"current": page,
"total": ceil((stop - start) / per_page),
},
"results": len(result),
"response": result
}
test = test_v3(session=session)
test.paginated_count()
response = test.paginated_count(**{"from": 1, "to": 10})
expected = list(range(1, 11))
assert type(response.data) is PagedData
assert list(iter(response.data)) == expected
assert list(iter(response)) == expected
# test support for keyword safe parameter alias
response = test.paginated_count(from_=1, to=10)
expected = list(range(1, 11))
assert type(response.data) is PagedData
assert list(iter(response.data)) == expected
assert list(iter(response)) == expected
response = test.paginated_count(from_=1, to=2)
expected = [1, 2]
assert type(response.data) is SimpleData
assert list(iter(response.data)) == expected
assert list(iter(response)) == expected
response = test.paginated_count(from_=1, to=1)
expected = [1]
assert type(response.data) is SingleData
assert list(iter(response.data)) == expected
assert list(iter(response)) == expected
assert response.data.item() == 1
|
[
"requests_mock.Adapter",
"math.ceil",
"requests.Session",
"requests_mock.mock",
"helpers.assert_response_ok",
"apisports._client_class",
"os.path.dirname",
"pytest.raises"
] |
[((1505, 1523), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1521, 1523), False, 'import requests\n'), ((1630, 1653), 'requests_mock.Adapter', 'requests_mock.Adapter', ([], {}), '()\n', (1651, 1653), False, 'import requests_mock\n'), ((3026, 3054), 'helpers.assert_response_ok', 'assert_response_ok', (['response'], {}), '(response)\n', (3044, 3054), False, 'from helpers import assert_response_ok\n'), ((3605, 3633), 'helpers.assert_response_ok', 'assert_response_ok', (['response'], {}), '(response)\n', (3623, 3633), False, 'from helpers import assert_response_ok\n'), ((3958, 3986), 'helpers.assert_response_ok', 'assert_response_ok', (['response'], {}), '(response)\n', (3976, 3986), False, 'from helpers import assert_response_ok\n'), ((1234, 1262), 'apisports._client_class', '_client_class', (['name', 'version'], {}), '(name, version)\n', (1247, 1262), False, 'from apisports import _client_class\n'), ((1401, 1421), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (1419, 1421), False, 'import requests_mock\n'), ((534, 559), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (549, 559), False, 'import os\n'), ((864, 894), 'pytest.raises', 'pytest.raises', (['ClientInitError'], {}), '(ClientInitError)\n', (877, 894), False, 'import pytest\n'), ((919, 947), 'apisports._client_class', '_client_class', (['name', 'version'], {}), '(name, version)\n', (932, 947), False, 'from apisports import _client_class\n'), ((5199, 5230), 'math.ceil', 'ceil', (['((stop - start) / per_page)'], {}), '((stop - start) / per_page)\n', (5203, 5230), False, 'from math import ceil\n')]
|
#
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project.
#
from data_wrangling_components.engine.verbs.groupby import groupby
from data_wrangling_components.engine.verbs.ungroup import ungroup
from data_wrangling_components.types import Step, Verb
from tests.engine.test_store import get_test_store
def test_ungroup():
step1 = Step(
Verb.Groupby,
"table10",
"output",
args={"columns": ["x", "y"]},
)
store = get_test_store()
groupby_result = groupby(step1, store)
store.set("newTable", groupby_result)
step2 = Step(
Verb.Ungroup,
"newTable",
"output",
)
result = ungroup(step2, store)
assert len(result.table.columns) == 3
assert len(result.table) == 3
assert result.table.loc[0, "x"] == "A"
assert result.table.loc[1, "x"] == "B"
assert result.table.loc[2, "x"] == "A"
|
[
"tests.engine.test_store.get_test_store",
"data_wrangling_components.types.Step",
"data_wrangling_components.engine.verbs.ungroup.ungroup",
"data_wrangling_components.engine.verbs.groupby.groupby"
] |
[((406, 475), 'data_wrangling_components.types.Step', 'Step', (['Verb.Groupby', '"""table10"""', '"""output"""'], {'args': "{'columns': ['x', 'y']}"}), "(Verb.Groupby, 'table10', 'output', args={'columns': ['x', 'y']})\n", (410, 475), False, 'from data_wrangling_components.types import Step, Verb\n'), ((535, 551), 'tests.engine.test_store.get_test_store', 'get_test_store', ([], {}), '()\n', (549, 551), False, 'from tests.engine.test_store import get_test_store\n'), ((576, 597), 'data_wrangling_components.engine.verbs.groupby.groupby', 'groupby', (['step1', 'store'], {}), '(step1, store)\n', (583, 597), False, 'from data_wrangling_components.engine.verbs.groupby import groupby\n'), ((658, 698), 'data_wrangling_components.types.Step', 'Step', (['Verb.Ungroup', '"""newTable"""', '"""output"""'], {}), "(Verb.Ungroup, 'newTable', 'output')\n", (662, 698), False, 'from data_wrangling_components.types import Step, Verb\n'), ((750, 771), 'data_wrangling_components.engine.verbs.ungroup.ungroup', 'ungroup', (['step2', 'store'], {}), '(step2, store)\n', (757, 771), False, 'from data_wrangling_components.engine.verbs.ungroup import ungroup\n')]
|
#!/usr/bin/env python
import sys
import os.path
import logging
import optparse
if os.name == "posix":
stream = sys.stderr
else:
#when running from py2exe, if anything is printed to stderr
#then the app shows an annoying dialog when closed
stream = sys.stdout
logging.basicConfig(
level=logging.DEBUG,
format="[%(name)-20s][%(levelname)-7s] %(message)s (%(filename)s:%(lineno)d)",
stream=stream
)
try:
import gs
except ImportError:
#probbably running from the source dir
sys.path.insert(0,os.path.dirname(os.path.abspath(__file__)))
import gs
import gs.groundstation as groundstation
if __name__ == "__main__":
parser = gs.get_default_command_line_parser(True, True, True)
options, args = parser.parse_args()
if gs.IS_WINDOWS:
import gtk.gdk
gtk.gdk.threads_enter()
groundstation.Groundstation(options).main()
if gs.IS_WINDOWS:
import gtk.gdk
gtk.gdk.threads_leave()
sys.exit(0)
|
[
"sys.exit",
"gs.get_default_command_line_parser",
"logging.basicConfig",
"gs.groundstation.Groundstation"
] |
[((277, 420), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[%(name)-20s][%(levelname)-7s] %(message)s (%(filename)s:%(lineno)d)"""', 'stream': 'stream'}), "(level=logging.DEBUG, format=\n '[%(name)-20s][%(levelname)-7s] %(message)s (%(filename)s:%(lineno)d)',\n stream=stream)\n", (296, 420), False, 'import logging\n'), ((676, 728), 'gs.get_default_command_line_parser', 'gs.get_default_command_line_parser', (['(True)', '(True)', '(True)'], {}), '(True, True, True)\n', (710, 728), False, 'import gs\n'), ((979, 990), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (987, 990), False, 'import sys\n'), ((854, 890), 'gs.groundstation.Groundstation', 'groundstation.Groundstation', (['options'], {}), '(options)\n', (881, 890), True, 'import gs.groundstation as groundstation\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Class that serves as the WoT entrypoint.
"""
import json
import logging
import warnings
import six
import tornado.concurrent
import tornado.gen
import tornado.ioloop
from rx import Observable
from six.moves import range
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from wotpy.support import is_dnssd_supported
from wotpy.utils.utils import handle_observer_finalization
from wotpy.wot.consumed.thing import ConsumedThing
from wotpy.wot.dictionaries.thing import ThingFragment
from wotpy.wot.enums import DiscoveryMethod
from wotpy.wot.exposed.thing import ExposedThing
from wotpy.wot.td import ThingDescription
from wotpy.wot.thing import Thing
DEFAULT_FETCH_TIMEOUT_SECS = 20.0
class WoT(object):
"""The WoT object is the API entry point and it is exposed by an
implementation of the WoT Runtime. The WoT object does not expose
properties, only methods for discovering, consuming and exposing a Thing."""
def __init__(self, servient):
self._servient = servient
self._logr = logging.getLogger(__name__)
@property
def servient(self):
"""Servient instance of this WoT entrypoint."""
return self._servient
@classmethod
def _is_fragment_match(cls, item, thing_filter):
"""Returns True if the given item (an ExposedThing, Thing or TD)
matches the fragment in the given Thing filter."""
td = None
if isinstance(item, ExposedThing):
td = ThingDescription.from_thing(item.thing)
elif isinstance(item, Thing):
td = ThingDescription.from_thing(item)
elif isinstance(item, ThingDescription):
td = item
assert td
fragment_dict = thing_filter.fragment if thing_filter.fragment else {}
return all(
item in six.iteritems(td.to_dict())
for item in six.iteritems(fragment_dict))
def _build_local_discover_observable(self, thing_filter):
"""Builds an Observable to discover Things using the local method."""
found_tds = [
ThingDescription.from_thing(exposed_thing.thing).to_str()
for exposed_thing in self._servient.exposed_things
if self._is_fragment_match(exposed_thing, thing_filter)
]
# noinspection PyUnresolvedReferences
return Observable.of(*found_tds)
def _build_dnssd_discover_observable(self, thing_filter, dnssd_find_kwargs):
"""Builds an Observable to discover Things using the multicast method based on DNS-SD."""
if not is_dnssd_supported():
warnings.warn("Unsupported DNS-SD multicast discovery")
# noinspection PyUnresolvedReferences
return Observable.empty()
dnssd_find_kwargs = dnssd_find_kwargs if dnssd_find_kwargs else {}
if not self._servient.dnssd:
# noinspection PyUnresolvedReferences
return Observable.empty()
def subscribe(observer):
"""Browses the Servient services using DNS-SD and retrieves the TDs that match the filters."""
state = {"stop": False}
@handle_observer_finalization(observer)
@tornado.gen.coroutine
def callback():
address_port_pairs = yield self._servient.dnssd.find(**dnssd_find_kwargs)
def build_pair_url(idx, path=None):
addr, port = address_port_pairs[idx]
base = "http://{}:{}".format(addr, port)
path = path if path else ''
return "{}/{}".format(base, path.strip("/"))
http_client = AsyncHTTPClient()
catalogue_resps = [
http_client.fetch(build_pair_url(idx))
for idx in range(len(address_port_pairs))
]
wait_iter = tornado.gen.WaitIterator(*catalogue_resps)
while not wait_iter.done() and not state["stop"]:
try:
catalogue_resp = yield wait_iter.next()
except Exception as ex:
self._logr.warning(
"Exception on HTTP request to TD catalogue: {}".format(ex))
else:
catalogue = json.loads(catalogue_resp.body)
if state["stop"]:
return
td_resps = yield [
http_client.fetch(build_pair_url(
wait_iter.current_index, path=path))
for thing_id, path in six.iteritems(catalogue)
]
tds = [
ThingDescription(td_resp.body)
for td_resp in td_resps
]
tds_filtered = [
td for td in tds if self._is_fragment_match(td, thing_filter)]
[observer.on_next(td.to_str()) for td in tds_filtered]
def unsubscribe():
state["stop"] = True
tornado.ioloop.IOLoop.current().add_callback(callback)
return unsubscribe
# noinspection PyUnresolvedReferences
return Observable.create(subscribe)
def discover(self, thing_filter, dnssd_find_kwargs=None):
"""Starts the discovery process that will provide ThingDescriptions
that match the optional argument filter of type ThingFilter."""
supported_methods = [
DiscoveryMethod.ANY,
DiscoveryMethod.LOCAL,
DiscoveryMethod.MULTICAST
]
if thing_filter.method not in supported_methods:
err = NotImplementedError("Unsupported discovery method")
# noinspection PyUnresolvedReferences
return Observable.throw(err)
if thing_filter.query:
err = NotImplementedError(
"Queries are not supported yet (please use filter.fragment)")
# noinspection PyUnresolvedReferences
return Observable.throw(err)
observables = []
if thing_filter.method in [DiscoveryMethod.ANY, DiscoveryMethod.LOCAL]:
observables.append(
self._build_local_discover_observable(thing_filter))
if thing_filter.method in [DiscoveryMethod.ANY, DiscoveryMethod.MULTICAST]:
observables.append(self._build_dnssd_discover_observable(
thing_filter, dnssd_find_kwargs))
# noinspection PyUnresolvedReferences
return Observable.merge(*observables)
@classmethod
@tornado.gen.coroutine
def fetch(cls, url, timeout_secs=None):
"""Accepts an url argument and returns a Future
that resolves with a Thing Description string."""
timeout_secs = timeout_secs or DEFAULT_FETCH_TIMEOUT_SECS
http_client = AsyncHTTPClient()
http_request = HTTPRequest(url, request_timeout=timeout_secs)
http_response = yield http_client.fetch(http_request)
td_doc = json.loads(http_response.body)
td = ThingDescription(td_doc)
raise tornado.gen.Return(td.to_str())
def consume(self, td_str):
"""Accepts a thing description string argument and returns a
ConsumedThing object instantiated based on that description."""
td = ThingDescription(td_str)
return ConsumedThing(servient=self._servient, td=td)
@classmethod
def thing_from_model(cls, model):
"""Takes a ThingModel and builds a Thing.
Raises if the model has an unexpected type."""
expected_types = (six.string_types, ThingFragment, ConsumedThing)
if not isinstance(model, expected_types):
raise ValueError("Expected one of: {}".format(expected_types))
if isinstance(model, six.string_types):
thing = ThingDescription(doc=model).build_thing()
elif isinstance(model, ThingFragment):
thing = Thing(thing_fragment=model)
else:
thing = model.td.build_thing()
return thing
def produce(self, model):
"""Accepts a model argument of type ThingModel and returns an ExposedThing
object, locally created based on the provided initialization parameters."""
thing = self.thing_from_model(model)
exposed_thing = ExposedThing(servient=self._servient, thing=thing)
self._servient.add_exposed_thing(exposed_thing)
return exposed_thing
@tornado.gen.coroutine
def produce_from_url(self, url, timeout_secs=None):
"""Return a Future that resolves to an ExposedThing created
from the thing description retrieved from the given URL."""
td_str = yield self.fetch(url, timeout_secs=timeout_secs)
exposed_thing = self.produce(td_str)
raise tornado.gen.Return(exposed_thing)
@tornado.gen.coroutine
def consume_from_url(self, url, timeout_secs=None):
"""Return a Future that resolves to a ConsumedThing created
from the thing description retrieved from the given URL."""
td_str = yield self.fetch(url, timeout_secs=timeout_secs)
consumed_thing = self.consume(td_str)
raise tornado.gen.Return(consumed_thing)
@tornado.gen.coroutine
def register(self, directory, thing):
"""Generate the Thing Description as td, given the Properties,
Actions and Events defined for this ExposedThing object.
Then make a request to register td to the given WoT Thing Directory."""
raise NotImplementedError()
@tornado.gen.coroutine
def unregister(self, directory, thing):
"""Makes a request to unregister the thing from the given WoT Thing Directory."""
raise NotImplementedError()
|
[
"wotpy.wot.consumed.thing.ConsumedThing",
"rx.Observable.merge",
"json.loads",
"tornado.httpclient.HTTPRequest",
"wotpy.wot.exposed.thing.ExposedThing",
"wotpy.wot.thing.Thing",
"wotpy.support.is_dnssd_supported",
"wotpy.utils.utils.handle_observer_finalization",
"rx.Observable.create",
"rx.Observable.of",
"tornado.httpclient.AsyncHTTPClient",
"rx.Observable.empty",
"rx.Observable.throw",
"wotpy.wot.td.ThingDescription",
"six.iteritems",
"warnings.warn",
"logging.getLogger",
"wotpy.wot.td.ThingDescription.from_thing"
] |
[((1079, 1106), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1096, 1106), False, 'import logging\n'), ((2375, 2400), 'rx.Observable.of', 'Observable.of', (['*found_tds'], {}), '(*found_tds)\n', (2388, 2400), False, 'from rx import Observable\n'), ((5336, 5364), 'rx.Observable.create', 'Observable.create', (['subscribe'], {}), '(subscribe)\n', (5353, 5364), False, 'from rx import Observable\n'), ((6657, 6687), 'rx.Observable.merge', 'Observable.merge', (['*observables'], {}), '(*observables)\n', (6673, 6687), False, 'from rx import Observable\n'), ((6981, 6998), 'tornado.httpclient.AsyncHTTPClient', 'AsyncHTTPClient', ([], {}), '()\n', (6996, 6998), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest\n'), ((7022, 7068), 'tornado.httpclient.HTTPRequest', 'HTTPRequest', (['url'], {'request_timeout': 'timeout_secs'}), '(url, request_timeout=timeout_secs)\n', (7033, 7068), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest\n'), ((7150, 7180), 'json.loads', 'json.loads', (['http_response.body'], {}), '(http_response.body)\n', (7160, 7180), False, 'import json\n'), ((7194, 7218), 'wotpy.wot.td.ThingDescription', 'ThingDescription', (['td_doc'], {}), '(td_doc)\n', (7210, 7218), False, 'from wotpy.wot.td import ThingDescription\n'), ((7453, 7477), 'wotpy.wot.td.ThingDescription', 'ThingDescription', (['td_str'], {}), '(td_str)\n', (7469, 7477), False, 'from wotpy.wot.td import ThingDescription\n'), ((7494, 7539), 'wotpy.wot.consumed.thing.ConsumedThing', 'ConsumedThing', ([], {'servient': 'self._servient', 'td': 'td'}), '(servient=self._servient, td=td)\n', (7507, 7539), False, 'from wotpy.wot.consumed.thing import ConsumedThing\n'), ((8456, 8506), 'wotpy.wot.exposed.thing.ExposedThing', 'ExposedThing', ([], {'servient': 'self._servient', 'thing': 'thing'}), '(servient=self._servient, thing=thing)\n', (8468, 8506), False, 'from wotpy.wot.exposed.thing import ExposedThing\n'), ((1516, 1555), 'wotpy.wot.td.ThingDescription.from_thing', 'ThingDescription.from_thing', (['item.thing'], {}), '(item.thing)\n', (1543, 1555), False, 'from wotpy.wot.td import ThingDescription\n'), ((2597, 2617), 'wotpy.support.is_dnssd_supported', 'is_dnssd_supported', ([], {}), '()\n', (2615, 2617), False, 'from wotpy.support import is_dnssd_supported\n'), ((2631, 2686), 'warnings.warn', 'warnings.warn', (['"""Unsupported DNS-SD multicast discovery"""'], {}), "('Unsupported DNS-SD multicast discovery')\n", (2644, 2686), False, 'import warnings\n'), ((2756, 2774), 'rx.Observable.empty', 'Observable.empty', ([], {}), '()\n', (2772, 2774), False, 'from rx import Observable\n'), ((2958, 2976), 'rx.Observable.empty', 'Observable.empty', ([], {}), '()\n', (2974, 2976), False, 'from rx import Observable\n'), ((3169, 3207), 'wotpy.utils.utils.handle_observer_finalization', 'handle_observer_finalization', (['observer'], {}), '(observer)\n', (3197, 3207), False, 'from wotpy.utils.utils import handle_observer_finalization\n'), ((5920, 5941), 'rx.Observable.throw', 'Observable.throw', (['err'], {}), '(err)\n', (5936, 5941), False, 'from rx import Observable\n'), ((6160, 6181), 'rx.Observable.throw', 'Observable.throw', (['err'], {}), '(err)\n', (6176, 6181), False, 'from rx import Observable\n'), ((1611, 1644), 'wotpy.wot.td.ThingDescription.from_thing', 'ThingDescription.from_thing', (['item'], {}), '(item)\n', (1638, 1644), False, 'from wotpy.wot.td import ThingDescription\n'), ((3676, 3693), 'tornado.httpclient.AsyncHTTPClient', 'AsyncHTTPClient', ([], {}), '()\n', (3691, 3693), False, 'from tornado.httpclient import AsyncHTTPClient, HTTPRequest\n'), ((8081, 8108), 'wotpy.wot.thing.Thing', 'Thing', ([], {'thing_fragment': 'model'}), '(thing_fragment=model)\n', (8086, 8108), False, 'from wotpy.wot.thing import Thing\n'), ((1908, 1936), 'six.iteritems', 'six.iteritems', (['fragment_dict'], {}), '(fragment_dict)\n', (1921, 1936), False, 'import six\n'), ((2114, 2162), 'wotpy.wot.td.ThingDescription.from_thing', 'ThingDescription.from_thing', (['exposed_thing.thing'], {}), '(exposed_thing.thing)\n', (2141, 2162), False, 'from wotpy.wot.td import ThingDescription\n'), ((7972, 7999), 'wotpy.wot.td.ThingDescription', 'ThingDescription', ([], {'doc': 'model'}), '(doc=model)\n', (7988, 7999), False, 'from wotpy.wot.td import ThingDescription\n'), ((4336, 4367), 'json.loads', 'json.loads', (['catalogue_resp.body'], {}), '(catalogue_resp.body)\n', (4346, 4367), False, 'import json\n'), ((4783, 4813), 'wotpy.wot.td.ThingDescription', 'ThingDescription', (['td_resp.body'], {}), '(td_resp.body)\n', (4799, 4813), False, 'from wotpy.wot.td import ThingDescription\n'), ((4671, 4695), 'six.iteritems', 'six.iteritems', (['catalogue'], {}), '(catalogue)\n', (4684, 4695), False, 'import six\n')]
|
# Copyright © 2021 Ingram Micro Inc. All rights reserved.
from django.db import migrations
def create_users(apps, schema_editor):
User = apps.get_model('app', 'User')
to_create = []
for username in ('Mal', 'Zoe', 'Wash', 'Inara', 'Jayne', 'Kaylee', 'Simon', 'River'):
to_create.append(User(username=username))
User.objects.bulk_create(to_create)
def create_products(apps, schema_editor):
ProductType = apps.get_model('app', 'ProductType')
Product = apps.get_model('app', 'Product')
products = {
'food': ['apple', 'meat', 'banana'],
'weapon': ['blaster', 'gun', 'knife'],
'starships': ['Serenity'],
}
to_create = []
for key, items in products.items():
product_type = ProductType.objects.create(name=key)
for product in items:
to_create.append(Product(name=product, product_type=product_type))
Product.objects.bulk_create(to_create)
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.RunPython(create_users, migrations.RunPython.noop),
migrations.RunPython(create_products, migrations.RunPython.noop),
]
|
[
"django.db.migrations.RunPython"
] |
[((1072, 1133), 'django.db.migrations.RunPython', 'migrations.RunPython', (['create_users', 'migrations.RunPython.noop'], {}), '(create_users, migrations.RunPython.noop)\n', (1092, 1133), False, 'from django.db import migrations\n'), ((1143, 1207), 'django.db.migrations.RunPython', 'migrations.RunPython', (['create_products', 'migrations.RunPython.noop'], {}), '(create_products, migrations.RunPython.noop)\n', (1163, 1207), False, 'from django.db import migrations\n')]
|
import os
import unittest
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import Project, ImageUrlCreateEntry
from autotrainer.blob.blob_client import LabelledBlob
from autotrainer.custom_vision.custom_vision_client import CustomVisionClient
from autotrainer.custom_vision.domain import Domain, to_domain_id
from autotrainer.custom_vision.classification_type import ClassificationType
CVTK=os.environ['CV_TRAINING_KEY']
endpoint=os.environ['CV_ENDPOINT']
training_client = CustomVisionTrainingClient(CVTK, endpoint)
class CustomVisionTests(unittest.TestCase):
projects: [Project]
def tearDown(self):
for project in self.projects:
training_client.delete_project(project.id)
self.projects.remove(project)
def setUp(self):
self.projects = []
def test_create_project(self):
client = CustomVisionClient(training_client)
project = client.create_project('test', 'test', Domain.GENERAL_CLASSIFICATION, ClassificationType.MULTICLASS)
self.projects.append(project) # add to delete later
self.assertIsNotNone(project)
self.assertIsInstance(project, Project)
self.assertIn('test', project.name)
projects = training_client.get_projects()
self.assertIn(project, projects)
def test_create_project_compact_multilabel(self):
client = CustomVisionClient(training_client)
project = client.create_project('test', 'test', Domain.GENERAL_CLASSIFICATION_COMPACT, ClassificationType.MULTILABEL)
self.projects.append(project)
self.assertIsNotNone(project)
self.assertIsInstance(project, Project)
self.assertIn('test', project.name)
self.assertEqual(project.settings.domain_id, to_domain_id(Domain.GENERAL_CLASSIFICATION_COMPACT) )
self.assertEqual(project.settings.classification_type, ClassificationType.MULTILABEL.value )
projects = training_client.get_projects()
self.assertIn(project, projects)
def test_create_image_url_list(self):
client = CustomVisionClient(training_client)
project = client.create_project('test','test', Domain.GENERAL_CLASSIFICATION, ClassificationType.MULTICLASS)
self.projects.append(project) # add to delete later
labelled_blobs = [LabelledBlob('url1', ['tomato','potato']), LabelledBlob('url2', ['banana','fig'])]
image_urls = client.create_image_url_list(project, labelled_blobs )
for labelled_blob in labelled_blobs:
self.assertIn(labelled_blob.download_url, [i.url for i in image_urls])
for image in image_urls:
self.assertIsInstance(image, ImageUrlCreateEntry)
|
[
"autotrainer.custom_vision.domain.to_domain_id",
"azure.cognitiveservices.vision.customvision.training.CustomVisionTrainingClient",
"autotrainer.blob.blob_client.LabelledBlob",
"autotrainer.custom_vision.custom_vision_client.CustomVisionClient"
] |
[((600, 642), 'azure.cognitiveservices.vision.customvision.training.CustomVisionTrainingClient', 'CustomVisionTrainingClient', (['CVTK', 'endpoint'], {}), '(CVTK, endpoint)\n', (626, 642), False, 'from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient\n'), ((992, 1027), 'autotrainer.custom_vision.custom_vision_client.CustomVisionClient', 'CustomVisionClient', (['training_client'], {}), '(training_client)\n', (1010, 1027), False, 'from autotrainer.custom_vision.custom_vision_client import CustomVisionClient\n'), ((1509, 1544), 'autotrainer.custom_vision.custom_vision_client.CustomVisionClient', 'CustomVisionClient', (['training_client'], {}), '(training_client)\n', (1527, 1544), False, 'from autotrainer.custom_vision.custom_vision_client import CustomVisionClient\n'), ((2220, 2255), 'autotrainer.custom_vision.custom_vision_client.CustomVisionClient', 'CustomVisionClient', (['training_client'], {}), '(training_client)\n', (2238, 2255), False, 'from autotrainer.custom_vision.custom_vision_client import CustomVisionClient\n'), ((1898, 1949), 'autotrainer.custom_vision.domain.to_domain_id', 'to_domain_id', (['Domain.GENERAL_CLASSIFICATION_COMPACT'], {}), '(Domain.GENERAL_CLASSIFICATION_COMPACT)\n', (1910, 1949), False, 'from autotrainer.custom_vision.domain import Domain, to_domain_id\n'), ((2462, 2504), 'autotrainer.blob.blob_client.LabelledBlob', 'LabelledBlob', (['"""url1"""', "['tomato', 'potato']"], {}), "('url1', ['tomato', 'potato'])\n", (2474, 2504), False, 'from autotrainer.blob.blob_client import LabelledBlob\n'), ((2505, 2544), 'autotrainer.blob.blob_client.LabelledBlob', 'LabelledBlob', (['"""url2"""', "['banana', 'fig']"], {}), "('url2', ['banana', 'fig'])\n", (2517, 2544), False, 'from autotrainer.blob.blob_client import LabelledBlob\n')]
|
import numpy as np
import pandas as pd
from ncls import NCLS
def _number_overlapping(scdf, ocdf, **kwargs):
keep_nonoverlapping = kwargs.get("keep_nonoverlapping", True)
column_name = kwargs.get("overlap_col", True)
if scdf.empty:
return None
if ocdf.empty:
if keep_nonoverlapping:
df = scdf.copy()
df.insert(df.shape[1], column_name, 0)
return df
else:
return None
oncls = NCLS(ocdf.Start.values, ocdf.End.values, ocdf.index.values)
starts = scdf.Start.values
ends = scdf.End.values
indexes = scdf.index.values
_self_indexes, _other_indexes = oncls.all_overlaps_both(
starts, ends, indexes)
s = pd.Series(_self_indexes)
counts_per_read = s.value_counts()[s.unique()].reset_index()
counts_per_read.columns = ["Index", "Count"]
df = scdf.copy()
if keep_nonoverlapping:
_missing_indexes = np.setdiff1d(scdf.index, _self_indexes)
missing = pd.DataFrame(data={"Index": _missing_indexes, "Count": 0}, index=_missing_indexes)
counts_per_read = pd.concat([counts_per_read, missing])
else:
df = df.loc[_self_indexes]
counts_per_read = counts_per_read.set_index("Index")
df.insert(df.shape[1], column_name, counts_per_read)
return df
def _coverage(scdf, ocdf, **kwargs):
fraction_col = kwargs["fraction_col"]
if scdf.empty:
return None
if ocdf.empty:
df = scdf.copy()
df.insert(df.shape[1], fraction_col, 0.0)
return df
oncls = NCLS(ocdf.Start.values, ocdf.End.values, ocdf.index.values)
starts = scdf.Start.values
ends = scdf.End.values
indexes = scdf.index.values
_lengths = oncls.coverage(starts, ends, indexes)
_lengths = _lengths / (ends - starts)
_fractions = _lengths
_fractions = _fractions.astype("float64")
_fractions = np.nan_to_num(_fractions)
scdf = scdf.copy()
scdf.insert(scdf.shape[1], fraction_col, _fractions)
return scdf
|
[
"pandas.DataFrame",
"numpy.nan_to_num",
"ncls.NCLS",
"numpy.setdiff1d",
"pandas.Series",
"pandas.concat"
] |
[((471, 530), 'ncls.NCLS', 'NCLS', (['ocdf.Start.values', 'ocdf.End.values', 'ocdf.index.values'], {}), '(ocdf.Start.values, ocdf.End.values, ocdf.index.values)\n', (475, 530), False, 'from ncls import NCLS\n'), ((724, 748), 'pandas.Series', 'pd.Series', (['_self_indexes'], {}), '(_self_indexes)\n', (733, 748), True, 'import pandas as pd\n'), ((1570, 1629), 'ncls.NCLS', 'NCLS', (['ocdf.Start.values', 'ocdf.End.values', 'ocdf.index.values'], {}), '(ocdf.Start.values, ocdf.End.values, ocdf.index.values)\n', (1574, 1629), False, 'from ncls import NCLS\n'), ((1907, 1932), 'numpy.nan_to_num', 'np.nan_to_num', (['_fractions'], {}), '(_fractions)\n', (1920, 1932), True, 'import numpy as np\n'), ((941, 980), 'numpy.setdiff1d', 'np.setdiff1d', (['scdf.index', '_self_indexes'], {}), '(scdf.index, _self_indexes)\n', (953, 980), True, 'import numpy as np\n'), ((999, 1086), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Index': _missing_indexes, 'Count': 0}", 'index': '_missing_indexes'}), "(data={'Index': _missing_indexes, 'Count': 0}, index=\n _missing_indexes)\n", (1011, 1086), True, 'import pandas as pd\n'), ((1108, 1145), 'pandas.concat', 'pd.concat', (['[counts_per_read, missing]'], {}), '([counts_per_read, missing])\n', (1117, 1145), True, 'import pandas as pd\n')]
|
from django import forms
from django.core.urlresolvers import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, ButtonHolder, Submit, HTML
from fabric_bolt.web_hooks import models
class HookCreateForm(forms.ModelForm):
button_prefix = "Create"
project = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = models.Hook
fields = [
'project',
'url',
]
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.layout = Layout(
'project',
'url',
ButtonHolder(
Submit('submit', '%s Hook' % self.button_prefix, css_class='button')
)
)
super(HookCreateForm, self).__init__(*args, **kwargs)
def clean_project(self, *args, **kwargs):
if not self.cleaned_data['project']:
return None
project = models.Project.objects.get(pk=int(self.cleaned_data['project']))
return project
class HookUpdateForm(HookCreateForm):
button_prefix = "Update"
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
instance = kwargs['instance']
delete_url = reverse('hooks_hook_delete', args=(instance.pk,))
self.helper.layout = Layout(
'project',
'url',
ButtonHolder(
Submit('submit', '%s Hook' % self.button_prefix, css_class='button'),
HTML('<a href="' + delete_url + '" class="btn btn-danger">Delete Hook</a>'),
)
)
super(HookCreateForm, self).__init__(*args, **kwargs)
|
[
"crispy_forms.layout.HTML",
"django.core.urlresolvers.reverse",
"crispy_forms.helper.FormHelper",
"django.forms.HiddenInput",
"crispy_forms.layout.Submit"
] |
[((549, 561), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (559, 561), False, 'from crispy_forms.helper import FormHelper\n'), ((1199, 1211), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (1209, 1211), False, 'from crispy_forms.helper import FormHelper\n'), ((1272, 1321), 'django.core.urlresolvers.reverse', 'reverse', (['"""hooks_hook_delete"""'], {'args': '(instance.pk,)'}), "('hooks_hook_delete', args=(instance.pk,))\n", (1279, 1321), False, 'from django.core.urlresolvers import reverse\n'), ((332, 351), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (349, 351), False, 'from django import forms\n'), ((684, 752), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', "('%s Hook' % self.button_prefix)"], {'css_class': '"""button"""'}), "('submit', '%s Hook' % self.button_prefix, css_class='button')\n", (690, 752), False, 'from crispy_forms.layout import Layout, ButtonHolder, Submit, HTML\n'), ((1445, 1513), 'crispy_forms.layout.Submit', 'Submit', (['"""submit"""', "('%s Hook' % self.button_prefix)"], {'css_class': '"""button"""'}), "('submit', '%s Hook' % self.button_prefix, css_class='button')\n", (1451, 1513), False, 'from crispy_forms.layout import Layout, ButtonHolder, Submit, HTML\n'), ((1531, 1606), 'crispy_forms.layout.HTML', 'HTML', (['(\'<a href="\' + delete_url + \'" class="btn btn-danger">Delete Hook</a>\')'], {}), '(\'<a href="\' + delete_url + \'" class="btn btn-danger">Delete Hook</a>\')\n', (1535, 1606), False, 'from crispy_forms.layout import Layout, ButtonHolder, Submit, HTML\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 16:51:06 2019
@author: chaoz
"""
import xml.etree.ElementTree as ET
import cv2
import os
import numpy as np
only1=0
kdanfkn=0
total_sc=0
image_dir='C:\\Users\\chaoz\\Desktop\\testset'
sname='RGB.png'
dname='D.png'
xmlname='xml'
RGB_dirlist=[]
depth_dirlist=[]
xml_dirlist=[]
for dire in os.listdir(image_dir):
pwd_dir=dire
if sname in os.path.split(pwd_dir)[1]:
RGB_dirlist.append(pwd_dir)
for i in range(len(RGB_dirlist)):
print(i,i/len(RGB_dirlist))
RGB_dir=os.path.join(image_dir,RGB_dirlist[i])
xml_dir='C:\\Users\\chaoz\\Desktop\\testxml\\'+RGB_dirlist[i].split('.')[0]+'.xml'
if os.path.exists(xml_dir)==True:
tree = ET.parse(xml_dir)
rect={}
line=""
root = tree.getroot()
rgb_image = np.array(cv2.imread(RGB_dir, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH))
#open image before draw
# depth_image_path=depth_dir
# depth_image = np.array(cv2.imread(depth_image_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH))
#
# im=depth_image.astype(int)
# min_exclude_0=im[im!=0].min()
# max_exclude_0=im[im!=0].max()
#
# diff = max_exclude_0 - min_exclude_0
#
#
# for i in range (np.shape(im)[0]):
# for j in range(np.shape(im)[1]):
# if im[i,j]!=0:
# im[i,j]=(im[i,j]-min_exclude_0)*255/diff
#
# im[np.where(im==0)]=255
# im2=im
# im3=im
# im2=np.concatenate((im,im2),axis=1)
# im3=np.concatenate((im2,im3),axis=1)
img=rgb_image
for ob in root.iter('object'):
if ob[0].text=='bag1':
for bndbox in ob.iter('bndbox'):
for xmin in bndbox.iter('xmin'):
rect['xmin'] = xmin.text
for ymin in bndbox.iter('ymin'):
rect['ymin'] = ymin.text
for xmax in bndbox.iter('xmax'):
rect['xmax'] = xmax.text
for ymax in bndbox.iter('ymax'):
rect['ymax'] = ymax.text
# draw
cv2.rectangle(img, (int(rect['xmin']), int(rect['ymax'])), (int(rect['xmax']), int(rect['ymin'])), (0, 0, 255), 5)
elif ob[0].text=='bag2':
for bndbox in ob.iter('bndbox'):
for xmin in bndbox.iter('xmin'):
rect['xmin'] = xmin.text
for ymin in bndbox.iter('ymin'):
rect['ymin'] = ymin.text
for xmax in bndbox.iter('xmax'):
rect['xmax'] = xmax.text
for ymax in bndbox.iter('ymax'):
rect['ymax'] = ymax.text
# draw
cv2.rectangle(img, (int(rect['xmin']), int(rect['ymax'])), (int(rect['xmax']), int(rect['ymin'])), (0, 255, 0), 5)
# cv2.imwrite(depth_dir.split('_')[0]+'_boundingbox.png',img)
cv2.imwrite('C:\\Users\\chaoz\\Desktop\\ss\\'+RGB_dirlist[i],img)
|
[
"xml.etree.ElementTree.parse",
"cv2.imwrite",
"os.path.exists",
"cv2.imread",
"os.path.split",
"os.path.join",
"os.listdir"
] |
[((340, 361), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (350, 361), False, 'import os\n'), ((543, 582), 'os.path.join', 'os.path.join', (['image_dir', 'RGB_dirlist[i]'], {}), '(image_dir, RGB_dirlist[i])\n', (555, 582), False, 'import os\n'), ((676, 699), 'os.path.exists', 'os.path.exists', (['xml_dir'], {}), '(xml_dir)\n', (690, 699), False, 'import os\n'), ((729, 746), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_dir'], {}), '(xml_dir)\n', (737, 746), True, 'import xml.etree.ElementTree as ET\n'), ((3103, 3171), 'cv2.imwrite', 'cv2.imwrite', (["('C:\\\\Users\\\\chaoz\\\\Desktop\\\\ss\\\\' + RGB_dirlist[i])", 'img'], {}), "('C:\\\\Users\\\\chaoz\\\\Desktop\\\\ss\\\\' + RGB_dirlist[i], img)\n", (3114, 3171), False, 'import cv2\n'), ((396, 418), 'os.path.split', 'os.path.split', (['pwd_dir'], {}), '(pwd_dir)\n', (409, 418), False, 'import os\n'), ((838, 900), 'cv2.imread', 'cv2.imread', (['RGB_dir', '(cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(RGB_dir, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n', (848, 900), False, 'import cv2\n')]
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the classical Jacobian transform
"""
# pylint: disable=import-outside-toplevel
import pennylane as qml
def classical_jacobian(qnode):
"""Function to extract the Jacobian
matrix of the classical part of a QNode"""
def classical_preprocessing(*args, **kwargs):
"""Returns the trainable gate parameters for
a given QNode input"""
qnode.construct(args, kwargs)
return qml.math.stack(qnode.qtape.get_parameters())
if qnode.interface == "autograd":
return qml.jacobian(classical_preprocessing)
if qnode.interface == "torch":
import torch
def _jacobian(*args, **kwargs): # pylint: disable=unused-argument
return torch.autograd.functional.jacobian(classical_preprocessing, args)
return _jacobian
if qnode.interface == "jax":
import jax
return jax.jacobian(classical_preprocessing)
if qnode.interface == "tf":
import tensorflow as tf
def _jacobian(*args, **kwargs):
with tf.GradientTape() as tape:
tape.watch(args)
gate_params = classical_preprocessing(*args, **kwargs)
return tape.jacobian(gate_params, args)
return _jacobian
|
[
"jax.jacobian",
"torch.autograd.functional.jacobian",
"pennylane.jacobian",
"tensorflow.GradientTape"
] |
[((1152, 1189), 'pennylane.jacobian', 'qml.jacobian', (['classical_preprocessing'], {}), '(classical_preprocessing)\n', (1164, 1189), True, 'import pennylane as qml\n'), ((1516, 1553), 'jax.jacobian', 'jax.jacobian', (['classical_preprocessing'], {}), '(classical_preprocessing)\n', (1528, 1553), False, 'import jax\n'), ((1348, 1413), 'torch.autograd.functional.jacobian', 'torch.autograd.functional.jacobian', (['classical_preprocessing', 'args'], {}), '(classical_preprocessing, args)\n', (1382, 1413), False, 'import torch\n'), ((1683, 1700), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1698, 1700), True, 'import tensorflow as tf\n')]
|
import os
import sys
import platform
import numpy
import threading
import ctypes
import string
import random
import requests
import json
from colorama import Fore
VALID = 0
INVALID = 0
BOOST_LENGTH = 24
CLASSIC_LENGTH = 16
CODESET = []
BASEURL = "https://discord.gift/"
CODESET[:0] = string.ascii_letters + string.digits
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: 0 | Invalid: 0")
os.system("cls")
NOIRGEN = """[40;35m
[40;34mv 1.0.0[40;35m
/$$ /$$ /$$ /$$$$$$
| $$$ | $$ |__/ /$$__ $$
| $$$$| $$ /$$$$$$ /$$ /$$$$$$ | $$ \__/ /$$$$$$ /$$$$$$$
| $$ $$ $$ /$$__ $$| $$ /$$__ $$| $$ /$$$$ /$$__ $$| $$__ $$
| $$ $$$$| $$ \ $$| $$| $$ \__/| $$|_ $$| $$$$$$$$| $$ \ $$
| $$\ $$$| $$ | $$| $$| $$ | $$ \ $$| $$_____/| $$ | $$
| $$ \ $$| $$$$$$/| $$| $$ | $$$$$$/| $$$$$$$| $$ | $$
|__/ \__/ \______/ |__/|__/ \______/ \_______/|__/ |__/
"""
print(NOIRGEN)
for i in range(3):
print('')
CODE_AMOUNT = int(input(" [40;36mCodes to Generate => "))
for i in range(2):
print('')
BOOST_CLASSIC = str(input(" [40;32mBoost or Classic => "))
for i in range(2):
print('')
THREAD_COUNT = int(input(" [40;31mThreads => "))
for i in range(5):
print('')
def checkBoost(boostURL):
global VALID
global INVALID
CHECKURL = f"https://discordapp.com/api/v9/entitlements/gift-codes/{boostURL}?with_application=false&with_subscription_plan=true"
resp = requests.get(CHECKURL)
if resp.status_code == 200:
VALID += 1
return True
else:
INVALID += 1
return False
def genBoost():
global VALID
global INVALID
for i in range(CODE_AMOUNT):
code = numpy.random.choice(CODESET, size=[CODE_AMOUNT, BOOST_LENGTH])
for i in code:
try:
boostCode = ''.join(e for e in i)
boostURL = BASEURL + boostCode
if checkBoost(boostURL):
with open("valid.txt", "w") as f:
f.write(boostURL + "\n")
print(Fore.GREEN + f"[!] VALID | {boostURL}")
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
else:
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
print(Fore.RED + f"[!] INVALID | {boostURL}")
except Exception as e:
print(e)
print(Fore.RED + "[!] An Error has Occured!")
def checkClassic(classicURL):
global VALID
global INVALID
CHECKURL = f"https://discordapp.com/api/v9/entitlements/gift-codes/{classicURL}?with_application=false&with_subscription_plan=true"
resp = requests.get(CHECKURL)
if resp.status_code == 200:
VALID += 1
return True
else:
INVALID += 1
return False
def genClassic():
global VALID
global INVALID
for i in range(CODE_AMOUNT):
code = numpy.random.choice(CODESET, size=[CODE_AMOUNT, CLASSIC_LENGTH])
for i in code:
try:
classicCode = ''.join(e for e in i)
classicURL = BASEURL + classicCode
if checkClassic(classicURL):
with open("valid.txt", "w") as f:
f.write(classicURL + "\n")
print(Fore.GREEN + f"[!] VALID | {classicURL}")
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
else:
ctypes.windll.kernel32.SetConsoleTitleW(f"NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}")
print(Fore.RED + f"[!] INVALID | {classicURL}")
except Exception as e:
print(e)
print(Fore.RED + "[!] An Error has Occured!")
if BOOST_CLASSIC == "Boost" or "B" or "b" or "boost":
for i in range(THREAD_COUNT):
threading.Thread(target=genBoost).start()
elif BOOST_CLASSIC == "Classic" or "C" or "c" or "classic":
for i in range(THREAD_COUNT):
threading.Thread(target=genClassic).start()
|
[
"threading.Thread",
"os.system",
"ctypes.windll.kernel32.SetConsoleTitleW",
"requests.get",
"numpy.random.choice"
] |
[((330, 422), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: 0 | Invalid: 0"""'], {}), "(\n f'NoirGen and Checker | Valid: 0 | Invalid: 0')\n", (369, 422), False, 'import ctypes\n'), ((418, 434), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (427, 434), False, 'import os\n'), ((2097, 2119), 'requests.get', 'requests.get', (['CHECKURL'], {}), '(CHECKURL)\n', (2109, 2119), False, 'import requests\n'), ((3453, 3475), 'requests.get', 'requests.get', (['CHECKURL'], {}), '(CHECKURL)\n', (3465, 3475), False, 'import requests\n'), ((2355, 2417), 'numpy.random.choice', 'numpy.random.choice', (['CODESET'], {'size': '[CODE_AMOUNT, BOOST_LENGTH]'}), '(CODESET, size=[CODE_AMOUNT, BOOST_LENGTH])\n', (2374, 2417), False, 'import numpy\n'), ((3713, 3777), 'numpy.random.choice', 'numpy.random.choice', (['CODESET'], {'size': '[CODE_AMOUNT, CLASSIC_LENGTH]'}), '(CODESET, size=[CODE_AMOUNT, CLASSIC_LENGTH])\n', (3732, 3777), False, 'import numpy\n'), ((4708, 4741), 'threading.Thread', 'threading.Thread', ([], {'target': 'genBoost'}), '(target=genBoost)\n', (4724, 4741), False, 'import threading\n'), ((2795, 2901), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (2834, 2901), False, 'import ctypes\n'), ((2940, 3046), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (2979, 3046), False, 'import ctypes\n'), ((4169, 4275), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (4208, 4275), False, 'import ctypes\n'), ((4314, 4420), 'ctypes.windll.kernel32.SetConsoleTitleW', 'ctypes.windll.kernel32.SetConsoleTitleW', (['f"""NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}"""'], {}), "(\n f'NoirGen and Checker | Valid: {VALID} | Invalid: {INVALID}')\n", (4353, 4420), False, 'import ctypes\n'), ((4852, 4887), 'threading.Thread', 'threading.Thread', ([], {'target': 'genClassic'}), '(target=genClassic)\n', (4868, 4887), False, 'import threading\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[28]:
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = (r"C:\Users\Marcus\Documents\School Documents\Python Environments\Unit_4\sample1percent.osm")
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
postcodes_re = re.compile(r'^\D*(\d{5}).*')
cities_re = re.compile(r'.+', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
#Makes a dictionary of all of the street types to allow us to create a list to update
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
#If the element key for 'k' is 'addr:street', return the associated value pair
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
""" Here we create a dictionary of type set called street_types,
and turn the open function into a variable for ease of use in the future
Next is my pride and joy, instead of using "for et.iterparse" to iterate directly line by line
through the file instead we use the osm_file var to open the file in memory, and
then turn it into an iterable. This saves a TON of time, as we can iterate on the file
in memory instead of iterating the file line by line. Once we do this, we then iterate through and
for each tag that matches "node" or "way", we check if it is a street name, and if so we run the audit_street_types function.
we then clear the root tree, saving memory and time, close the file, and return the updated street_types dict.
"""
def audit_s(osmfile):
street_types = defaultdict(set)
osm_file = open(osmfile, "r")
# get an iterable
iterable = ET.iterparse(osm_file, events=("start", "end"))
# turn it into an iterator
iterable = iter(iterable)
# get the root element
event, root = iterable.next()
for event, elem in iterable:
if event == "end" and (elem.tag == "node" or elem.tag == "way"):
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
root.clear()
osm_file.close()
return street_types
""" The update_street function takes the information we learned from the audit_s function
and utilizes that to check a manually created mapping dictionary and DONT_UPDATE tuple.
These two objects are created by reading the report from audit_s and choosing how we want to standardize the types.
to go above and beyond, we also standardized prefixes such as N for North. Unfortunely this caused an issue where Highway or Route,
which often had the suffix N would be incorrectly corrected to North, such as Route North.
Therefore we created the DON_UPDATE tuple, and check each value against the tuple, and if there is a match
the value is not updated. To fix the street types, we broke the value into parts
seperated by whitespace using .split(), then change the value if it matches the key found in mapping, to the paired value.
Finally, the seperated parts are then rejoined with a space inbetween using the .join() function.
"""
def update_street(name):
mapping = {"St": "Street",
"Rd.": "Road",
"Rd": "Road",
"N.": "North",
"N": "North",
"S.": "South",
"Blvd": "Boulevard",
"Blvd.": "Boulevard",
"Expy": "Expressway",
"Ln": "Lane",
"Ctr": "Center",
"Ctr.": "Center",
"5th": "Fifth",
"4th": "Fourth",
"3rd": "Third",
"2nd": "Second",
"1st": "First",
#There was a street named just dade...that's it..so I went on google to find the real address, so this corrects that occurance.
"Dade": "South Dade Avenue",
"MO-94": "Highway 94"
}
DONT_UPDATE = ('route','suite')
if name.lower().startswith(DONT_UPDATE):
return name
else:
return ' '.join(mapping.get(part, part).title() for part in name.split())
def dicti(data, item):
"""This function creates a dictionary where postcodes can be held.
The dictionary key will be the postcode itself and the dictionary value
is a count of postcodes that were repeated throughout the dataset."""
data[item] += 1
#This function returns the elem if 'k' matches "addr:postcode"
def is_postcode(elem):
return (elem.attrib['k'] == "addr:postcode")
#This codes is identical in function the the street function of similar name
def audit_p(osmfile):
osm_file = open(OSMFILE, "r")
data = defaultdict(int)
# get an iterable
iterable = ET.iterparse(osm_file, events=("start", "end"))
# turn it into an iterator
iterable = iter(iterable)
# get the root element
event, root = iterable.next()
for event, elem in iterable:
if event == "end" and (elem.tag == "node" or elem.tag == "way"):
for tag in elem.iter("tag"):
if is_postcode(tag):
dicti(data, tag.attrib['v'])
root.clear()
osm_file.close()
return data
# This is the function that actually changes the post code to the proper values
# It is called in the OSM_to_XML file, when writing the changes to the .csv
def update_postcode(postcodes):
output = list()
if re.search(postcodes_re, postcodes):
new_zip = re.search(postcodes_re, postcodes).group(1)
output.append(new_zip)
return ', '.join(str(x) for x in output)
#Once again, this is similar in function to audit_street
def audit_city(city_dict, city_ex):
m = cities_re.search(city_ex)
if m:
city_group = m.group()
city_dict[city_group].add(city_ex)
#Same function as is_postcode, but for addr:city
def is_city(elem):
return (elem.attrib['k'] == "addr:city")
#Same function as audit_s, but for city values.
def audit_C(osmfile):
city_dict = defaultdict(set)
osm_file = open(osmfile, "r")
# get an iterable
iterable = ET.iterparse(osm_file, events=("start", "end"))
# turn it into an iterator
iterable = iter(iterable)
# get the root element
event, root = iterable.next()
for event, elem in iterable:
if event == "end" and (elem.tag == "node" or elem.tag == "way"):
for tag in elem.iter("tag"):
if is_city(tag):
audit_city(city_dict, tag.attrib['v'])
root.clear()
osm_file.close()
return city_dict
""" Same function as the update_street, except instead of it skipping the
the matched tuple, instead it instead uses the ofallon_mapping dict to correct the
inconsistency of some cities being listed as O'fallon and some as O fallon.
"""
def update_city(name):
OFALLON = ('o')
ofallon_mapping = {"O": "O'"}
city_mapping = {"St": "Saint",
"St.": "Saint",
"bridgeton" : "Bridgeton",
"drive-through": "O'Fallon",
"Bass": "Saint",
"Pro": "Charles",
"Drive": "",
"UNINCORPORATED": "Saint Peters",
}
if name.lower().startswith(OFALLON):
return ''.join((ofallon_mapping.get(part, part)).title() for part in name.split())
return ' '.join((city_mapping.get(part, part)).title() for part in name.split())
def test():
street_types = audit_s(OSMFILE)
pprint.pprint(dict(street_types))
postcodes = audit_p(OSMFILE)
pprint.pprint(dict(postcodes))
c_names = audit_C(OSMFILE)
pprint.pprint(dict(c_names))
for st_type, ways in street_types.items():
for name in ways:
better_name = update_street(name)
print (name, "=>", better_name)
if name == "N. Main Ctr.":
assert better_name == "North Main Center"
if name == "Zumbehl Rd":
assert better_name == "Zumbehl Road"
if name == "N 3rd St":
assert better_name == "North Third Street"
if name == "Route N":
assert better_name == "Route N"
for postcode, nums in postcodes.items():
better_code = update_postcode(postcode)
print(postcode, "=>", better_code)
for c_name, ways in c_names.items():
for name in ways:
better_city_name = update_city(name)
print (name, "=>", better_city_name)
if __name__ == '__main__':
test()
# In[ ]:
|
[
"collections.defaultdict",
"xml.etree.cElementTree.iterparse",
"re.search",
"re.compile"
] |
[((267, 308), 're.compile', 're.compile', (['"""\\\\b\\\\S+\\\\.?$"""', 're.IGNORECASE'], {}), "('\\\\b\\\\S+\\\\.?$', re.IGNORECASE)\n", (277, 308), False, 'import re\n'), ((322, 351), 're.compile', 're.compile', (['"""^\\\\D*(\\\\d{5}).*"""'], {}), "('^\\\\D*(\\\\d{5}).*')\n", (332, 351), False, 'import re\n'), ((363, 394), 're.compile', 're.compile', (['""".+"""', 're.IGNORECASE'], {}), "('.+', re.IGNORECASE)\n", (373, 394), False, 'import re\n'), ((1799, 1815), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1810, 1815), False, 'from collections import defaultdict\n'), ((1888, 1935), 'xml.etree.cElementTree.iterparse', 'ET.iterparse', (['osm_file'], {'events': "('start', 'end')"}), "(osm_file, events=('start', 'end'))\n", (1900, 1935), True, 'import xml.etree.cElementTree as ET\n'), ((4787, 4803), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4798, 4803), False, 'from collections import defaultdict\n'), ((4842, 4889), 'xml.etree.cElementTree.iterparse', 'ET.iterparse', (['osm_file'], {'events': "('start', 'end')"}), "(osm_file, events=('start', 'end'))\n", (4854, 4889), True, 'import xml.etree.cElementTree as ET\n'), ((5528, 5562), 're.search', 're.search', (['postcodes_re', 'postcodes'], {}), '(postcodes_re, postcodes)\n', (5537, 5562), False, 'import re\n'), ((6117, 6133), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (6128, 6133), False, 'from collections import defaultdict\n'), ((6206, 6253), 'xml.etree.cElementTree.iterparse', 'ET.iterparse', (['osm_file'], {'events': "('start', 'end')"}), "(osm_file, events=('start', 'end'))\n", (6218, 6253), True, 'import xml.etree.cElementTree as ET\n'), ((5582, 5616), 're.search', 're.search', (['postcodes_re', 'postcodes'], {}), '(postcodes_re, postcodes)\n', (5591, 5616), False, 'import re\n')]
|
import numpy as np
class RidgeRegression:
def __init__(self, bias=True, weight_l2=1e-3, scale=True):
self.bias = bias
self.weight_l2 = weight_l2
self.weights = None
self.scale = scale
def _scale(self, X):
return (X - self._min) / (self._max - self._min)
def fit(self, X, y):
if self.scale:
self._min = X.min(axis=0)
self._max = X.max(axis=0)
X = self._scale(X)
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
n_samples, n_features = X.shape
self.weights = np.linalg.pinv(X.T @ X + self.weight_l2 * np.eye(n_features)) @ X.T @ y
def predict(self, X):
if self.scale:
X = self._scale(X)
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
return X @ self.weights
class LogisticRegression:
def __init__(self, lr=1e-2, bias=True, weight_l2=1e-3):
self.lr = lr
self.bias = bias
self.weight_l2 = weight_l2
self.weights = None
def _sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def fit(self, X, y, max_iter=100):
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
n_samples, n_features = X.shape
self.weights = np.zeros(n_features)
for _ in range(max_iter):
y_hat = self._sigmoid(X @ self.weights)
self.weights -= self.lr * (self.weight_l2 * 2 * self.weights + (1 / n_samples) * X.T @ (y_hat - y))
def predict(self, X):
if self.bias:
X = np.hstack((np.ones((X.shape[0], 1)), X))
return self._sigmoid(X @ self.weights)
|
[
"numpy.eye",
"numpy.ones",
"numpy.zeros",
"numpy.exp"
] |
[((1402, 1422), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (1410, 1422), True, 'import numpy as np\n'), ((1195, 1205), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1201, 1205), True, 'import numpy as np\n'), ((541, 565), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (548, 565), True, 'import numpy as np\n'), ((858, 882), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (865, 882), True, 'import numpy as np\n'), ((1300, 1324), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1307, 1324), True, 'import numpy as np\n'), ((1710, 1734), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1717, 1734), True, 'import numpy as np\n'), ((685, 703), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (691, 703), True, 'import numpy as np\n')]
|
# Generated by Django 3.0.5 on 2021-01-13 10:21
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='JobApplicants',
fields=[
('uid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('candidatename', models.TextField(default='', max_length=100)),
('appliedfor', models.TextField(default='', max_length=100)),
('email', models.EmailField(max_length=254)),
('experience', models.TextField(default='')),
('resumeurl', models.TextField(default='')),
],
),
]
|
[
"django.db.models.DateTimeField",
"django.db.models.UUIDField",
"django.db.models.TextField",
"django.db.models.EmailField"
] |
[((364, 435), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, primary_key=True, serialize=False)\n', (380, 435), False, 'from django.db import migrations, models\n'), ((463, 518), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (483, 518), False, 'from django.db import migrations, models\n'), ((555, 599), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'max_length': '(100)'}), "(default='', max_length=100)\n", (571, 599), False, 'from django.db import migrations, models\n'), ((633, 677), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'max_length': '(100)'}), "(default='', max_length=100)\n", (649, 677), False, 'from django.db import migrations, models\n'), ((706, 739), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (723, 739), False, 'from django.db import migrations, models\n'), ((773, 801), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (789, 801), False, 'from django.db import migrations, models\n'), ((834, 862), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (850, 862), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from tensorflow.keras.utils import Sequence
from core.dataset import augment
from core.image import read_image, preprocess_image
from core.utils import decode_annotation, decode_name
class Dataset(Sequence):
def __init__(self, cfg, verbose=0):
self.verbose = verbose
self.mask = cfg["yolo"]["mask"]
self.anchors = cfg["yolo"]["anchors"]
self.max_boxes = cfg["yolo"]["max_boxes"]
self.strides = cfg["yolo"]["strides"]
self.name_path = cfg['yolo']['name_path']
self.anno_path = cfg["train"]["anno_path"]
self.image_size = cfg["train"]["image_size"]
self.batch_size = cfg["train"]["batch_size"]
self.normal_method = cfg['train']["normal_method"]
self.mosaic = cfg['train']['mosaic']
self.label_smoothing = cfg['train']["label_smoothing"]
self.annotation = decode_annotation(anno_path=self.anno_path)
self.num_anno = len(self.annotation)
self.name = decode_name(name_path=self.name_path)
self.num_classes = len(self.name)
# init
self._image_size = np.random.choice(self.image_size)
self._grid_size = self._image_size // self.strides
def __len__(self):
return int(np.ceil(float(len(self.annotation)) / self.batch_size))
def __getitem__(self, idx):
l_bound = idx * self.batch_size
r_bound = (idx + 1) * self.batch_size
if r_bound > len(self.annotation):
r_bound = len(self.annotation)
l_bound = r_bound - self.batch_size
self._on_batch_start(idx)
batch_image = np.zeros((r_bound - l_bound, self._image_size, self._image_size, 3), dtype=np.float32)
batch_label = [np.zeros((r_bound - l_bound, size, size, len(mask_per_layer) * (5 + self.num_classes)),
dtype=np.float32)
for size, mask_per_layer in zip(self._grid_size, self.mask)]
for i, sub_idx in enumerate(range(l_bound, r_bound)):
image, bboxes, labels = self._getitem(sub_idx)
if self.mosaic:
sub_idx = np.random.choice(np.delete(np.arange(self.num_anno), idx), 3, False)
image2, bboxes2, labels2 = self._getitem(sub_idx[0])
image3, bboxes3, labels3 = self._getitem(sub_idx[1])
image4, bboxes4, labels4 = self._getitem(sub_idx[2])
image, bboxes, labels = augment.mosic(image, bboxes, labels,
image2, bboxes2, labels2,
image3, bboxes3, labels3,
image4, bboxes4, labels4)
if self.normal_method:
image = augment.random_distort(image)
image = augment.random_grayscale(image)
image, bboxes = augment.random_flip_lr(image, bboxes)
image, bboxes = augment.random_rotate(image, bboxes)
image, bboxes, labels = augment.random_crop_and_zoom(image, bboxes, labels,
(self._image_size, self._image_size))
image, bboxes, labels = augment.bbox_filter(image, bboxes, labels)
labels = self._preprocess_true_boxes(bboxes, labels)
batch_image[i] = image
for j in range(len(self.mask)):
batch_label[j][i, :, :, :] = labels[j][:, :, :]
return batch_image, batch_label
def _getitem(self, sub_idx):
path, bboxes, labels = self.annotation[sub_idx]
image = read_image(path)
if len(bboxes) != 0:
bboxes, labels = np.array(bboxes), np.array(labels)
else:
bboxes, labels = np.zeros((0, 4)), np.zeros((0,))
image, bboxes = preprocess_image(image, (self._image_size, self._image_size), bboxes)
labels = augment.onehot(labels, self.num_classes, self.label_smoothing)
return image, bboxes, labels
def _preprocess_true_boxes(self, bboxes, labels):
bboxes_label = [np.zeros((size, size, len(mask_per_layer), 5 + self.num_classes), np.float32)
for size, mask_per_layer in zip(self._grid_size, self.mask)]
bboxes = np.array(bboxes, dtype=np.float32)
# calculate anchor index for true boxes
anchor_area = self.anchors[:, 0] * self.anchors[:, 1]
bboxes_wh = bboxes[:, 2:4] - bboxes[:, 0:2]
bboxes_wh_exp = np.tile(np.expand_dims(bboxes_wh, 1), (1, self.anchors.shape[0], 1))
boxes_area = bboxes_wh_exp[..., 0] * bboxes_wh_exp[..., 1]
intersection = np.minimum(bboxes_wh_exp[..., 0], self.anchors[:, 0]) * np.minimum(bboxes_wh_exp[..., 1],
self.anchors[:, 1])
iou = intersection / (boxes_area + anchor_area - intersection + 1e-8) # (N, A)
best_anchor_idxs = np.argmax(iou, axis=-1) # (N,)
for i, bbox in enumerate(bboxes):
search = np.where(self.mask == best_anchor_idxs[i])
best_detect = search[0][0]
best_anchor = search[1][0]
coord_xy = (bbox[0:2] + bbox[2:4]) * 0.5
coord_xy /= self.strides[best_detect]
coord_xy = coord_xy.astype(np.int)
bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, :4] = bbox
bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, 4:5] = 1.
bboxes_label[best_detect][coord_xy[1], coord_xy[0], best_anchor, 5:] = labels[i, :]
return [layer.reshape([layer.shape[0], layer.shape[1], -1]) for layer in bboxes_label]
def _on_batch_start(self, idx, patience=10):
if idx % patience == 0:
self._image_size = np.random.choice(self.image_size)
self._grid_size = self._image_size // self.strides
if self.verbose:
print('Change image size to', self._image_size)
def on_epoch_end(self):
np.random.shuffle(self.annotation) # shuffle
from core.utils import decode_cfg, load_weights
cfg = decode_cfg("cfgs/custom.yaml")
train_dataset = Dataset(cfg)
|
[
"core.dataset.augment.bbox_filter",
"numpy.argmax",
"core.dataset.augment.random_distort",
"numpy.arange",
"core.image.preprocess_image",
"core.utils.decode_annotation",
"core.dataset.augment.random_rotate",
"core.dataset.augment.random_flip_lr",
"numpy.random.choice",
"numpy.random.shuffle",
"core.dataset.augment.random_grayscale",
"numpy.minimum",
"core.utils.decode_cfg",
"core.dataset.augment.random_crop_and_zoom",
"core.dataset.augment.mosic",
"core.image.read_image",
"core.utils.decode_name",
"core.dataset.augment.onehot",
"numpy.zeros",
"numpy.expand_dims",
"numpy.where",
"numpy.array"
] |
[((6246, 6276), 'core.utils.decode_cfg', 'decode_cfg', (['"""cfgs/custom.yaml"""'], {}), "('cfgs/custom.yaml')\n", (6256, 6276), False, 'from core.utils import decode_cfg, load_weights\n'), ((913, 956), 'core.utils.decode_annotation', 'decode_annotation', ([], {'anno_path': 'self.anno_path'}), '(anno_path=self.anno_path)\n', (930, 956), False, 'from core.utils import decode_annotation, decode_name\n'), ((1022, 1059), 'core.utils.decode_name', 'decode_name', ([], {'name_path': 'self.name_path'}), '(name_path=self.name_path)\n', (1033, 1059), False, 'from core.utils import decode_annotation, decode_name\n'), ((1145, 1178), 'numpy.random.choice', 'np.random.choice', (['self.image_size'], {}), '(self.image_size)\n', (1161, 1178), True, 'import numpy as np\n'), ((1650, 1741), 'numpy.zeros', 'np.zeros', (['(r_bound - l_bound, self._image_size, self._image_size, 3)'], {'dtype': 'np.float32'}), '((r_bound - l_bound, self._image_size, self._image_size, 3), dtype=\n np.float32)\n', (1658, 1741), True, 'import numpy as np\n'), ((3671, 3687), 'core.image.read_image', 'read_image', (['path'], {}), '(path)\n', (3681, 3687), False, 'from core.image import read_image, preprocess_image\n'), ((3883, 3952), 'core.image.preprocess_image', 'preprocess_image', (['image', '(self._image_size, self._image_size)', 'bboxes'], {}), '(image, (self._image_size, self._image_size), bboxes)\n', (3899, 3952), False, 'from core.image import read_image, preprocess_image\n'), ((3970, 4032), 'core.dataset.augment.onehot', 'augment.onehot', (['labels', 'self.num_classes', 'self.label_smoothing'], {}), '(labels, self.num_classes, self.label_smoothing)\n', (3984, 4032), False, 'from core.dataset import augment\n'), ((4332, 4366), 'numpy.array', 'np.array', (['bboxes'], {'dtype': 'np.float32'}), '(bboxes, dtype=np.float32)\n', (4340, 4366), True, 'import numpy as np\n'), ((5028, 5051), 'numpy.argmax', 'np.argmax', (['iou'], {'axis': '(-1)'}), '(iou, axis=-1)\n', (5037, 5051), True, 'import numpy as np\n'), ((6118, 6152), 'numpy.random.shuffle', 'np.random.shuffle', (['self.annotation'], {}), '(self.annotation)\n', (6135, 6152), True, 'import numpy as np\n'), ((3272, 3314), 'core.dataset.augment.bbox_filter', 'augment.bbox_filter', (['image', 'bboxes', 'labels'], {}), '(image, bboxes, labels)\n', (3291, 3314), False, 'from core.dataset import augment\n'), ((4562, 4590), 'numpy.expand_dims', 'np.expand_dims', (['bboxes_wh', '(1)'], {}), '(bboxes_wh, 1)\n', (4576, 4590), True, 'import numpy as np\n'), ((4713, 4766), 'numpy.minimum', 'np.minimum', (['bboxes_wh_exp[..., 0]', 'self.anchors[:, 0]'], {}), '(bboxes_wh_exp[..., 0], self.anchors[:, 0])\n', (4723, 4766), True, 'import numpy as np\n'), ((4769, 4822), 'numpy.minimum', 'np.minimum', (['bboxes_wh_exp[..., 1]', 'self.anchors[:, 1]'], {}), '(bboxes_wh_exp[..., 1], self.anchors[:, 1])\n', (4779, 4822), True, 'import numpy as np\n'), ((5137, 5179), 'numpy.where', 'np.where', (['(self.mask == best_anchor_idxs[i])'], {}), '(self.mask == best_anchor_idxs[i])\n', (5145, 5179), True, 'import numpy as np\n'), ((5890, 5923), 'numpy.random.choice', 'np.random.choice', (['self.image_size'], {}), '(self.image_size)\n', (5906, 5923), True, 'import numpy as np\n'), ((2475, 2593), 'core.dataset.augment.mosic', 'augment.mosic', (['image', 'bboxes', 'labels', 'image2', 'bboxes2', 'labels2', 'image3', 'bboxes3', 'labels3', 'image4', 'bboxes4', 'labels4'], {}), '(image, bboxes, labels, image2, bboxes2, labels2, image3,\n bboxes3, labels3, image4, bboxes4, labels4)\n', (2488, 2593), False, 'from core.dataset import augment\n'), ((2811, 2840), 'core.dataset.augment.random_distort', 'augment.random_distort', (['image'], {}), '(image)\n', (2833, 2840), False, 'from core.dataset import augment\n'), ((2865, 2896), 'core.dataset.augment.random_grayscale', 'augment.random_grayscale', (['image'], {}), '(image)\n', (2889, 2896), False, 'from core.dataset import augment\n'), ((2929, 2966), 'core.dataset.augment.random_flip_lr', 'augment.random_flip_lr', (['image', 'bboxes'], {}), '(image, bboxes)\n', (2951, 2966), False, 'from core.dataset import augment\n'), ((2999, 3035), 'core.dataset.augment.random_rotate', 'augment.random_rotate', (['image', 'bboxes'], {}), '(image, bboxes)\n', (3020, 3035), False, 'from core.dataset import augment\n'), ((3076, 3170), 'core.dataset.augment.random_crop_and_zoom', 'augment.random_crop_and_zoom', (['image', 'bboxes', 'labels', '(self._image_size, self._image_size)'], {}), '(image, bboxes, labels, (self._image_size, self\n ._image_size))\n', (3104, 3170), False, 'from core.dataset import augment\n'), ((3747, 3763), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (3755, 3763), True, 'import numpy as np\n'), ((3765, 3781), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3773, 3781), True, 'import numpy as np\n'), ((3825, 3841), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (3833, 3841), True, 'import numpy as np\n'), ((3843, 3857), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (3851, 3857), True, 'import numpy as np\n'), ((2186, 2210), 'numpy.arange', 'np.arange', (['self.num_anno'], {}), '(self.num_anno)\n', (2195, 2210), True, 'import numpy as np\n')]
|
from setuptools import setup
from scrapy_sticky_meta_params import __version__
with open("README.md") as f:
readme = f.read()
setup(
name="scrapy-sticky-meta-params",
version=__version__,
license="MIT license",
description="A spider middleware that forwards meta params through subsequent requests.",
long_description=readme,
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/heylouiz/scrapy-sticky-meta-params",
packages=["scrapy_sticky_meta_params"],
platforms=["Any"],
keywords="scrapy meta middleware",
include_package_data=True,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
install_requires=["Scrapy>=1.6.0"],
)
|
[
"setuptools.setup"
] |
[((133, 1121), 'setuptools.setup', 'setup', ([], {'name': '"""scrapy-sticky-meta-params"""', 'version': '__version__', 'license': '"""MIT license"""', 'description': '"""A spider middleware that forwards meta params through subsequent requests."""', 'long_description': 'readme', 'long_description_content_type': '"""text/markdown"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/heylouiz/scrapy-sticky-meta-params"""', 'packages': "['scrapy_sticky_meta_params']", 'platforms': "['Any']", 'keywords': '"""scrapy meta middleware"""', 'include_package_data': '(True)', 'classifiers': "['Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9']", 'install_requires': "['Scrapy>=1.6.0']"}), "(name='scrapy-sticky-meta-params', version=__version__, license=\n 'MIT license', description=\n 'A spider middleware that forwards meta params through subsequent requests.'\n , long_description=readme, long_description_content_type=\n 'text/markdown', author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/heylouiz/scrapy-sticky-meta-params', packages=[\n 'scrapy_sticky_meta_params'], platforms=['Any'], keywords=\n 'scrapy meta middleware', include_package_data=True, classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9'], install_requires=[\n 'Scrapy>=1.6.0'])\n", (138, 1121), False, 'from setuptools import setup\n')]
|
#!/usr/bin/env python
# Simple model of receptors diffusing in and out of synapses.
# Simulation of the Dynamcis with the Euler method.
# This simulates the effect of a sudden change in the pool size
#
# <NAME>, January-April 2017
import numpy as np
from matplotlib import pyplot as plt
# parameters
N = 3 # number of synapses
steps = 10000 # number of time steps to simulate
duration = 10.0 # duration in minutes
change_time = 2.0 # time at which number of pool size changes in minutes
ts = duration/steps # time step of the simulation
beta = 60.0/43.0 # transition rate out of slots in 1/min
delta = 1.0/14.0 # removal rate in 1/min
phi = 2.67 # relative pool size
F = 0.9 # set desired filling fraction
# initializations: the w_i and p are set to their steady state values
s = np.zeros(N)
for i in range(0,N):
s[i] = 40.0 + i*20.0
S = sum(s)
gamma = delta*F*S*phi # production rate set to achieve desired p*
alpha = beta/(phi*S*(1-F)) # set alpha accordingly
P = gamma/delta # total number of receptors in steady state
# variables we want to keep track of to plot them at the end:
# 'u' stands for up-regulation and 'd' stands for down-regulation.
# Up- and down-regulation are simulated simultaneously.
pu = np.zeros(steps) # pool size
pd = np.zeros(steps)
wu = np.zeros([N,steps]) # synaptic weights
wd = np.zeros([N,steps])
ru = np.zeros(steps) # relative change of synaptic weights
rd = np.zeros(steps)
times = np.zeros(steps)
pu[0] = P
pd[0] = P
ru[0] = 1.0
rd[0] = 1.0
for i in range(0,N):
wu[i,0] = F*s[i]
wd[i,0] = F*s[i]
# simulation loop
for t in range(0, steps-1):
if t==round(change_time/ts): # change pool size after some time
pu[t]=2.0*P # double number of receptors in the pool
pd[t]=0.0*P # set number of receptors in the pool to zero
Wu = sum(wu[:,t])
Wd = sum(wd[:,t])
wu[:,t+1] = wu[:,t] + ts * (alpha*pu[t] * (s-wu[:,t]) - beta*wu[:,t])
wd[:,t+1] = wd[:,t] + ts * (alpha*pd[t] * (s-wd[:,t]) - beta*wd[:,t])
pu[t+1] = pu[t] + ts * (beta*Wu - alpha*pu[t]*(S-Wu) - delta*pu[t] + gamma)
pd[t+1] = pd[t] + ts * (beta*Wd - alpha*pd[t]*(S-Wd) - delta*pd[t] + gamma)
ru[t+1] = wu[0,t+1]/wu[0,0]*100.0
rd[t+1] = wd[0,t+1]/wd[0,0]*100.0
times[t+1] = ts*(t+1)
# show results
f = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.gca().set_prop_cycle(plt.cycler('color', ['blue', 'green', 'red']))
[line1, line2, line3] = plt.plot(times, np.transpose(wu))
plt.plot(times, np.transpose(wd), ls='dotted')
plt.legend((line3, line2, line1), (r'$w_3$', r'$w_2$', r'$w_1$'), loc=1, fontsize=12)
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel(r'$w_i$', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f.savefig("Fig4A.pdf", bbox_inches='tight')
f2 = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.plot(times, pu, "k")
plt.plot(times, pd, "k", ls='dotted')
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel('pool size', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f2.savefig("Fig4C.pdf", bbox_inches='tight')
f3 = plt.figure(figsize=(4,3))
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
plt.rc('font', **font)
plt.rc('font', serif='Times New Roman')
plt.plot(times, ru, "k")
plt.plot(times, rd, "k", ls='dotted')
plt.axis((0.0, 10.0, 40.0, 140.0))
plt.xlabel(r'$t \; [{\rm min}]$', fontsize=12)
plt.ylabel(r'$w_i(t)/w_i(0) \quad [\%]$', fontsize=12)
plt.title(r'$F=0.9$', fontsize=12)
plt.show()
f3.savefig("Fig4B.pdf", bbox_inches='tight')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.cycler",
"numpy.transpose",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((837, 848), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (845, 848), True, 'import numpy as np\n'), ((1294, 1309), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1302, 1309), True, 'import numpy as np\n'), ((1334, 1349), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1342, 1349), True, 'import numpy as np\n'), ((1355, 1375), 'numpy.zeros', 'np.zeros', (['[N, steps]'], {}), '([N, steps])\n', (1363, 1375), True, 'import numpy as np\n'), ((1402, 1422), 'numpy.zeros', 'np.zeros', (['[N, steps]'], {}), '([N, steps])\n', (1410, 1422), True, 'import numpy as np\n'), ((1427, 1442), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1435, 1442), True, 'import numpy as np\n'), ((1493, 1508), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1501, 1508), True, 'import numpy as np\n'), ((1517, 1532), 'numpy.zeros', 'np.zeros', (['steps'], {}), '(steps)\n', (1525, 1532), True, 'import numpy as np\n'), ((2359, 2385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (2369, 2385), True, 'from matplotlib import pyplot as plt\n'), ((2465, 2487), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (2471, 2487), True, 'from matplotlib import pyplot as plt\n'), ((2488, 2527), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (2494, 2527), True, 'from matplotlib import pyplot as plt\n'), ((2706, 2792), 'matplotlib.pyplot.legend', 'plt.legend', (['(line3, line2, line1)', "('$w_3$', '$w_2$', '$w_1$')"], {'loc': '(1)', 'fontsize': '(12)'}), "((line3, line2, line1), ('$w_3$', '$w_2$', '$w_1$'), loc=1,\n fontsize=12)\n", (2716, 2792), True, 'from matplotlib import pyplot as plt\n'), ((2792, 2839), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\; [{\\\\rm min}]$"""'], {'fontsize': '(12)'}), "('$t \\\\; [{\\\\rm min}]$', fontsize=12)\n", (2802, 2839), True, 'from matplotlib import pyplot as plt\n'), ((2839, 2871), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w_i$"""'], {'fontsize': '(12)'}), "('$w_i$', fontsize=12)\n", (2849, 2871), True, 'from matplotlib import pyplot as plt\n'), ((2873, 2906), 'matplotlib.pyplot.title', 'plt.title', (['"""$F=0.9$"""'], {'fontsize': '(12)'}), "('$F=0.9$', fontsize=12)\n", (2882, 2906), True, 'from matplotlib import pyplot as plt\n'), ((2908, 2918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2916, 2918), True, 'from matplotlib import pyplot as plt\n'), ((2969, 2995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (2979, 2995), True, 'from matplotlib import pyplot as plt\n'), ((3075, 3097), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (3081, 3097), True, 'from matplotlib import pyplot as plt\n'), ((3098, 3137), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (3104, 3137), True, 'from matplotlib import pyplot as plt\n'), ((3138, 3162), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'pu', '"""k"""'], {}), "(times, pu, 'k')\n", (3146, 3162), True, 'from matplotlib import pyplot as plt\n'), ((3163, 3200), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'pd', '"""k"""'], {'ls': '"""dotted"""'}), "(times, pd, 'k', ls='dotted')\n", (3171, 3200), True, 'from matplotlib import pyplot as plt\n'), ((3201, 3248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\; [{\\\\rm min}]$"""'], {'fontsize': '(12)'}), "('$t \\\\; [{\\\\rm min}]$', fontsize=12)\n", (3211, 3248), True, 'from matplotlib import pyplot as plt\n'), ((3248, 3284), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pool size"""'], {'fontsize': '(12)'}), "('pool size', fontsize=12)\n", (3258, 3284), True, 'from matplotlib import pyplot as plt\n'), ((3285, 3318), 'matplotlib.pyplot.title', 'plt.title', (['"""$F=0.9$"""'], {'fontsize': '(12)'}), "('$F=0.9$', fontsize=12)\n", (3294, 3318), True, 'from matplotlib import pyplot as plt\n'), ((3320, 3330), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3328, 3330), True, 'from matplotlib import pyplot as plt\n'), ((3382, 3408), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (3392, 3408), True, 'from matplotlib import pyplot as plt\n'), ((3488, 3510), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (3494, 3510), True, 'from matplotlib import pyplot as plt\n'), ((3511, 3550), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'serif': '"""Times New Roman"""'}), "('font', serif='Times New Roman')\n", (3517, 3550), True, 'from matplotlib import pyplot as plt\n'), ((3552, 3576), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'ru', '"""k"""'], {}), "(times, ru, 'k')\n", (3560, 3576), True, 'from matplotlib import pyplot as plt\n'), ((3577, 3614), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'rd', '"""k"""'], {'ls': '"""dotted"""'}), "(times, rd, 'k', ls='dotted')\n", (3585, 3614), True, 'from matplotlib import pyplot as plt\n'), ((3615, 3649), 'matplotlib.pyplot.axis', 'plt.axis', (['(0.0, 10.0, 40.0, 140.0)'], {}), '((0.0, 10.0, 40.0, 140.0))\n', (3623, 3649), True, 'from matplotlib import pyplot as plt\n'), ((3650, 3697), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t \\\\; [{\\\\rm min}]$"""'], {'fontsize': '(12)'}), "('$t \\\\; [{\\\\rm min}]$', fontsize=12)\n", (3660, 3697), True, 'from matplotlib import pyplot as plt\n'), ((3697, 3752), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$w_i(t)/w_i(0) \\\\quad [\\\\%]$"""'], {'fontsize': '(12)'}), "('$w_i(t)/w_i(0) \\\\quad [\\\\%]$', fontsize=12)\n", (3707, 3752), True, 'from matplotlib import pyplot as plt\n'), ((3752, 3785), 'matplotlib.pyplot.title', 'plt.title', (['"""$F=0.9$"""'], {'fontsize': '(12)'}), "('$F=0.9$', fontsize=12)\n", (3761, 3785), True, 'from matplotlib import pyplot as plt\n'), ((3787, 3797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3795, 3797), True, 'from matplotlib import pyplot as plt\n'), ((2554, 2599), 'matplotlib.pyplot.cycler', 'plt.cycler', (['"""color"""', "['blue', 'green', 'red']"], {}), "('color', ['blue', 'green', 'red'])\n", (2564, 2599), True, 'from matplotlib import pyplot as plt\n'), ((2641, 2657), 'numpy.transpose', 'np.transpose', (['wu'], {}), '(wu)\n', (2653, 2657), True, 'import numpy as np\n'), ((2675, 2691), 'numpy.transpose', 'np.transpose', (['wd'], {}), '(wd)\n', (2687, 2691), True, 'import numpy as np\n'), ((2529, 2538), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2536, 2538), True, 'from matplotlib import pyplot as plt\n')]
|
import subprocess
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import scipy
from scipy.sparse.linalg import lsqr
import time
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.widgets import Slider, RadioButtons
from .geomtools import *
from .emcoords import *
from ripser import ripser
import warnings
"""#########################################
Main Circular Coordinates Class
#########################################"""
SCATTER_SIZE = 50
class CircularCoords(EMCoords):
def __init__(self, X, n_landmarks, distance_matrix=False, prime=41, maxdim=1, verbose=False):
"""
Parameters
----------
X: ndarray(N, d)
A point cloud with N points in d dimensions
n_landmarks: int
Number of landmarks to use
distance_matrix: boolean
If true, treat X as a distance matrix instead of a point cloud
prime : int
Field coefficient with which to compute rips on landmarks
maxdim : int
Maximum dimension of homology. Only dimension 1 is needed for circular coordinates,
but it may be of interest to see other dimensions (e.g. for a torus)
"""
EMCoords.__init__(self, X, n_landmarks, distance_matrix, prime, maxdim, verbose)
self.type_ = "circ"
def get_coordinates(self, perc = 0.99, do_weighted = False, cocycle_idx = [0], partunity_fn = partunity_linear):
"""
Perform circular coordinates via persistent cohomology of
sparse filtrations (<NAME> 2018)
Parameters
----------
perc : float
Percent coverage
do_weighted : boolean
Whether to make a weighted cocycle on the representatives
cocycle_idx : list
Add the cocycles together in this list
partunity_fn: (dist_land_data, r_cover) -> phi
A function from the distances of each landmark to a bump function
"""
## Step 1: Come up with the representative cocycle as a formal sum
## of the chosen cocycles
n_landmarks = self.n_landmarks_
n_data = self.X_.shape[0]
dgm1 = self.dgms_[1]/2.0 #Need so that Cech is included in rips
cohomdeath = -np.inf
cohombirth = np.inf
cocycle = np.zeros((0, 3))
prime = self.prime_
for k in range(len(cocycle_idx)):
cocycle = add_cocycles(cocycle, self.cocycles_[1][cocycle_idx[k]], p=prime)
cohomdeath = max(cohomdeath, dgm1[cocycle_idx[k], 0])
cohombirth = min(cohombirth, dgm1[cocycle_idx[k], 1])
## Step 2: Determine radius for balls
dist_land_data = self.dist_land_data_
dist_land_land = self.dist_land_land_
coverage = np.max(np.min(dist_land_data, 1))
r_cover = (1-perc)*max(cohomdeath, coverage) + perc*cohombirth
self.r_cover_ = r_cover # Store covering radius for reference
if self.verbose:
print("r_cover = %.3g"%r_cover)
## Step 3: Setup coboundary matrix, delta_0, for Cech_{r_cover }
## and use it to find a projection of the cocycle
## onto the image of delta0
#Lift to integer cocycle
val = np.array(cocycle[:, 2])
val[val > (prime-1)/2] -= prime
Y = np.zeros((n_landmarks, n_landmarks))
Y[cocycle[:, 0], cocycle[:, 1]] = val
Y = Y + Y.T
#Select edges that are under the threshold
[I, J] = np.meshgrid(np.arange(n_landmarks), np.arange(n_landmarks))
I = I[np.triu_indices(n_landmarks, 1)]
J = J[np.triu_indices(n_landmarks, 1)]
Y = Y[np.triu_indices(n_landmarks, 1)]
idx = np.arange(len(I))
idx = idx[dist_land_land[I, J] < 2*r_cover]
I = I[idx]
J = J[idx]
Y = Y[idx]
NEdges = len(I)
R = np.zeros((NEdges, 2))
R[:, 0] = J
R[:, 1] = I
#Make a flat array of NEdges weights parallel to the rows of R
if do_weighted:
W = dist_land_land[I, J]
else:
W = np.ones(NEdges)
delta0 = make_delta0(R)
wSqrt = np.sqrt(W).flatten()
WSqrt = scipy.sparse.spdiags(wSqrt, 0, len(W), len(W))
A = WSqrt*delta0
b = WSqrt.dot(Y)
tau = lsqr(A, b)[0]
theta = np.zeros((NEdges, 3))
theta[:, 0] = J
theta[:, 1] = I
theta[:, 2] = -delta0.dot(tau)
theta = add_cocycles(cocycle, theta, real=True)
## Step 4: Create the open covering U = {U_1,..., U_{s+1}} and partition of unity
U = dist_land_data < r_cover
phi = np.zeros_like(dist_land_data)
phi[U] = partunity_fn(dist_land_data[U], r_cover)
# Compute the partition of unity
# varphi_j(b) = phi_j(b)/(phi_1(b) + ... + phi_{n_landmarks}(b))
denom = np.sum(phi, 0)
nzero = np.sum(denom == 0)
if nzero > 0:
warnings.warn("There are %i point not covered by a landmark"%nzero)
denom[denom == 0] = 1
varphi = phi / denom[None, :]
# To each data point, associate the index of the first open set it belongs to
ball_indx = np.argmax(U, 0)
## Step 5: From U_1 to U_{s+1} - (U_1 \cup ... \cup U_s), apply classifying map
# compute all transition functions
theta_matrix = np.zeros((n_landmarks, n_landmarks))
I = np.array(theta[:, 0], dtype = np.int64)
J = np.array(theta[:, 1], dtype = np.int64)
theta = theta[:, 2]
theta = np.mod(theta + 0.5, 1) - 0.5
theta_matrix[I, J] = theta
theta_matrix[J, I] = -theta
class_map = -tau[ball_indx]
for i in range(n_data):
class_map[i] += theta_matrix[ball_indx[i], :].dot(varphi[:, i])
thetas = np.mod(2*np.pi*class_map, 2*np.pi)
return thetas
def update_colors(self):
if len(self.selected) > 0:
idxs = np.array(list(self.selected))
self.selected_plot.set_offsets(self.dgm1_lifetime[idxs, :])
## Step 2: Update circular coordinates on point cloud
thetas = self.coords
c = plt.get_cmap('magma_r')
thetas -= np.min(thetas)
thetas /= np.max(thetas)
thetas = np.array(np.round(thetas*255), dtype=int)
C = c(thetas)
if self.Y.shape[1] == 2:
self.coords_scatter.set_color(C)
else:
self.coords_scatter._facecolor3d = C
self.coords_scatter._edgecolor3d = C
else:
self.selected_plot.set_offsets(np.zeros((0, 2)))
if self.Y.shape[1] == 2:
self.coords_scatter.set_color('C0')
else:
self.coords_scatter._facecolor3d = 'C0'
self.coords_scatter._edgecolor3d = 'C0'
def recompute_coords_dimred(self, clicked = []):
"""
Toggle including a cocycle from a set of points in the
persistence diagram, and update the circular coordinates
colors accordingly
Parameters
----------
clicked: list of int
Indices to toggle
"""
EMCoords.recompute_coords(self, clicked)
self.update_colors()
def onpick_dimred(self, evt):
if evt.artist == self.dgmplot:
## Step 1: Highlight point on persistence diagram
clicked = set(evt.ind.tolist())
self.recompute_coords_dimred(clicked)
self.ax_persistence.figure.canvas.draw()
self.ax_coords.figure.canvas.draw()
return True
def on_perc_slider_move_dimred(self, evt):
self.recompute_coords_dimred()
def on_partunity_selector_change_dimred(self, evt):
self.recompute_coords_dimred()
def plot_dimreduced(self, Y, using_jupyter = True, init_params = {'cocycle_idxs':[], 'perc':0.99, 'partunity_fn':partunity_linear, 'azim':-60, 'elev':30}, dpi=None):
"""
Do an interactive plot of circular coordinates, coloring a dimension
reduced version of the point cloud by the circular coordinates
Parameters
----------
Y: ndarray(N, d)
A 2D point cloud with the same number of points as X
using_jupyter: boolean
Whether this is an interactive plot in jupyter
init_params: dict
The intial parameters. Optional fields of the dictionary are as follows:
{
cocycle_idxs: list of int
A list of cocycles to start with
u: ndarray(3, float)
The initial stereographic north pole
perc: float
The percent coverage to start with
partunity_fn: (dist_land_data, r_cover) -> phi
The partition of unity function to start with
azim: float
Initial azimuth for 3d plots
elev: float
Initial elevation for 3d plots
}
dpi: int
Dot pixels per inch
"""
if Y.shape[1] < 2 or Y.shape[1] > 3:
raise Exception("Dimension reduced version must be in 2D or 3D")
self.Y = Y
if using_jupyter and in_notebook():
import matplotlib
matplotlib.use("nbAgg")
if not dpi:
dpi = compute_dpi(2, 1)
fig = plt.figure(figsize=(DREIMAC_FIG_RES*2, DREIMAC_FIG_RES), dpi=dpi)
## Step 1: Plot H1
self.ax_persistence = fig.add_subplot(121)
self.setup_ax_persistence(y_compress=1.37)
fig.canvas.mpl_connect('pick_event', self.onpick_dimred)
self.selected = set([])
## Step 2: Setup window for choosing coverage / partition of unity type
## and for displaying the chosen cocycle
self.perc_slider, self.partunity_selector, self.selected_cocycle_text, _ = EMCoords.setup_param_chooser_gui(self, fig, 0.25, 0.75, 0.4, 0.5, init_params)
self.perc_slider.on_changed(self.on_perc_slider_move_dimred)
self.partunity_selector.on_clicked(self.on_partunity_selector_change_dimred)
## Step 3: Setup axis for coordinates
if Y.shape[1] == 3:
self.ax_coords = fig.add_subplot(122, projection='3d')
self.coords_scatter = self.ax_coords.scatter(Y[:, 0], Y[:, 1], Y[:, 2], s=SCATTER_SIZE, cmap='magma_r')
set_3dplot_equalaspect(self.ax_coords, Y)
if 'azim' in init_params:
self.ax_coords.azim = init_params['azim']
if 'elev' in init_params:
self.ax_coords.elev = init_params['elev']
else:
self.ax_coords = fig.add_subplot(122)
self.coords_scatter = self.ax_coords.scatter(Y[:, 0], Y[:, 1], s=SCATTER_SIZE, cmap='magma_r')
self.ax_coords.set_aspect('equal')
self.ax_coords.set_title("Dimension Reduced Point Cloud")
if len(init_params['cocycle_idxs']) > 0:
# If some initial cocycle indices were chosen, update
# the plot
self.recompute_coords_dimred(init_params['cocycle_idxs'])
plt.show()
def get_selected_dimreduced_info(self):
"""
Return information about what the user selected and their viewpoint in
the interactive dimension reduced plot
Returns
-------
{
'partunity_fn': (dist_land_data, r_cover) -> phi
The selected function handle for the partition of unity
'cocycle_idxs':ndarray(dtype = int)
Indices of the selected cocycles,
'perc': float
The selected percent coverage,
'azim':float
Azumith if viewing in 3D
'elev':float
Elevation if viewing in 3D
}
"""
ret = EMCoords.get_selected_info(self)
if self.Y.shape[1] == 3:
ret['azim'] = self.ax_coords.azim
ret['elev'] = self.ax_coords.elev
return ret
def update_plot_torii(self, circ_idx):
"""
Update a joint plot of circular coordinates, switching between
2D and 3D modes if necessary
Parameters
----------
circ_idx: int
Index of the circular coordinates that have
been updated
"""
N = self.plots_in_one
n_plots = len(self.plots)
## Step 1: Figure out the index of the involved plot
plot_idx = int(np.floor(circ_idx/N))
plot = self.plots[plot_idx]
## Step 2: Extract the circular coordinates from all
## plots that have at least one cochain representative selected
labels = []
coords = []
for i in range(N):
idx = plot_idx*N + i
c_info = self.coords_info[idx]
if len(c_info['selected']) > 0:
# Only include circular coordinates that have at least
# one persistence dot selected
coords.append(c_info['coords'])
labels.append("Coords {}".format(idx))
## Step 3: Adjust the plot accordingly
if len(labels) > 0:
X = np.array([])
if len(labels) == 1:
# Just a single coordinate; put it on a circle
coords = np.array(coords).flatten()
X = np.array([np.cos(coords), np.sin(coords)]).T
else:
X = np.array(coords).T
updating_axes = False
if X.shape[1] == 3 and plot['axis_2d']:
# Need to switch from 2D to 3D coordinates
self.fig.delaxes(plot['ax'])
plot['axis_2d'] = False
updating_axes = True
elif X.shape[1] == 2 and not plot['axis_2d']:
# Need to switch from 3D to 2D coordinates
self.fig.delaxes(plot['ax'])
plot['axis_2d'] = True
updating_axes = True
if X.shape[1] == 3:
if updating_axes:
plot['ax'] = self.fig.add_subplot(2, n_plots+1, n_plots+3+plot_idx, projection='3d')
plot['coords_scatter'] = plot['ax'].scatter(X[:, 0], X[:, 1], X[:, 2], s=SCATTER_SIZE, c=self.coords_colors)
plot['ax'].set_title('Joint 3D Plot')
else:
plot['coords_scatter'].set_offsets(X)
set_pi_axis_labels(plot['ax'], labels)
else:
if updating_axes:
plot['ax'] = self.fig.add_subplot(2, n_plots+1, n_plots+3+plot_idx)
plot['coords_scatter'] = plot['ax'].scatter(X[:, 0], X[:, 1], s=SCATTER_SIZE, c=self.coords_colors)
else:
plot['coords_scatter'].set_offsets(X)
if len(labels) > 1:
set_pi_axis_labels(plot['ax'], labels)
plot['ax'].set_title('Joint 2D Plot')
else:
plot['ax'].set_xlabel('')
plot['ax'].set_xlim([-1.1, 1.1])
plot['ax'].set_ylabel('')
plot['ax'].set_ylim([-1.1, 1.1])
plot['ax'].set_title(labels[0])
else:
X = np.array([])
if plot['axis_2d']:
X = -2*np.ones((self.X_.shape[0], 2))
else:
X = -2*np.ones((self.X_.shape[0], 3))
plot['coords_scatter'].set_offsets(X)
def recompute_coords_torii(self, clicked = []):
"""
Toggle including a cocycle from a set of points in the
persistence diagram, and update the circular coordinates
joint torii plots accordingly
Parameters
----------
clicked: list of int
Indices to toggle
"""
EMCoords.recompute_coords(self, clicked)
# Save away circular coordinates
self.coords_info[self.selected_coord_idx]['selected'] = self.selected
self.coords_info[self.selected_coord_idx]['coords'] = self.coords
self.update_plot_torii(self.selected_coord_idx)
def onpick_torii(self, evt):
"""
Handle a pick even for the torii plot
"""
if evt.artist == self.dgmplot:
## Step 1: Highlight point on persistence diagram
clicked = set(evt.ind.tolist())
self.recompute_coords_torii(clicked)
self.ax_persistence.figure.canvas.draw()
self.fig.canvas.draw()
return True
def select_torii_coord(self, idx):
"""
Select a particular circular coordinate plot and un-select others
Parameters
----------
idx: int
Index of the plot to select
"""
for i, coordsi in enumerate(self.coords_info):
if i == idx:
self.selected_coord_idx = idx
coordsi = self.coords_info[idx]
# Swap in the appropriate GUI objects for selection
self.selected = coordsi['selected']
self.selected_cocycle_text = coordsi['selected_cocycle_text']
self.perc_slider = coordsi['perc_slider']
self.partunity_selector = coordsi['partunity_selector']
self.persistence_text_labels = coordsi['persistence_text_labels']
self.coords = coordsi['coords']
coordsi['button'].color = 'red'
for j in np.array(list(self.selected)):
self.persistence_text_labels[j].set_text("%i"%j)
idxs = np.array(list(self.selected), dtype=int)
if idxs.size > 0:
self.selected_plot.set_offsets(self.dgm1_lifetime[idxs, :])
else:
self.selected_plot.set_offsets(np.array([[np.nan]*2]))
else:
coordsi['button'].color = 'gray'
self.ax_persistence.set_title("H1 Cocycle Selection: Coordinate {}".format(idx))
def on_perc_slider_move_torii(self, evt, idx):
"""
React to a change in coverage
a particular circular coordinate, and recompute the
coordinates if they aren't trivial
"""
if not self.selected_coord_idx == idx:
self.select_torii_coord(idx)
if len(self.selected) > 0:
self.recompute_coords_torii()
def on_partunity_selector_change_torii(self, evt, idx):
"""
React to a change in partition of unity type for
a particular circular coordinate, and recompute the
coordinates if they aren't trivial
"""
if not self.selected_coord_idx == idx:
self.select_torii_coord(idx)
if len(self.selectd) > 0:
self.recompute_coords_torii()
def on_click_torii_button(self, evt, idx):
"""
React to a click event, and change the selected
circular coordinate if necessary
"""
if not self.selected_coord_idx == idx:
self.select_torii_coord(idx)
def plot_torii(self, f, using_jupyter=True, zoom=1, dpi=None, coords_info=2, plots_in_one = 2, lowerleft_plot = None, lowerleft_3d=False):
"""
Do an interactive plot of circular coordinates, where points are drawn on S1,
on S1 x S1, or S1 x S1 x S1
Parameters
----------
f: Display information for the points
On of three options:
1) A scalar function with which to color the points, represented
as a 1D array
2) A list of colors with which to color the points, specified as
an Nx3 array
3) A list of images to place at each location
using_jupyter: boolean
Whether this is an interactive plot in jupyter
zoom: int
If using patches, the factor by which to zoom in on them
dpi: int
Dot pixels per inch
coords_info: Information about how to perform circular coordinates. There will
be as many plots as the ceil of the number of circular coordinates, and
they will be plotted pairwise.
This parameter is one of two options
1) An int specifying the number of different circular coordinate
functions to compute
2) A list of dictionaries with pre-specified initial parameters for
each circular coordinate. Each dictionary has the following keys:
{
'cocycle_reps': dictionary
A dictionary of cocycle representatives, with the key
as the cocycle index, and the value as the coefficient
TODO: Finish update to support this instead of a set
'perc': float
The percent coverage to start with,
'partunity_fn': (dist_land_data, r_cover) -> phi
The partition of unity function to start with
}
plots_in_one: int
The max number of circular coordinates to put in one plot
lowerleft_plot: function(matplotlib axis)
A function that plots something in the lower left
lowerleft_3d: boolean
Whether the lower left plot is 3D
"""
if plots_in_one < 2 or plots_in_one > 3:
raise Exception("Cannot be fewer than 2 or more than 3 circular coordinates in one plot")
self.plots_in_one = plots_in_one
self.f = f
## Step 1: Figure out how many plots are needed to accommodate all
## circular coordinates
n_plots = 1
if type(coords_info) is int:
n_plots = int(np.ceil(coords_info/plots_in_one))
coords_info = []
else:
n_plots = int(np.ceil(len(coords_info)/plots_in_one))
while len(coords_info) < n_plots*plots_in_one:
coords_info.append({'selected':set([]), 'perc':0.99, 'partunity_fn':partunity_linear})
self.selecting_idx = 0 # Index of circular coordinate which is currently being selected
if using_jupyter and in_notebook():
import matplotlib
matplotlib.use("nbAgg")
if not dpi:
dpi = compute_dpi(n_plots+1, 2)
fig = plt.figure(figsize=(DREIMAC_FIG_RES*(n_plots+1), DREIMAC_FIG_RES*2), dpi=dpi)
self.dpi = dpi
self.fig = fig
## Step 2: Setup H1 plot, along with initially empty text labels
## for each persistence point
self.ax_persistence = fig.add_subplot(2, n_plots+1, 1)
self.setup_ax_persistence()
fig.canvas.mpl_connect('pick_event', self.onpick_torii)
## Step 2: Setup windows for choosing coverage / partition of unity type
## and for displaying the chosen cocycle for each circular coordinate.
## Also store variables for selecting cocycle representatives
width = 1/(n_plots+1)
height = 1/plots_in_one
partunity_keys = tuple(PARTUNITY_FNS.keys())
for i in range(n_plots):
xstart = width*(i+1.4)
for j in range(plots_in_one):
idx = i*plots_in_one+j
# Setup plots and state for a particular circular coordinate
ystart = 0.8 - 0.4*height*j
coords_info[idx]['perc_slider'], coords_info[idx]['partunity_selector'], coords_info[idx]['selected_cocycle_text'], coords_info[idx]['button'] = self.setup_param_chooser_gui(fig, xstart, ystart, width, height, coords_info[idx], idx)
coords_info[idx]['perc_slider'].on_changed(callback_factory(self.on_perc_slider_move_torii, idx))
coords_info[idx]['partunity_selector'].on_clicked = callback_factory(self.on_partunity_selector_change_torii, idx)
coords_info[idx]['button'].on_clicked(callback_factory(self.on_click_torii_button, idx))
dgm = self.dgm1_lifetime
coords_info[idx]['persistence_text_labels'] = [self.ax_persistence.text(dgm[i, 0], dgm[i, 1], '') for i in range(dgm.shape[0])]
coords_info[idx]['idx'] = idx
coords_info[idx]['coords'] = np.zeros(self.X_.shape[0])
self.coords_info = coords_info
## Step 3: Figure out colors of coordinates
self.coords_colors = None
if not (type(f) is list):
# Figure out colormap if images aren't passed along
self.coords_colors = f
if f.size == self.X_.shape[0]:
# Scalar function, so need to apply colormap
c = plt.get_cmap('magma_r')
fscaled = f - np.min(f)
fscaled = fscaled/np.max(fscaled)
C = c(np.array(np.round(fscaled*255), dtype=np.int32))
self.coords_colors = C[:, 0:3]
## Step 4: Setup plots
plots = []
self.n_plots = n_plots
for i in range(n_plots):
# 2D by default, but can change to 3D later
ax = fig.add_subplot(2, n_plots+1, n_plots+3+i)
pix = -2*np.ones(self.X_.shape[0])
plot = {}
plot['ax'] = ax
plot['coords_scatter'] = ax.scatter(pix, pix, s=SCATTER_SIZE, c=self.coords_colors) # Scatterplot for circular coordinates
ax.set_xlim([-1.1, 1.1])
ax.set_ylim([-1.1, 1.1])
plot['axis_2d'] = True
plot['patch_boxes'] = [] # Array of image patch display objects
plots.append(plot)
self.plots = plots
## Step 5: Initialize plots with information passed along
for i in reversed(range(len(coords_info))):
self.select_torii_coord(i)
self.recompute_coords_torii([])
## Step 6: Plot something in the lower left corner if desired
if lowerleft_plot:
if lowerleft_3d:
ax = fig.add_subplot(2, n_plots+1, n_plots+2, projection='3d')
else:
ax = fig.add_subplot(2, n_plots+1, n_plots+2)
lowerleft_plot(ax)
plt.show()
def do_two_circle_test():
"""
Test interactive plotting with two noisy circles of different sizes
"""
prime = 41
np.random.seed(2)
N = 500
X = np.zeros((N*2, 2))
t = np.linspace(0, 1, N+1)[0:N]**1.2
t = 2*np.pi*t
X[0:N, 0] = np.cos(t)
X[0:N, 1] = np.sin(t)
X[N::, 0] = 2*np.cos(t) + 4
X[N::, 1] = 2*np.sin(t) + 4
perm = np.random.permutation(X.shape[0])
X = X[perm, :]
X = X + 0.2*np.random.randn(X.shape[0], 2)
f = np.concatenate((t, t + np.max(t)))
f = f[perm]
fscaled = f - np.min(f)
fscaled = fscaled/np.max(fscaled)
c = plt.get_cmap('magma_r')
C = c(np.array(np.round(fscaled*255), dtype=np.int32))[:, 0:3]
#plt.scatter(X[:, 0], X[:, 1], s=SCATTER_SIZE, c=C)
cc = CircularCoords(X, 100, prime = prime)
#cc.plot_dimreduced(X, using_jupyter=False)
cc.plot_torii(f, coords_info=2, plots_in_one=3)
def do_torus_test():
"""
Test interactive plotting with a torus
"""
prime = 41
np.random.seed(2)
N = 10000
R = 5
r = 2
X = np.zeros((N, 3))
s = np.random.rand(N)*2*np.pi
t = np.random.rand(N)*2*np.pi
X[:, 0] = (R + r*np.cos(s))*np.cos(t)
X[:, 1] = (R + r*np.cos(s))*np.sin(t)
X[:, 2] = r*np.sin(s)
cc = CircularCoords(X, 100, prime=prime)
f = s
def plot_torus(ax):
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=f, cmap='magma_r')
set_3dplot_equalaspect(ax, X)
cc.plot_torii(f, coords_info=2, plots_in_one=2, lowerleft_plot=plot_torus, lowerleft_3d=True)
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"numpy.floor",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.round",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.ceil",
"numpy.triu_indices",
"numpy.mod",
"numpy.min",
"matplotlib.use",
"numpy.cos",
"numpy.random.permutation",
"scipy.sparse.linalg.lsqr",
"numpy.zeros",
"numpy.array",
"numpy.random.rand",
"warnings.warn",
"numpy.sqrt"
] |
[((26405, 26422), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (26419, 26422), True, 'import numpy as np\n'), ((26443, 26463), 'numpy.zeros', 'np.zeros', (['(N * 2, 2)'], {}), '((N * 2, 2))\n', (26451, 26463), True, 'import numpy as np\n'), ((26537, 26546), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (26543, 26546), True, 'import numpy as np\n'), ((26563, 26572), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (26569, 26572), True, 'import numpy as np\n'), ((26648, 26681), 'numpy.random.permutation', 'np.random.permutation', (['X.shape[0]'], {}), '(X.shape[0])\n', (26669, 26681), True, 'import numpy as np\n'), ((26882, 26905), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (26894, 26905), True, 'import matplotlib.pyplot as plt\n'), ((27281, 27298), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (27295, 27298), True, 'import numpy as np\n'), ((27341, 27357), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (27349, 27357), True, 'import numpy as np\n'), ((2406, 2422), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (2414, 2422), True, 'import numpy as np\n'), ((3340, 3363), 'numpy.array', 'np.array', (['cocycle[:, 2]'], {}), '(cocycle[:, 2])\n', (3348, 3363), True, 'import numpy as np\n'), ((3416, 3452), 'numpy.zeros', 'np.zeros', (['(n_landmarks, n_landmarks)'], {}), '((n_landmarks, n_landmarks))\n', (3424, 3452), True, 'import numpy as np\n'), ((3966, 3987), 'numpy.zeros', 'np.zeros', (['(NEdges, 2)'], {}), '((NEdges, 2))\n', (3974, 3987), True, 'import numpy as np\n'), ((4432, 4453), 'numpy.zeros', 'np.zeros', (['(NEdges, 3)'], {}), '((NEdges, 3))\n', (4440, 4453), True, 'import numpy as np\n'), ((4748, 4777), 'numpy.zeros_like', 'np.zeros_like', (['dist_land_data'], {}), '(dist_land_data)\n', (4761, 4777), True, 'import numpy as np\n'), ((4967, 4981), 'numpy.sum', 'np.sum', (['phi', '(0)'], {}), '(phi, 0)\n', (4973, 4981), True, 'import numpy as np\n'), ((4998, 5016), 'numpy.sum', 'np.sum', (['(denom == 0)'], {}), '(denom == 0)\n', (5004, 5016), True, 'import numpy as np\n'), ((5298, 5313), 'numpy.argmax', 'np.argmax', (['U', '(0)'], {}), '(U, 0)\n', (5307, 5313), True, 'import numpy as np\n'), ((5478, 5514), 'numpy.zeros', 'np.zeros', (['(n_landmarks, n_landmarks)'], {}), '((n_landmarks, n_landmarks))\n', (5486, 5514), True, 'import numpy as np\n'), ((5527, 5564), 'numpy.array', 'np.array', (['theta[:, 0]'], {'dtype': 'np.int64'}), '(theta[:, 0], dtype=np.int64)\n', (5535, 5564), True, 'import numpy as np\n'), ((5579, 5616), 'numpy.array', 'np.array', (['theta[:, 1]'], {'dtype': 'np.int64'}), '(theta[:, 1], dtype=np.int64)\n', (5587, 5616), True, 'import numpy as np\n'), ((5928, 5968), 'numpy.mod', 'np.mod', (['(2 * np.pi * class_map)', '(2 * np.pi)'], {}), '(2 * np.pi * class_map, 2 * np.pi)\n', (5934, 5968), True, 'import numpy as np\n'), ((9551, 9618), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(DREIMAC_FIG_RES * 2, DREIMAC_FIG_RES)', 'dpi': 'dpi'}), '(figsize=(DREIMAC_FIG_RES * 2, DREIMAC_FIG_RES), dpi=dpi)\n', (9561, 9618), True, 'import matplotlib.pyplot as plt\n'), ((11293, 11303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11301, 11303), True, 'import matplotlib.pyplot as plt\n'), ((22477, 22564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(DREIMAC_FIG_RES * (n_plots + 1), DREIMAC_FIG_RES * 2)', 'dpi': 'dpi'}), '(figsize=(DREIMAC_FIG_RES * (n_plots + 1), DREIMAC_FIG_RES * 2),\n dpi=dpi)\n', (22487, 22564), True, 'import matplotlib.pyplot as plt\n'), ((26260, 26270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26268, 26270), True, 'import matplotlib.pyplot as plt\n'), ((26826, 26835), 'numpy.min', 'np.min', (['f'], {}), '(f)\n', (26832, 26835), True, 'import numpy as np\n'), ((26858, 26873), 'numpy.max', 'np.max', (['fscaled'], {}), '(fscaled)\n', (26864, 26873), True, 'import numpy as np\n'), ((27458, 27467), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (27464, 27467), True, 'import numpy as np\n'), ((27500, 27509), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (27506, 27509), True, 'import numpy as np\n'), ((27526, 27535), 'numpy.sin', 'np.sin', (['s'], {}), '(s)\n', (27532, 27535), True, 'import numpy as np\n'), ((2878, 2903), 'numpy.min', 'np.min', (['dist_land_data', '(1)'], {}), '(dist_land_data, 1)\n', (2884, 2903), True, 'import numpy as np\n'), ((3599, 3621), 'numpy.arange', 'np.arange', (['n_landmarks'], {}), '(n_landmarks)\n', (3608, 3621), True, 'import numpy as np\n'), ((3623, 3645), 'numpy.arange', 'np.arange', (['n_landmarks'], {}), '(n_landmarks)\n', (3632, 3645), True, 'import numpy as np\n'), ((3661, 3692), 'numpy.triu_indices', 'np.triu_indices', (['n_landmarks', '(1)'], {}), '(n_landmarks, 1)\n', (3676, 3692), True, 'import numpy as np\n'), ((3708, 3739), 'numpy.triu_indices', 'np.triu_indices', (['n_landmarks', '(1)'], {}), '(n_landmarks, 1)\n', (3723, 3739), True, 'import numpy as np\n'), ((3755, 3786), 'numpy.triu_indices', 'np.triu_indices', (['n_landmarks', '(1)'], {}), '(n_landmarks, 1)\n', (3770, 3786), True, 'import numpy as np\n'), ((4190, 4205), 'numpy.ones', 'np.ones', (['NEdges'], {}), '(NEdges)\n', (4197, 4205), True, 'import numpy as np\n'), ((4402, 4412), 'scipy.sparse.linalg.lsqr', 'lsqr', (['A', 'b'], {}), '(A, b)\n', (4406, 4412), False, 'from scipy.sparse.linalg import lsqr\n'), ((5051, 5120), 'warnings.warn', 'warnings.warn', (["('There are %i point not covered by a landmark' % nzero)"], {}), "('There are %i point not covered by a landmark' % nzero)\n", (5064, 5120), False, 'import warnings\n'), ((5663, 5685), 'numpy.mod', 'np.mod', (['(theta + 0.5)', '(1)'], {}), '(theta + 0.5, 1)\n', (5669, 5685), True, 'import numpy as np\n'), ((6287, 6310), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (6299, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6347), 'numpy.min', 'np.min', (['thetas'], {}), '(thetas)\n', (6339, 6347), True, 'import numpy as np\n'), ((6370, 6384), 'numpy.max', 'np.max', (['thetas'], {}), '(thetas)\n', (6376, 6384), True, 'import numpy as np\n'), ((9457, 9480), 'matplotlib.use', 'matplotlib.use', (['"""nbAgg"""'], {}), "('nbAgg')\n", (9471, 9480), False, 'import matplotlib\n'), ((12651, 12673), 'numpy.floor', 'np.floor', (['(circ_idx / N)'], {}), '(circ_idx / N)\n', (12659, 12673), True, 'import numpy as np\n'), ((13342, 13354), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13350, 13354), True, 'import numpy as np\n'), ((15418, 15430), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15426, 15430), True, 'import numpy as np\n'), ((22375, 22398), 'matplotlib.use', 'matplotlib.use', (['"""nbAgg"""'], {}), "('nbAgg')\n", (22389, 22398), False, 'import matplotlib\n'), ((26470, 26494), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(N + 1)'], {}), '(0, 1, N + 1)\n', (26481, 26494), True, 'import numpy as np\n'), ((26591, 26600), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (26597, 26600), True, 'import numpy as np\n'), ((26623, 26632), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (26629, 26632), True, 'import numpy as np\n'), ((26717, 26747), 'numpy.random.randn', 'np.random.randn', (['X.shape[0]', '(2)'], {}), '(X.shape[0], 2)\n', (26732, 26747), True, 'import numpy as np\n'), ((27366, 27383), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (27380, 27383), True, 'import numpy as np\n'), ((27400, 27417), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (27414, 27417), True, 'import numpy as np\n'), ((4254, 4264), 'numpy.sqrt', 'np.sqrt', (['W'], {}), '(W)\n', (4261, 4264), True, 'import numpy as np\n'), ((6415, 6437), 'numpy.round', 'np.round', (['(thetas * 255)'], {}), '(thetas * 255)\n', (6423, 6437), True, 'import numpy as np\n'), ((6741, 6757), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (6749, 6757), True, 'import numpy as np\n'), ((21895, 21930), 'numpy.ceil', 'np.ceil', (['(coords_info / plots_in_one)'], {}), '(coords_info / plots_in_one)\n', (21902, 21930), True, 'import numpy as np\n'), ((24368, 24394), 'numpy.zeros', 'np.zeros', (['self.X_.shape[0]'], {}), '(self.X_.shape[0])\n', (24376, 24394), True, 'import numpy as np\n'), ((24778, 24801), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (24790, 24801), True, 'import matplotlib.pyplot as plt\n'), ((25270, 25295), 'numpy.ones', 'np.ones', (['self.X_.shape[0]'], {}), '(self.X_.shape[0])\n', (25277, 25295), True, 'import numpy as np\n'), ((26780, 26789), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (26786, 26789), True, 'import numpy as np\n'), ((26925, 26948), 'numpy.round', 'np.round', (['(fscaled * 255)'], {}), '(fscaled * 255)\n', (26933, 26948), True, 'import numpy as np\n'), ((27447, 27456), 'numpy.cos', 'np.cos', (['s'], {}), '(s)\n', (27453, 27456), True, 'import numpy as np\n'), ((27489, 27498), 'numpy.cos', 'np.cos', (['s'], {}), '(s)\n', (27495, 27498), True, 'import numpy as np\n'), ((13606, 13622), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (13614, 13622), True, 'import numpy as np\n'), ((15486, 15516), 'numpy.ones', 'np.ones', (['(self.X_.shape[0], 2)'], {}), '((self.X_.shape[0], 2))\n', (15493, 15516), True, 'import numpy as np\n'), ((15558, 15588), 'numpy.ones', 'np.ones', (['(self.X_.shape[0], 3)'], {}), '((self.X_.shape[0], 3))\n', (15565, 15588), True, 'import numpy as np\n'), ((24832, 24841), 'numpy.min', 'np.min', (['f'], {}), '(f)\n', (24838, 24841), True, 'import numpy as np\n'), ((24876, 24891), 'numpy.max', 'np.max', (['fscaled'], {}), '(fscaled)\n', (24882, 24891), True, 'import numpy as np\n'), ((13476, 13492), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (13484, 13492), True, 'import numpy as np\n'), ((17992, 18016), 'numpy.array', 'np.array', (['[[np.nan] * 2]'], {}), '([[np.nan] * 2])\n', (18000, 18016), True, 'import numpy as np\n'), ((24923, 24946), 'numpy.round', 'np.round', (['(fscaled * 255)'], {}), '(fscaled * 255)\n', (24931, 24946), True, 'import numpy as np\n'), ((13533, 13547), 'numpy.cos', 'np.cos', (['coords'], {}), '(coords)\n', (13539, 13547), True, 'import numpy as np\n'), ((13549, 13563), 'numpy.sin', 'np.sin', (['coords'], {}), '(coords)\n', (13555, 13563), True, 'import numpy as np\n')]
|
# Import modified 'os' module with LC_LANG set so click doesn't complain
from .os_utils import os # noqa: F401
from collections import defaultdict
import click
DELIMITER = "X"
FASTA_PREFIX = "aligned_sequences"
CELL_BARCODE = 'CB'
UMI = 'UB'
BAM_FILENAME = 'possorted_genome_bam.bam'
BARCODES_TSV = 'barcodes.tsv'
def read_single_column(filename):
"""Read single-column barcodes.tsv and genes.tsv files from 10x"""
with open(filename) as f:
lines = set(line.strip() for line in f)
return lines
def read_10x_folder(folder):
"""Get QC-pass barcodes, genes, and bam file from a 10x folder
Parameters
----------
folder : str
Name of a 10x cellranger output folder containing
'possorted_genome_bam.bam' and 'barcodes.tsv' files
Returns
-------
barcodes : list
List of QC-passing barcodes from 'barcodes.tsv'
bam_file : bamnostic.AlignmentFile
Iterator over possorted_genome_bam.bam file
"""
import bamnostic as bs
barcodes = read_single_column(os.path.join(folder, BARCODES_TSV))
bam_file = bs.AlignmentFile(os.path.join(folder, BAM_FILENAME), mode='rb')
return barcodes, bam_file
def _pass_alignment_qc(alignment, barcodes):
"""Assert high quality mapping, QC-passing barcode and UMI of alignment"""
high_quality_mapping = alignment.mapq == 255
good_barcode = CELL_BARCODE in alignment.tags and \
alignment.get_tag(CELL_BARCODE) in barcodes
good_umi = UMI in alignment.tags
pass_qc = high_quality_mapping and good_barcode and good_umi
return pass_qc
def _parse_barcode_renamer(barcodes, barcode_renamer):
"""
:param barcodes:
:param barcode_renamer:
:return:
"""
if barcode_renamer is not None:
renamer = {}
with open(barcode_renamer) as f:
for line in f.readlines():
barcode, renamed = line.split()
assert barcode in barcodes
renamer[barcode] = renamed
else:
renamer = dict(zip(barcodes, barcodes))
return renamer
def barcode_iterator(bam, barcodes, barcode_renamer):
"""Yield a (barcode, list of str) pair for each QC-pass barcode"""
bam_filtered = (x for x in bam if _pass_alignment_qc(x, barcodes))
renamer = _parse_barcode_renamer(barcodes, barcode_renamer)
# alignments only have a CELL_BARCODE tag if they past QC
bam_sort_by_barcode = sorted(bam_filtered,
key=lambda x: x.get_tag(CELL_BARCODE))
previous_barcode = None
barcode_alignments = []
for alignment in bam_sort_by_barcode:
# Get barcode of alignment, looks like "AAATGCCCAAACTGCT-1"
barcode = alignment.get_tag(CELL_BARCODE)
# If this is a new non-null barcode, return all previous sequences
if previous_barcode is not None and barcode != previous_barcode:
yield renamer[previous_barcode], barcode_alignments
# Reset the barcode alignments
barcode_alignments = []
# Add only the aligned sequence to this list of barcode alignments
barcode_alignments.append(alignment.seq)
# Set this current barcode as the previous one
previous_barcode = barcode
# Yield the final one
yield renamer[previous_barcode], barcode_alignments
def _write_all_cells_in_one_file(cell_sequences, output_folder, fasta_prefix):
filename = os.path.join(output_folder,
f"{fasta_prefix}.fasta")
with open(filename, "w") as f:
for cell, seq in cell_sequences.items():
f.write(f">{cell}\n{seq}")
# this "pass" makes PyCharm happy
pass
return filename
def _write_one_cell_per_file(cell_sequences, output_folder, fasta_prefix):
os.makedirs(output_folder, exist_ok=True)
filenames = []
for cell, seq in cell_sequences.items():
filename = os.path.join(output_folder, f"{fasta_prefix}_{cell}.fasta")
with open(filename, "w") as f:
f.write(f">{cell}\n{seq}")
filenames.append(filename)
return filenames
def write_cell_sequences(cell_sequences, output_folder,
one_cell_per_file=False, fasta_prefix=FASTA_PREFIX):
if one_cell_per_file:
filenames = _write_one_cell_per_file(cell_sequences, output_folder,
fasta_prefix)
else:
filename = _write_all_cells_in_one_file(cell_sequences, output_folder,
fasta_prefix)
filenames = [filename]
return filenames
def bam_to_fasta(bam, barcodes, barcode_renamer, output_folder, delimiter="X",
one_cell_per_file=False, fasta_prefix=FASTA_PREFIX):
"""Convert 10x bam to one-record-per-cell fasta
Parameters
----------
bam : bamnostic.AlignmentFile
barcodes : list of str
QC-passing barcodes
barcode_renamer : str or None
Tab-separated filename mapping a barcode to a new name, e.g.
AAATGCCCAAACTGCT-1 lung_epithelial_cell|AAATGCCCAAACTGCT-1
delimiter : str, default "X"
Non-DNA or protein alphabet character to be ignored
Returns
-------
filenames : list
List of fasta filenames written
"""
bam_filtered = (x for x in bam if _pass_alignment_qc(x, barcodes))
renamer = _parse_barcode_renamer(barcodes, barcode_renamer)
cell_sequences = defaultdict(str)
for alignment in bam_filtered:
# Get barcode of alignment, looks like "AAATGCCCAAACTGCT-1"
barcode = alignment.get_tag(CELL_BARCODE)
renamed = renamer[barcode]
# Make a long string of all the cell sequences, separated
# by a non-alphabet letter
cell_sequences[renamed] += alignment.seq + delimiter + "\n"
return write_cell_sequences(cell_sequences, output_folder,
one_cell_per_file, fasta_prefix)
@click.command()
@click.argument("tenx_folder")
@click.option('--all-cells-in-one-file/--one-cell-per-file', default=True,
help="Create a single fasta, with each cell as a separate "
"record, whose sequences are separated by the delimiter "
f"'{DELIMITER}' (default), or create many fasta files, "
"one per cell")
@click.option('--barcode-renamer',
help="Tab-separated file mapping barcodes (column 1) to renamed "
"ids (column 2)")
@click.option("--output-folder", help="Folder to output to. Default is "
"current directory", default=".")
@click.option('--fasta-prefix', help="Filename prefix to use ",
default=FASTA_PREFIX)
@click.option('--delimiter', default=DELIMITER)
def fasta(tenx_folder, all_cells_in_one_file, barcode_renamer=None,
output_folder=".", fasta_prefix=FASTA_PREFIX, delimiter=DELIMITER):
"""Convert 10x bam to fasta of aligned sequences
Parameters
----------
tenx_folder : str
Location of tenx folder containing possorted_genome_bam.bam and
barcodes.tsv files
"""
barcodes, bam = read_10x_folder(tenx_folder)
one_cell_per_file = not all_cells_in_one_file
filenames = bam_to_fasta(bam, barcodes, barcode_renamer=barcode_renamer,
output_folder=output_folder, delimiter=delimiter,
fasta_prefix=fasta_prefix,
one_cell_per_file=one_cell_per_file)
if len(filenames) == 1:
filename = filenames[0]
click.echo(f"Wrote {filename}")
else:
n_files = len(filenames)
click.echo(f"Wrote {n_files} fasta files in {output_folder}")
|
[
"click.argument",
"click.echo",
"click.option",
"click.command",
"collections.defaultdict"
] |
[((5977, 5992), 'click.command', 'click.command', ([], {}), '()\n', (5990, 5992), False, 'import click\n'), ((5994, 6023), 'click.argument', 'click.argument', (['"""tenx_folder"""'], {}), "('tenx_folder')\n", (6008, 6023), False, 'import click\n'), ((6025, 6294), 'click.option', 'click.option', (['"""--all-cells-in-one-file/--one-cell-per-file"""'], {'default': '(True)', 'help': 'f"""Create a single fasta, with each cell as a separate record, whose sequences are separated by the delimiter \'{DELIMITER}\' (default), or create many fasta files, one per cell"""'}), '(\'--all-cells-in-one-file/--one-cell-per-file\', default=True,\n help=\n f"Create a single fasta, with each cell as a separate record, whose sequences are separated by the delimiter \'{DELIMITER}\' (default), or create many fasta files, one per cell"\n )\n', (6037, 6294), False, 'import click\n'), ((6362, 6481), 'click.option', 'click.option', (['"""--barcode-renamer"""'], {'help': '"""Tab-separated file mapping barcodes (column 1) to renamed ids (column 2)"""'}), "('--barcode-renamer', help=\n 'Tab-separated file mapping barcodes (column 1) to renamed ids (column 2)')\n", (6374, 6481), False, 'import click\n'), ((6514, 6621), 'click.option', 'click.option', (['"""--output-folder"""'], {'help': '"""Folder to output to. Default is current directory"""', 'default': '"""."""'}), "('--output-folder', help=\n 'Folder to output to. Default is current directory', default='.')\n", (6526, 6621), False, 'import click\n'), ((6659, 6748), 'click.option', 'click.option', (['"""--fasta-prefix"""'], {'help': '"""Filename prefix to use """', 'default': 'FASTA_PREFIX'}), "('--fasta-prefix', help='Filename prefix to use ', default=\n FASTA_PREFIX)\n", (6671, 6748), False, 'import click\n'), ((6759, 6805), 'click.option', 'click.option', (['"""--delimiter"""'], {'default': 'DELIMITER'}), "('--delimiter', default=DELIMITER)\n", (6771, 6805), False, 'import click\n'), ((5468, 5484), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (5479, 5484), False, 'from collections import defaultdict\n'), ((7612, 7643), 'click.echo', 'click.echo', (['f"""Wrote {filename}"""'], {}), "(f'Wrote {filename}')\n", (7622, 7643), False, 'import click\n'), ((7695, 7756), 'click.echo', 'click.echo', (['f"""Wrote {n_files} fasta files in {output_folder}"""'], {}), "(f'Wrote {n_files} fasta files in {output_folder}')\n", (7705, 7756), False, 'import click\n')]
|
""" Fix Django's 'write-through' (cache and datastore storage) session
backend to work with Appengine's datastore, along with whatever cache
backend is in settings.
Basically a reworking of django.contrib.sessions.backends.db, so have
a look there for definitive docs.
"""
from google.appengine.ext import ndb
from appengine_sessions.models import Session
from django.contrib.sessions.backends.base import CreateError
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.exceptions import SuspiciousOperation
from django.utils.encoding import force_unicode
from django.conf import settings
from datetime import datetime, timedelta
class SessionStore(DBStore):
"""Implements a session store using Appengine's datastore API instead
of Django's abstracted DB API (since we no longer have nonrel -- just
vanilla Django)
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
def get_ndb_session_key(self,session_key=None):
return ndb.Key(Session, session_key and session_key or self._get_or_create_session_key())
"""
Session Date related methods overridden to handle the NDB DateTimeProperty
get_expiry_age
get_expiry_date
set_expiry
Making sure session dates always use UTC datetimes with no tzinfo
"""
def get_expiry_age(self):
"""Get the number of seconds until the session expires."""
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - datetime.utcnow()
return delta.days * 86400 + delta.seconds
def get_expiry_date(self):
"""Get session the expiry date (as a datetime object).
Overridden to make sure that UTC time is used for NDB datetime
properties """
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return datetime.utcnow() + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = datetime.utcnow() + value
self['_session_expiry'] = value
def load(self):
s = self.get_ndb_session_key().get()
if s:
# Make sure you compare UTC datetime now for NDB.
if s.expire_date > datetime.utcnow():
try:
return self.decode(force_unicode(s.session_data))
except SuspiciousOperation:
return {}
self.create()
return {}
def exists(self, session_key):
# If session key is None then False
if session_key:
ndb_session_key = ndb.Key(Session,session_key)
s = ndb_session_key.get()
return s is not None
return False
def save(self, must_create=False):
"""Create and save a Session object using db.run_in_transaction, with
key_name = session_key, raising CreateError if
unsuccessful.
"""
if must_create:
s = self.get_ndb_session_key().get()
if s:
raise CreateError()
session_data = self._get_session(no_load=must_create)
#ed = self.get_expiry_date()
#print datetime.datetime.utcoffset(ed)
def txn():
s = Session(
id=self._get_or_create_session_key(),
session_key=self.session_key,
session_data=self.encode(session_data),
expire_date=self.get_expiry_date()
)
s.put()
# This is tricky and probably needs some sanity checking, because
# TransactionFailedError can be raised, but the transaction can still
# go on to be committed to the datastore. As far as I can see there's
# no way to manually roll it back at that point. No idea how to test
# this either.
try:
ndb.transaction(txn)
except (ndb.Rollback):
raise CreateError()
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._get_or_create_session_key()
self.get_ndb_session_key(session_key).delete()
# db.delete(db.Key.from_path('Session', session_key))
# Again, circular import fix
|
[
"google.appengine.ext.ndb.transaction",
"django.utils.encoding.force_unicode",
"django.contrib.sessions.backends.base.CreateError",
"datetime.datetime.utcnow",
"datetime.timedelta",
"google.appengine.ext.ndb.Key"
] |
[((1718, 1735), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1733, 1735), False, 'from datetime import datetime, timedelta\n'), ((2216, 2233), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2231, 2233), False, 'from datetime import datetime, timedelta\n'), ((2236, 2261), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'expiry'}), '(seconds=expiry)\n', (2245, 2261), False, 'from datetime import datetime, timedelta\n'), ((3744, 3773), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['Session', 'session_key'], {}), '(Session, session_key)\n', (3751, 3773), False, 'from google.appengine.ext import ndb\n'), ((4995, 5015), 'google.appengine.ext.ndb.transaction', 'ndb.transaction', (['txn'], {}), '(txn)\n', (5010, 5015), False, 'from google.appengine.ext import ndb\n'), ((3144, 3161), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3159, 3161), False, 'from datetime import datetime, timedelta\n'), ((3386, 3403), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3401, 3403), False, 'from datetime import datetime, timedelta\n'), ((4193, 4206), 'django.contrib.sessions.backends.base.CreateError', 'CreateError', ([], {}), '()\n', (4204, 4206), False, 'from django.contrib.sessions.backends.base import CreateError\n'), ((5065, 5078), 'django.contrib.sessions.backends.base.CreateError', 'CreateError', ([], {}), '()\n', (5076, 5078), False, 'from django.contrib.sessions.backends.base import CreateError\n'), ((3465, 3494), 'django.utils.encoding.force_unicode', 'force_unicode', (['s.session_data'], {}), '(s.session_data)\n', (3478, 3494), False, 'from django.utils.encoding import force_unicode\n')]
|