code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def swahili(path):
  """Swahili: attitudes towards the Swahili language among Kenyan
  school children.

  A dataset with 480 observations on 4 variables: `Province`
  (`NAIROBI` or `PWANI`), `Sex` (`female` or `male`),
  `Attitude.Score` (score out of a possible 200 points on a survey of
  attitude towards the Swahili language) and `School` (code `A`
  through `L`).

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `swahili.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 480 rows and 4 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'swahili.csv'
  # Resolve the target file once; download it on a cache miss.
  csv_path = os.path.join(path, filename)
  if not os.path.exists(csv_path):
    url = 'http://dustintran.com/data/r/Stat2Data/Swahili.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='swahili.csv',
                               resume=False)
  data = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
| [
"observations.util.maybe_download_and_extract",
"os.path.join",
"os.path.expanduser"
] | [((995, 1019), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (1013, 1019), False, 'import os\n'), ((1169, 1255), 'observations.util.maybe_download_and_extract', 'maybe_download_and_extract', (['path', 'url'], {'save_file_name': '"""swahili.csv"""', 'resume': '(False)'}), "(path, url, save_file_name='swahili.csv', resume=\n False)\n", (1195, 1255), False, 'from observations.util import maybe_download_and_extract\n'), ((1335, 1363), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1347, 1363), False, 'import os\n'), ((1071, 1099), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1083, 1099), False, 'import os\n')] |
#!/usr/bin/env python
import requests
import re
from hashlib import md5
from search_client import *
from api_credentials import *
## API Configuration
# --------------------------------------------
GOOGLE_WEB_ENTRY = 'http://www.google.com/'
GOOGLE_WEB_FUNC = 'images'
## Search Class
# --------------------------------------------
class GoogleOldWebSearch(requests.Session, SearchClient):
    """Wrapper class for Google Image Search using web interface.

    This class does not use any API, but instead extracts results directly from the
    web search pages (acting as Internet Explorer 7.0).

    Created November 2010, Updated May 2011, confirmed working October 2012.
    Broken as of November 2013.
    """

    def __init__(self, async_query=True, timeout=5.0, **kwargs):
        super(GoogleOldWebSearch, self).__init__()
        # extra keyword arguments become default HTTP headers on the session
        self.headers.update(kwargs)
        self.timeout = timeout
        # one result page of the old web interface carries 20 images
        self._results_per_req = 20
        # generic size/style names -> values of Google's URL parameters
        self._supported_sizes_map = {'small': 's',
                                     'medium': 'm',
                                     'large': 'l'}
        self._supported_styles_map = {'photo': 'photo',
                                      'graphics': 'clipart',
                                      'clipart': 'clipart',
                                      'lineart': 'lineart',
                                      'face': 'face'}
        self.async_query = async_query

    def _fetch_results_from_offset(self, query, result_offset,
                                   aux_params={}, headers={},
                                   num_results=-1):
        # Fetch one result page starting at `result_offset` and scrape image
        # URLs plus thumbnail ids out of the returned HTML.
        # NOTE(review): mutable default arguments (aux_params, headers) are
        # shared across calls, and aux_params is mutated below.
        if num_results == -1:
            num_results = self._results_per_req
        # image URL appears in the /imgres redirect link; the id in the
        # thumbnail ("tbn:") attribute of the markup
        image_url_pattern = re.compile(r'/imgres\?imgurl=(.*?)&')
        image_id_pattern = re.compile(r'tbn:(.*?)"')
        try:
            # add query position to auxilary parameters
            aux_params['q'] = query
            aux_params['start'] = result_offset
            resp = self.get(GOOGLE_WEB_ENTRY + GOOGLE_WEB_FUNC,
                            params=aux_params, headers=headers)
            resp_str = resp.text
            image_urls = image_url_pattern.findall(resp_str)[:(num_results-result_offset)]
            image_ids = image_id_pattern.findall(resp_str)[:(num_results-result_offset)]
            # NOTE(review): item[1] is a str; hashlib.md5 requires bytes on
            # Python 3 -- as written this line only works on Python 2.
            resp_dict = [{'url': item[0],
                          'image_id': md5(item[1]).hexdigest()} for item in zip(image_urls, image_ids)]
            return resp_dict
        except requests.exceptions.RequestException:
            # network failures yield an empty result set instead of raising
            return []

    def query(self, query, size='medium', style='photo', num_results=100):
        """Run an image search and return the scraped result dictionaries."""
        # prepare query parameters (translation helpers come from SearchClient)
        size = self._size_to_native_size(size)
        style = self._style_to_native_style(style)

        # prepare auxilary parameters (contained in tbs)
        tbs_list = []
        if size:
            tbs_list.append('isz:%s' % size)
        if style:
            tbs_list.append('itp:%s' % style)
        tbs_str = ','.join(tbs_list)

        aux_params = {}
        if tbs_str:
            aux_params['tbs'] = tbs_str
        # act as Internet Explorer 7.0 so Google serves the old markup
        headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)'}

        # do request (_fetch_results is provided by the SearchClient base)
        results = self._fetch_results(query,
                                      num_results,
                                      aux_params=aux_params,
                                      headers=headers)
        return results
| [
"hashlib.md5",
"re.compile"
] | [((1717, 1754), 're.compile', 're.compile', (['"""/imgres\\\\?imgurl=(.*?)&"""'], {}), "('/imgres\\\\?imgurl=(.*?)&')\n", (1727, 1754), False, 'import re\n'), ((1782, 1806), 're.compile', 're.compile', (['"""tbn:(.*?)\\""""'], {}), '(\'tbn:(.*?)"\')\n', (1792, 1806), False, 'import re\n'), ((2398, 2410), 'hashlib.md5', 'md5', (['item[1]'], {}), '(item[1])\n', (2401, 2410), False, 'from hashlib import md5\n')] |
import math
def f(x):
    """Integrand: f(x) = x^2 + 3x + 15."""
    return math.pow(x, 2) + 3 * x + 15


def riemannIntegral(interval, a):
    """Approximate the integral of ``f`` over *interval* with a right
    Riemann sum.

    Args:
        interval: two-element sequence ``[lower, upper]`` of the
            integration bounds.
        a: number of equal-width subintervals (rectangles); must be a
            positive integer.

    Returns:
        The right-endpoint Riemann-sum approximation as a float.
    """
    x = interval[0]
    step = (interval[1] - interval[0]) / a
    integral = 0.0
    # Bug fix: the original iterated over range(interval[0], a), which only
    # produces `a` rectangles when the lower bound is 0 (and breaks entirely
    # for non-integer bounds). range(a) always yields exactly `a` rectangles.
    for _ in range(a):
        x1 = x + step
        # right-endpoint rectangle: width * height at the right edge
        integral += step * f(x1)
        x = x1
    return integral
# Print right-Riemann-sum approximations of f over [0, 5] with an
# increasingly fine partition.
for subintervals in (5, 8, 1_000_000):
    print(riemannIntegral([0, 5], subintervals))
"math.pow"
] | [((34, 48), 'math.pow', 'math.pow', (['x', '(2)'], {}), '(x, 2)\n', (42, 48), False, 'import math\n')] |
import pandas as pd
import numpy as np
# Load the housing data set and show a quick head/tail preview.
USAhousing = pd.read_csv('USA_Housing.csv')
for frame_preview in (USAhousing.head(), USAhousing.tail()):
    print(frame_preview)
"pandas.read_csv"
] | [((54, 84), 'pandas.read_csv', 'pd.read_csv', (['"""USA_Housing.csv"""'], {}), "('USA_Housing.csv')\n", (65, 84), True, 'import pandas as pd\n')] |
import pypyodbc
from group_plugin import GroupPlugin, Group
class ODBCGroupPlugin(GroupPlugin):
    """Group plugin that reads and updates group membership via ODBC."""

    def __init__(self):
        super(ODBCGroupPlugin, self).__init__()
        # The DSN and all SQL text come from the plugin configuration.
        self.connection_str = self.get_conf_option('connection_str')
        self.groups_sql = self.get_conf_option('groups_sql')
        self.change_group_sql = self.get_conf_option('change_group_sql')

    def get_list(self):
        """Return every group as a list of Group objects.

        The original wrapped this in ``except ...: raise`` handlers that
        re-raised unconditionally -- pure no-ops, removed here. The
        ``finally`` clause still guarantees the connection is closed.
        """
        connection = pypyodbc.connect(self.connection_str)
        try:
            rows = connection.cursor().execute(self.groups_sql)
            # assumes groups_sql yields (group_id, group_name) rows -- confirm
            return [Group(row[0], row[1]) for row in rows]
        finally:
            connection.close()

    def change_group(self, user_id, group_id):
        """Move *user_id* into *group_id* and commit the change."""
        connection = pypyodbc.connect(self.connection_str)
        try:
            connection.cursor().execute(self.change_group_sql, (group_id, user_id))
            connection.commit()
        finally:
            connection.close()
| [
"pypyodbc.connect",
"group_plugin.Group"
] | [((484, 521), 'pypyodbc.connect', 'pypyodbc.connect', (['self.connection_str'], {}), '(self.connection_str)\n', (500, 521), False, 'import pypyodbc\n'), ((960, 997), 'pypyodbc.connect', 'pypyodbc.connect', (['self.connection_str'], {}), '(self.connection_str)\n', (976, 997), False, 'import pypyodbc\n'), ((645, 666), 'group_plugin.Group', 'Group', (['row[0]', 'row[1]'], {}), '(row[0], row[1])\n', (650, 666), False, 'from group_plugin import GroupPlugin, Group\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-03 09:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``book.note`` optional (blank/NULL)."""

    dependencies = [
        ('book', '0010_auto_20170603_1441'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='note',
            # blank=True permits empty form input; null=True permits NULL rows
            field=models.TextField(blank=True, null=True),
        ),
    ]
| [
"django.db.models.TextField"
] | [((392, 431), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (408, 431), False, 'from django.db import migrations, models\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test de threads pour la batterie de metafor
from future import standard_library
standard_library.install_aliases()
import threading
import queue
import time
class WorkerThread(threading.Thread):
    """Worker that consumes queue items until it receives the 'STOP' sentinel."""

    def __init__(self, num, queue):
        threading.Thread.__init__(self)
        self.num = num
        self.queue = queue

    def run(self):
        # iter() with a sentinel ends the loop as soon as get() returns 'STOP'
        for item in iter(self.queue.get, 'STOP'):
            time.sleep(1)
            print("[%d] => %s" % (self.num, item))
            self.queue.task_done()
        print("[%d] DONE" % self.num)
def main(numthr=3, count=20):
    """Spawn *numthr* worker threads, feed them *count* jobs, then stop them.

    Bug fix: the original assigned the job queue to a local named ``queue``,
    shadowing the imported ``queue`` module, so ``queue.Queue(numthr)``
    raised UnboundLocalError before anything ran. The local is now ``jobs``.
    """
    jobs = queue.Queue(numthr)
    # start the worker threads
    threads = []
    for t in range(numthr):
        tr = WorkerThread(t + 1, jobs)
        threads.append(tr)
        tr.start()
    # fill the queue (blocks when the bounded queue is full)
    for i in range(count):
        print('[main] putting job #%2d' % i)
        jobs.put('job #%2d' % i)
    # one "stop" sentinel per worker
    for t in threads:
        jobs.put('STOP')
    # wait for all workers to finish
    print('[main] joining...')
    for t in threads:
        t.join()
    print('fini!')
# Demo entry point: run with two worker threads.
if __name__ == "__main__":
    main(2)
| [
"threading.Thread.__init__",
"time.sleep",
"future.standard_library.install_aliases",
"queue.put",
"queue.Queue"
] | [((724, 758), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (756, 758), False, 'from future import standard_library\n'), ((1290, 1309), 'queue.Queue', 'queue.Queue', (['numthr'], {}), '(numthr)\n', (1301, 1309), False, 'import queue\n'), ((885, 916), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (910, 916), False, 'import threading\n'), ((1566, 1591), 'queue.put', 'queue.put', (["('job #%2d' % i)"], {}), "('job #%2d' % i)\n", (1575, 1591), False, 'import queue\n'), ((1650, 1667), 'queue.put', 'queue.put', (['"""STOP"""'], {}), "('STOP')\n", (1659, 1667), False, 'import queue\n'), ((1108, 1121), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1118, 1121), False, 'import time\n')] |
from pocketthrone.entities.event import *
from weakref import WeakKeyDictionary
class EventManager:
    """Class-level event bus: listeners register once and then receive
    every fired game event through their ``on_event`` callback."""

    _tag = "[EventManager] "
    # weak keys: a listener that is garbage-collected drops out automatically
    listeners = WeakKeyDictionary()
    eventQueue = []

    @classmethod
    def register(self, listener, tag="untagged"):
        """Register an object for receiving game events."""
        self.listeners[listener] = 1
        print(f"{self._tag}registered {listener.__class__} in event queue.")

    @classmethod
    def unregister(self, listener):
        """Stop delivering game events to *listener*."""
        if listener in self.listeners:
            print(f"{self._tag}unregistered {listener.__class__} from event queue")
            del self.listeners[listener]

    @classmethod
    def fire(self, event):
        """Deliver *event* to every registered listener."""
        # high-frequency events (ticks, mouse moves) are not logged
        if not isinstance(event, (TickEvent, MouseMovedEvent)):
            print(f"{self._tag}EVENT {event.name}")
        for listener in list(self.listeners):
            listener.on_event(event)
| [
"weakref.WeakKeyDictionary"
] | [((140, 159), 'weakref.WeakKeyDictionary', 'WeakKeyDictionary', ([], {}), '()\n', (157, 159), False, 'from weakref import WeakKeyDictionary\n')] |
import time
import picamera
import numpy as np
import cv2
# Capture one full-resolution YUV frame from the Raspberry Pi camera and
# re-save its luma data as a JPEG.
with picamera.PiCamera() as camera:
    camera.resolution = (3280, 2464)
    camera.start_preview()
    # give the sensor time to settle exposure/white balance before capturing
    time.sleep(2)
    camera.capture('image.data', 'yuv')
##################################################
# Re-read the raw dump: only the first 3280*2464 bytes are loaded --
# presumably the Y (luma) plane of the YUV data, i.e. a grayscale image;
# confirm against the picamera raw-capture docs.
fd = open('image.data', 'rb')
f = np.fromfile(fd, dtype=np.uint8, count=3280*2464)
# NOTE(review): reshape uses (width, height); image rows are normally
# height-first -- verify this should be (2464, 3280). The Pi also pads
# capture rows/columns to hardware block sizes, so the raw buffer may be
# larger than width*height -- confirm.
im = f.reshape((3280, 2464))
fd.close()
cv2.imwrite('rawconverted.jpg', im)
"cv2.imwrite",
"numpy.fromfile",
"picamera.PiCamera",
"time.sleep"
] | [((301, 351), 'numpy.fromfile', 'np.fromfile', (['fd'], {'dtype': 'np.uint8', 'count': '(3280 * 2464)'}), '(fd, dtype=np.uint8, count=3280 * 2464)\n', (312, 351), True, 'import numpy as np\n'), ((390, 425), 'cv2.imwrite', 'cv2.imwrite', (['"""rawconverted.jpg"""', 'im'], {}), "('rawconverted.jpg', im)\n", (401, 425), False, 'import cv2\n'), ((63, 82), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (80, 82), False, 'import picamera\n'), ((163, 176), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (173, 176), False, 'import time\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy.io.wavfile as wav
import random
import tables
import pickle
def feed_to_hdf5(feature_vector, subject_num, utterance_train_storage, utterance_test_storage, label_train_storage,
                 label_test_storage):
    """
    :param feature_vector: The feature vector for each sound file of shape: (num_frames,num_features_per_frame,num_channels).
    :param subject_num: The subject class in 'int' format.
    :param utterance_storage: The HDF5 object for storing utterance feature map.
    :param label_train_storage: The HDF5 object for storing train label.
    :param label_test_storage: The HDF5 object for storing test label.
    :return: Each utterance will be stored in HDF5 file.
    """
    num_utterances_per_speaker = 20
    stride_step = 20
    utterance_length = 80
    num_frames = feature_vector.shape[0]
    # number of sliding windows of (utterance_length + num_utterances_per_speaker)
    # frames that fit in the file when advancing by stride_step frames
    num_samples = int(np.floor((num_frames - utterance_length - num_utterances_per_speaker) / float(stride_step))) + 1

    # Half of the samples will be fed for training.
    range_training = range(int(4 * num_samples / 5))
    # NOTE(review): the line below overwrites the 4/5 split computed above and
    # forces exactly one training sample per file -- looks like debug
    # leftover; confirm whether the split should be restored.
    range_training = range(1)
    for sample_index in range_training:
        # initial index of each utterance
        init = sample_index * stride_step
        # one sample: num_utterances_per_speaker overlapping 80-frame windows
        # stacked along the last axis (each shifted by one frame)
        utterance = np.zeros((1, 80, 40, 20), dtype=np.float32)
        for utterance_speaker in range(num_utterances_per_speaker):
            utterance[:, :, :, utterance_speaker] = feature_vector[None,
                                                    init + utterance_speaker:init + utterance_speaker + utterance_length,
                                                    :, 0]
        utterance_train_storage.append(utterance)
        # labels are stored as 1-based subject indices
        label_train_storage.append((np.array([subject_num + 1], dtype=np.int32)))

    # The second half of each sound file will be used for testing on the same subject.
    range_testing = range(int(4 * num_samples / 5), int(num_samples))
    # NOTE(review): again overridden to a single fixed sample -- confirm intent.
    range_testing = range(1,2)
    for sample_index in range_testing:
        # initial index of each utterance
        init = sample_index * stride_step
        utterance = np.zeros((1, 80, 40, 20), dtype=np.float32)
        for utterance_speaker in range(num_utterances_per_speaker):
            utterance[:, :, :, utterance_speaker] = feature_vector[None,
                                                    init + utterance_speaker:init + utterance_speaker + utterance_length,
                                                    :, 0]
        utterance_test_storage.append(utterance)
        label_test_storage.append((np.array([subject_num + 1], dtype=np.int32)))
| [
"numpy.array",
"numpy.zeros"
] | [((1387, 1430), 'numpy.zeros', 'np.zeros', (['(1, 80, 40, 20)'], {'dtype': 'np.float32'}), '((1, 80, 40, 20), dtype=np.float32)\n', (1395, 1430), True, 'import numpy as np\n'), ((2216, 2259), 'numpy.zeros', 'np.zeros', (['(1, 80, 40, 20)'], {'dtype': 'np.float32'}), '((1, 80, 40, 20), dtype=np.float32)\n', (2224, 2259), True, 'import numpy as np\n'), ((1838, 1881), 'numpy.array', 'np.array', (['[subject_num + 1]'], {'dtype': 'np.int32'}), '([subject_num + 1], dtype=np.int32)\n', (1846, 1881), True, 'import numpy as np\n'), ((2665, 2708), 'numpy.array', 'np.array', (['[subject_num + 1]'], {'dtype': 'np.int32'}), '([subject_num + 1], dtype=np.int32)\n', (2673, 2708), True, 'import numpy as np\n')] |
import setuptools
# Distribution metadata for the almond-cloud CLI package.
setuptools.setup(
    name="almond-cloud-cli",
    version="0.0.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Command Line Interface (CLI) for Almond Cloud development and deployment",
    url="https://github.com/stanford-oval/almond-cloud",
    packages=setuptools.find_packages(),
    python_requires=">=3,<4",
    install_requires=[
        "clavier==0.1.3a3",
        "kubernetes>=19.15.0,<20",
        "pyyaml>=6.0,<7",
    ],
    # the CLI entry point is installed as a plain script
    scripts=[
        "bin/almond-cloud",
    ],
)
| [
"setuptools.find_packages"
] | [((298, 324), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (322, 324), False, 'import setuptools\n')] |
from os import path, environ, makedirs
from peewee import SqliteDatabase
CACHE_FOLDER_NAME = '.sydler'
DB_FILE_NAME = 'member.db'

# Resolve the cache directory inside the user's home folder.
# Bug fix: environ.get('HOME') returns None when HOME is unset (e.g. on
# Windows), crashing path.join; path.expanduser('~') is portable and still
# honours $HOME on POSIX.
path_to_db = path.join(path.expanduser('~'), CACHE_FOLDER_NAME)
# create the cache folder before connecting to the data store
makedirs(path_to_db, exist_ok=True)

# create and connect to data store
DB = SqliteDatabase(path.join(path_to_db, DB_FILE_NAME))
DB.connect()
| [
"os.path.join",
"os.environ.get",
"os.makedirs"
] | [((256, 291), 'os.makedirs', 'makedirs', (['path_to_db'], {'exist_ok': '(True)'}), '(path_to_db, exist_ok=True)\n', (264, 291), False, 'from os import path, environ, makedirs\n'), ((216, 235), 'os.environ.get', 'environ.get', (['"""HOME"""'], {}), "('HOME')\n", (227, 235), False, 'from os import path, environ, makedirs\n'), ((347, 382), 'os.path.join', 'path.join', (['path_to_db', 'DB_FILE_NAME'], {}), '(path_to_db, DB_FILE_NAME)\n', (356, 382), False, 'from os import path, environ, makedirs\n')] |
import os
from dotenv import dotenv_values
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from twisted.internet.defer import inlineCallbacks
from autoinfo.cookie import CookieProvider
from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, \
MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, \
MongoEngineStore, MongoModelEngineStore
from autoinfo.services import AutoDetailsService
from autoinfo.utils import get_value_safely
from scrapper.scrapper.spiders import AutoInfoSeriesSpider, AutoInfoMakersSpider, AutoInfoModelsSpider, \
AutoInfoSubModelsSpider, AutoInfoYearsSpider, AutoInfoEnginesSpider
def start_scrapping():
    """Connect to Mongo, build the store/service graph, and run all spiders.

    Spiders are executed one after another inside a single Twisted reactor
    run (see the comment above ``run_spiders`` for why the order matters).
    """
    with MongoConnector() as connector:
        # Mongo credentials and endpoints are read from the local .env file.
        config = dotenv_values(".env")
        settings = [
            MongoConnectionSettings(
                get_value_safely("MONGO_CONNECTION_ALIAS", config),
                get_value_safely("MONGO_DATABASE", config),
                get_value_safely("MONGO_AUTH_USERNAME", config),
                get_value_safely("MONGO_AUTH_PASSWORD", config),
                get_value_safely("MONGO_HOST", config),
                get_value_safely("MONGO_PORT", config, int),
                get_value_safely("MONGO_AUTH_DATABASE", config)
            )
        ]
        connector.create_connections(settings)

        # create concrete stores
        maker_store = MongoMakerStore()
        models_store = MongoModelStore()
        submodels_store = MongoSubModelStore()
        model_cookies_store = MongoModelCookieStore()
        model_years_store = MongoModelYearStore()
        series_store = MongoSeriesStore()
        model_series_store = MongoModelSeriesStore()
        engine_store = MongoEngineStore()
        model_engine_store = MongoModelEngineStore()

        # create services
        auto_details_service = AutoDetailsService(maker_store, models_store, submodels_store, model_cookies_store,
                                                  model_years_store, series_store, model_series_store, engine_store,
                                                  model_engine_store)

        # create utils classes
        cookie_provider = CookieProvider()

        process = create_crawler_process(auto_details_service)

        # We should run all these spiders consequently because:
        # 1) Each of them depends on the results of running previous one
        # 2) It also gives us flexibility to run only some particular spiders to crawl only required information.
        # Since lists of makers, models and years are changed rarely we don't need to load them every time
        # we run this scrapper. So we can make some sort of tasks which can be stored in a database and run spiders
        # based on them. Or we just can comment out some of them at some time and run only required one to update
        # only information which we need to update right now.
        @inlineCallbacks
        def run_spiders():
            base_api_url = "https://online.autoinfo.com.au/oscar/Aut01nf0iiqq4/a"
            yield process.crawl(AutoInfoMakersSpider, auto_details_service, cookie_provider, base_api_url)
            yield process.crawl(AutoInfoModelsSpider, auto_details_service, cookie_provider, base_api_url)
            yield process.crawl(AutoInfoSubModelsSpider, auto_details_service, base_api_url)
            yield process.crawl(AutoInfoYearsSpider, auto_details_service, base_api_url)
            yield process.crawl(AutoInfoSeriesSpider, auto_details_service, base_api_url)
            yield process.crawl(AutoInfoEnginesSpider, auto_details_service, base_api_url)

        # schedule the crawl chain, then block until the reactor finishes
        run_spiders()
        process.start()
def create_crawler_process(auto_details_service):
    """Build a Scrapy CrawlerProcess wired with the project settings."""
    # point Scrapy at the project's overridden settings module first
    os.environ.setdefault('SCRAPY_SETTINGS_MODULE', "scrapper.scrapper.settings")
    crawler_settings = get_project_settings()
    # inject the service object so pipelines can pull it out of the settings
    crawler_settings.set("AUTO_DETAILS_SERVICE", auto_details_service)
    return CrawlerProcess(crawler_settings)
# Entry point: run the full scrape when executed as a script.
if __name__ == "__main__":
    start_scrapping()
| [
"os.environ.setdefault",
"autoinfo.data.mongo.MongoMakerStore",
"autoinfo.services.AutoDetailsService",
"autoinfo.data.mongo.MongoModelSeriesStore",
"scrapy.utils.project.get_project_settings",
"autoinfo.data.mongo.MongoSeriesStore",
"autoinfo.data.mongo.MongoConnector",
"autoinfo.data.mongo.MongoMode... | [((3917, 3994), 'os.environ.setdefault', 'os.environ.setdefault', (['"""SCRAPY_SETTINGS_MODULE"""', '"""scrapper.scrapper.settings"""'], {}), "('SCRAPY_SETTINGS_MODULE', 'scrapper.scrapper.settings')\n", (3938, 3994), False, 'import os\n'), ((4010, 4032), 'scrapy.utils.project.get_project_settings', 'get_project_settings', ([], {}), '()\n', (4030, 4032), False, 'from scrapy.utils.project import get_project_settings\n'), ((4213, 4237), 'scrapy.crawler.CrawlerProcess', 'CrawlerProcess', (['settings'], {}), '(settings)\n', (4227, 4237), False, 'from scrapy.crawler import CrawlerProcess\n'), ((804, 820), 'autoinfo.data.mongo.MongoConnector', 'MongoConnector', ([], {}), '()\n', (818, 820), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((852, 873), 'dotenv.dotenv_values', 'dotenv_values', (['""".env"""'], {}), "('.env')\n", (865, 873), False, 'from dotenv import dotenv_values\n'), ((1498, 1515), 'autoinfo.data.mongo.MongoMakerStore', 'MongoMakerStore', ([], {}), '()\n', (1513, 1515), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1539, 1556), 'autoinfo.data.mongo.MongoModelStore', 'MongoModelStore', ([], {}), '()\n', (1554, 1556), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1583, 1603), 'autoinfo.data.mongo.MongoSubModelStore', 'MongoSubModelStore', ([], {}), '()\n', (1601, 1603), 
False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1634, 1657), 'autoinfo.data.mongo.MongoModelCookieStore', 'MongoModelCookieStore', ([], {}), '()\n', (1655, 1657), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1686, 1707), 'autoinfo.data.mongo.MongoModelYearStore', 'MongoModelYearStore', ([], {}), '()\n', (1705, 1707), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1731, 1749), 'autoinfo.data.mongo.MongoSeriesStore', 'MongoSeriesStore', ([], {}), '()\n', (1747, 1749), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1779, 1802), 'autoinfo.data.mongo.MongoModelSeriesStore', 'MongoModelSeriesStore', ([], {}), '()\n', (1800, 1802), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1826, 1844), 'autoinfo.data.mongo.MongoEngineStore', 'MongoEngineStore', ([], {}), '()\n', (1842, 1844), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, 
MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1874, 1897), 'autoinfo.data.mongo.MongoModelEngineStore', 'MongoModelEngineStore', ([], {}), '()\n', (1895, 1897), False, 'from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, MongoEngineStore, MongoModelEngineStore\n'), ((1956, 2134), 'autoinfo.services.AutoDetailsService', 'AutoDetailsService', (['maker_store', 'models_store', 'submodels_store', 'model_cookies_store', 'model_years_store', 'series_store', 'model_series_store', 'engine_store', 'model_engine_store'], {}), '(maker_store, models_store, submodels_store,\n model_cookies_store, model_years_store, series_store,\n model_series_store, engine_store, model_engine_store)\n', (1974, 2134), False, 'from autoinfo.services import AutoDetailsService\n'), ((2285, 2301), 'autoinfo.cookie.CookieProvider', 'CookieProvider', ([], {}), '()\n', (2299, 2301), False, 'from autoinfo.cookie import CookieProvider\n'), ((948, 998), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_CONNECTION_ALIAS"""', 'config'], {}), "('MONGO_CONNECTION_ALIAS', config)\n", (964, 998), False, 'from autoinfo.utils import get_value_safely\n'), ((1016, 1058), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_DATABASE"""', 'config'], {}), "('MONGO_DATABASE', config)\n", (1032, 1058), False, 'from autoinfo.utils import get_value_safely\n'), ((1076, 1123), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_AUTH_USERNAME"""', 'config'], {}), "('MONGO_AUTH_USERNAME', config)\n", (1092, 1123), False, 'from autoinfo.utils import get_value_safely\n'), ((1141, 1188), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_AUTH_PASSWORD"""', 'config'], {}), 
"('MONGO_AUTH_PASSWORD', config)\n", (1157, 1188), False, 'from autoinfo.utils import get_value_safely\n'), ((1206, 1244), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_HOST"""', 'config'], {}), "('MONGO_HOST', config)\n", (1222, 1244), False, 'from autoinfo.utils import get_value_safely\n'), ((1262, 1305), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_PORT"""', 'config', 'int'], {}), "('MONGO_PORT', config, int)\n", (1278, 1305), False, 'from autoinfo.utils import get_value_safely\n'), ((1323, 1370), 'autoinfo.utils.get_value_safely', 'get_value_safely', (['"""MONGO_AUTH_DATABASE"""', 'config'], {}), "('MONGO_AUTH_DATABASE', config)\n", (1339, 1370), False, 'from autoinfo.utils import get_value_safely\n')] |
#!/usr/bin/env python3
#
# SPDX-License-Identifier: MIT
#
# This file is formatted with Python Black
"""
A Parser helper function to convert a byte array to a Python object and the
other way around. The conversion is specified in a list of :class:`Spec`
instances, for example:
>>> data = bytes(range(16))
>>> spec = [
... Spec("B", "zero"),
... Spec("B", "first"),
... Spec("H", "second", endian="BE"),
... Spec("H", "third", endian="le"),
... Spec("BB", "tuples", repeat=5)
... ]
...
>>> result = Parser.to_object(data, spec)
>>> assert result.size == len(data)
>>> assert result.object.zero == 0
>>> assert result.object.first == 0x1
>>> assert result.object.second == 0x0203
>>> assert result.object.third == 0x0504 # little endian
>>> assert result.object.tuples == [(6, 7), (8, 9), (10, 11), (12, 13), (14, 15)]
And likewise, an object can be turned into a bytearray: ::
>>> new_data = Parser.from_object(result.object, spec)
>>> assert new_data == data
See the :class:`Spec` documentation for details on the format.
"""
import attr
import logging
import re
import struct
from ratbag.util import as_hex
from typing import Any, Callable, Dict, List, Optional, Type, Union
logger = logging.getLogger(__name__)
@attr.s
class Spec(object):
    """
    The format specification for a single **logical** in a data set. This is
    used in :meth:`Parser.to_object` or :meth:`Parser.from_object` to convert
    from or to a byte stream. For example:

    - ``Spec("B", "myattr")`` is a single byte from/to an object's ``myattr``
      property
    - ``Spec("BB", "point")`` is a tuple of two bytes from/to an object's ``myattr``
      property

    See :meth:`Parser.to_object` and :meth:`Parser.from_object` for details.
    """

    @attr.s
    class ConverterArg:
        """
        The argument passed to :attr:`convert_to_data`
        """

        bytes: bytes = attr.ib()
        value: Any = attr.ib()
        index: int = attr.ib()

    format: str = attr.ib()
    """
    The format, must be compatible to Python's ``struct`` format specifiers,
    excluding the endian prefixes. If the format contains more than one
    element, the respective object attribute is a tuple.

    With the exception of fixed-length strings (``4s`` for a 4-byte string)
    this format must not contain any repeat specifiers. Use the ``repeat``
    attribute instead. IOW:

    >>> Spec("3s", "string") # One 3-byte string
    >>> Spec("s", "string", repeat=3) # Three 1-byte strings
    >>> Spec("3H", "foo") # Not permitted
    """
    name: str = attr.ib()
    """
    The name to assign to the resulting object attribute.
    """
    endian: str = attr.ib(default="BE", validator=attr.validators.in_(["BE", "le"]))
    """
    Endianess of the field, one of ``"BE"`` or ``"le"``.
    """
    repeat: int = attr.ib(default=1, validator=attr.validators.instance_of(int))
    """
    The number of times this field repeats in struct. Where repeat is greater
    than 1, the resulting attribute is a list with ``repeat`` elements (each
    element may be tuple, see ``format``).
    """
    greedy: bool = attr.ib(default=False)
    """
    If true, ``repeat`` is ignored and the current field repeats until the
    remainder of the available data. This takes the current format spec into
    account. For example, a `HH` tuple has 4 bytes and will repeat 5 times in
    a data size 20.

    If the data size is not a multiple of the current format size, the
    remainder is silently skipped:

    >>> spec = Spec("H", "foo", greedy=True)
    >>> data = Parser.to_object(bytes(5), spec)
    >>> assert data.object.size == 4
    """
    convert_from_data: Optional[Callable[[Any], Any]] = attr.ib(default=None)
    """
    Conversion function for the data. An example for converting a sequence of
    bytes to a string:

    >>> spec = Spec("B", "foo", repeat=3, convert_from_data=lambda s: bytes(s).decode("utf-8"))
    # Or alternatively use the string length format:
    >>> spec = Spec("3s", "foo", convert_from_data=lambda s: s.decode("utf-8"))
    >>> data = Parser.to_object("bar".encode("utf-8"), spec)
    >>> assert data.object.foo == "bar"

    Note that the conversion happens once all ``repeat`` have been completed,
    i.e. the input value for ``repeat > 1`` is a list.
    """
    convert_to_data: Optional[Callable[[ConverterArg], Any]] = attr.ib(default=None)
    """
    Conversion function of this attribute to data. This function takes the
    data bytes produced so far by :meth:`Parser.from_object` and the current
    value and index (if applicable). It must return a value compatible to the
    format specifier. Specifically:

    - if ``format`` specifies more than one type, the return value must be a
      tuple
    - if ``repeat`` is greater than 1, the return value must be a list of
      ``repeat`` elements. Note that this function is called once per element
      the list, with the data argument updated accordingly.

    An example for producing a checksum with ``some_crc()``: ::

    >>> specs = [] # other fields
    >>> checksum_spec("H", "checksum", convert_to_data=lambda bs, v, idx: some_crc(bs))
    >>> data = Parser.from_object(myobj, specs + checksum_spec)
    >>> assert data[-2:] == some_crc(data[:-2])
    """
    # Derived fields, computed in __attrs_post_init__ from ``format``.
    _size: int = attr.ib(init=False)
    _count: int = attr.ib(init=False)

    def __attrs_post_init__(self):
        # byte size of one occurrence of this field's struct format
        self._size = struct.calcsize(self.format)
        # repeat counts inside the format string (e.g. "3H") are only allowed
        # for strings ("3s"); everything else must use the `repeat` attribute
        invalid = re.findall(r"\d+[^s\d]+", self.format)
        assert not invalid, f"Invalid use of repeat found in pattern(s): {invalid}"
        # struct allows repeats which are useful for strings in particular.
        # Where they're used, make the count a function of the struct format
        # specifiers only, not the repeats, i.e. a format like "3s" is one
        # string, not a tuple of two.
        self._count = len(re.sub(r"[0-9]", "", self.format))

    @repeat.validator
    def _check_repeat(self, attribute, value):
        # attrs validator: reject zero/negative repeat counts at construction
        if value <= 0:
            raise ValueError("repeat must be greater than zero")
@attr.s
class Result(object):
"""
The return value from :meth:`Parser.to_object`
"""
object: Any = attr.ib()
"""
The object passed to :meth:`Parser.to_object` or otherwise an unspecified
instance with all attribute names as requested by the parser spec.
"""
size: int = attr.ib()
"""
The number of bytes used to create this object
"""
@attr.s
class Parser(object):
@classmethod
def to_object(
cls,
data: bytes,
specs: List[Spec],
obj: object = None,
result_class: Union[str, Type] = "_ResultObject",
) -> Result:
"""
Convert the given data into an object according to the specs. If
``obj`` is not ``None``, the attributes are set on that
object (resetting any attributes of the same name already set on the
object). Otherwise, a new generic object is created with all
attributes as specified in the parser specs.
The ``result_class`` specifies either the type of class to
instantiate, or the name of the created class for this object.
>>> specs = [Spec("B", "field")]
>>> r = Parser.to_object(bytes(16), specs, result_class = "Foo")
>>> print(type(r.object).__name__)
Foo
>>> class Bar:
... def __init__(self, field):
... pass
>>> r = Parser.to_object(bytes(16), specs, result_class = Bar)
>>> assert isinstance(r.object, Bar)
Where an existing type is used, that type must take all Spec fields as
keyword arguments in the constructor.
"""
# Only the last element can be greedy
assert all([spec.greedy is False for spec in list(reversed(specs))[1:]])
# This parser is quite noisy but if the input is a zero-byte array
# (used by some drivers to init an object with all spec fields) we
# disable the logger. This should be handled better (specifically: the
# driver shouldn't need to do this) but for now it'll do.
disable_logger = data == bytes(len(data))
if disable_logger:
logger.debug("Parsing zero byte array, detailed output is skipped")
# All parsing data is temporarily stored in this dictionary which is
# simply: { spec.name: parsed_value }
# Once we're done parsing we move all these to the object passed in
values: Dict[str, Any] = {}
offset = 0
for spec in specs:
endian = {"BE": ">", "le": "<"}[spec.endian]
if spec.greedy:
repeat = len(data[offset:]) // struct.calcsize(spec.format)
else:
repeat = spec.repeat
for idx in range(repeat):
try:
val = struct.unpack_from(endian + spec.format, data, offset=offset)
except struct.error as e:
logger.error(
f"Parser error while parsing spec {spec} at offset {offset}: {e}"
)
raise e
if spec.name == "_":
debugstr = "<pad bytes>"
elif spec.name == "?":
debugstr = "<unknown>"
else:
if spec._count == 1:
val = val[0]
if repeat > 1:
debugstr = f"self.{spec.name:24s} += {val}"
if idx == 0:
values[spec.name] = []
values[spec.name].append(val)
else:
debugstr = f"self.{spec.name:24s} = {val}"
values[spec.name] = val
if not disable_logger:
logger.debug(
f"offset {offset:02d}: {as_hex(data[offset:offset+spec._size]):5s} → {debugstr}"
)
offset += spec._size
if spec.convert_from_data is not None:
values[spec.name] = spec.convert_from_data(values[spec.name])
# if we don't have an object, construct an attr class with the spec
# names (skipping padding/unknown). This makes printing and inspecting
# results a lot saner.
if obj is None:
vals = {n.lstrip("_"): v for n, v in values.items()}
if isinstance(result_class, str):
c = attr.make_class(result_class, attrs=list(values.keys()))
# private fields in attr drop the leading underscore in the
# constructor
obj = c(**vals)
else:
# Instantiate the given directly
obj = result_class(**vals)
else:
for name, value in values.items():
setattr(obj, name, value)
return Result(obj, offset)
@classmethod
def from_object(cls, obj: Any, specs: List[Spec], pad_to: int = 0) -> bytes:
"""
Convert the attributes on the given objects to a byte array, given the
specifications (in-order). This is the inverse of :meth:`Parser.to_object`.
Note that each attribute must exist on the object and have the format
compatible by its respective spec. For example, a :class:`Spec` with
- a format ``"BB"`` must be a tuple of 2 bytes
- a format ``"H"`` with a ``repeat`` of 5 must be a list of five 16-bit integers,
- a format ``"HB"`` with a ``repeat`` of 3 must be a list of three
tuples with a 16-bit integer and byte each
"""
data = bytearray(4096)
offset = 0
for spec in specs:
endian = {"BE": ">", "le": "<"}[spec.endian]
for idx in range(spec.repeat):
val: Any = None # just to shut up mypy
if spec.name in ["_", "?"]:
val = [0] * spec._count if spec._count > 1 else 0
if spec.repeat > 1:
val = spec.repeat * [val]
else:
val = getattr(obj, spec.name)
if spec.convert_to_data is not None:
val = spec.convert_to_data(
Spec.ConverterArg(data[:offset], val, idx)
)
if spec.repeat > 1:
val = val[idx]
if offset + spec._size >= len(data):
data.extend([0] * 4096)
if spec._count > 1:
struct.pack_into(endian + spec.format, data, offset, *val)
else:
struct.pack_into(endian + spec.format, data, offset, val)
if spec.name == "_":
debugstr = "<pad bytes>"
elif spec.name == "?":
debugstr = "<unknown>"
else:
debugstr = f"self.{spec.name}"
valstr = f"{val}"
logger.debug(
f"offset {offset:02d}: {debugstr:30s} is {valstr:8s} → {as_hex(data[offset:offset+spec._size]):5s}"
)
offset += spec._size
return bytes(data[:offset]).ljust(pad_to, b"\x00")
| [
"logging.getLogger",
"struct.calcsize",
"struct.unpack_from",
"ratbag.util.as_hex",
"attr.validators.instance_of",
"struct.pack_into",
"re.sub",
"re.findall",
"attr.validators.in_",
"attr.ib"
] | [((1287, 1314), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1304, 1314), False, 'import logging\n'), ((2062, 2071), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2069, 2071), False, 'import attr\n'), ((2659, 2668), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2666, 2668), False, 'import attr\n'), ((3215, 3237), 'attr.ib', 'attr.ib', ([], {'default': '(False)'}), '(default=False)\n', (3222, 3237), False, 'import attr\n'), ((3812, 3833), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (3819, 3833), False, 'import attr\n'), ((4499, 4520), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (4506, 4520), False, 'import attr\n'), ((5440, 5459), 'attr.ib', 'attr.ib', ([], {'init': '(False)'}), '(init=False)\n', (5447, 5459), False, 'import attr\n'), ((5478, 5497), 'attr.ib', 'attr.ib', ([], {'init': '(False)'}), '(init=False)\n', (5485, 5497), False, 'import attr\n'), ((6329, 6338), 'attr.ib', 'attr.ib', ([], {}), '()\n', (6336, 6338), False, 'import attr\n'), ((6520, 6529), 'attr.ib', 'attr.ib', ([], {}), '()\n', (6527, 6529), False, 'import attr\n'), ((1971, 1980), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1978, 1980), False, 'import attr\n'), ((2002, 2011), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2009, 2011), False, 'import attr\n'), ((2033, 2042), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2040, 2042), False, 'import attr\n'), ((5555, 5583), 'struct.calcsize', 'struct.calcsize', (['self.format'], {}), '(self.format)\n', (5570, 5583), False, 'import struct\n'), ((5602, 5641), 're.findall', 're.findall', (['"""\\\\d+[^s\\\\d]+"""', 'self.format'], {}), "('\\\\d+[^s\\\\d]+', self.format)\n", (5612, 5641), False, 'import re\n'), ((2793, 2826), 'attr.validators.in_', 'attr.validators.in_', (["['BE', 'le']"], {}), "(['BE', 'le'])\n", (2812, 2826), False, 'import attr\n'), ((2948, 2980), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (2975, 2980), False, 'import 
attr\n'), ((6018, 6050), 're.sub', 're.sub', (['"""[0-9]"""', '""""""', 'self.format'], {}), "('[0-9]', '', self.format)\n", (6024, 6050), False, 'import re\n'), ((8862, 8890), 'struct.calcsize', 'struct.calcsize', (['spec.format'], {}), '(spec.format)\n', (8877, 8890), False, 'import struct\n'), ((9031, 9092), 'struct.unpack_from', 'struct.unpack_from', (['(endian + spec.format)', 'data'], {'offset': 'offset'}), '(endian + spec.format, data, offset=offset)\n', (9049, 9092), False, 'import struct\n'), ((12773, 12831), 'struct.pack_into', 'struct.pack_into', (['(endian + spec.format)', 'data', 'offset', '*val'], {}), '(endian + spec.format, data, offset, *val)\n', (12789, 12831), False, 'import struct\n'), ((12874, 12931), 'struct.pack_into', 'struct.pack_into', (['(endian + spec.format)', 'data', 'offset', 'val'], {}), '(endian + spec.format, data, offset, val)\n', (12890, 12931), False, 'import struct\n'), ((13312, 13352), 'ratbag.util.as_hex', 'as_hex', (['data[offset:offset + spec._size]'], {}), '(data[offset:offset + spec._size])\n', (13318, 13352), False, 'from ratbag.util import as_hex\n'), ((10082, 10122), 'ratbag.util.as_hex', 'as_hex', (['data[offset:offset + spec._size]'], {}), '(data[offset:offset + spec._size])\n', (10088, 10122), False, 'from ratbag.util import as_hex\n')] |
'''mpi4py wrapper that allows an ensemble of serial applications to run in
parallel across ranks on the computing resource'''
import argparse
from collections import defaultdict
import os
import sys
import logging
import random
from subprocess import Popen, STDOUT, TimeoutExpired
import shlex
import signal
import time
from mpi4py import MPI
from django.db import transaction, connections
from balsam import config_logging, settings, setup
setup()
from balsam.launcher.exceptions import *
from balsam.launcher.util import cd, get_tail, remaining_time_minutes
from balsam.core.models import BalsamJob, safe_select
logger = logging.getLogger('balsam.launcher.mpi_ensemble')
config_logging('serial-launcher')
comm = MPI.COMM_WORLD
RANK = comm.Get_rank()
MSG_BUFSIZE = 2**16
connections.close_all()
class ResourceManager:
FETCH_PERIOD = 2.0
KILLED_REFRESH_PERIOD = 3.0
def __init__(self, job_source):
self.job_source = job_source
self.node_occupancy = [0.0 for i in range(comm.size)]
self.node_occupancy[0] = 1.0
self.running_locations = {}
self.job_occupancy = {}
self.last_job_fetch = -10.0
self.last_killed_refresh = -10.0
self.job_cache = []
self.killed_pks = []
self.recv_requests = {i:comm.irecv(MSG_BUFSIZE, source=i) for i in range(1,comm.size)}
self.job_source.check_qLaunch()
if self.job_source.qLaunch is not None:
sched_id = self.job_source.qLaunch.scheduler_id
self.RUN_MESSAGE = f'Scheduled by Balsam Service (Scheduler ID: {sched_id})'
else:
self.RUN_MESSAGE = 'Not scheduled by Balsam service'
logger.info(self.RUN_MESSAGE)
logger.info(f'Assigning jobs to {comm.size-1} worker ranks')
def refresh_job_cache(self):
now = time.time()
if len(self.job_cache) == 0 or (now-self.last_job_fetch) > self.FETCH_PERIOD:
jobquery = self.job_source.get_runnable(
max_nodes=1,
serial_only=True,
order_by=('node_packing_count', # ascending
'-wall_time_minutes') # descending
)
self.job_cache = list(jobquery[:10000])
self.last_job_fetch = now
logger.debug(f"Refreshed job cache: {len(self.job_cache)} runnable")
if len(self.job_cache) == 0:
logger.debug(f'Job cache query\n{jobquery.query}\n')
def refresh_killed_jobs(self):
now = time.time()
if now - self.last_killed_refresh > self.KILLED_REFRESH_PERIOD:
killed_pks = self.job_source.filter(state='USER_KILLED').values_list('job_id', flat=True)
if len(killed_pks) > len(self.killed_pks):
logger.info(f"Killed jobs: {self.killed_pks}")
self.killed_pks = killed_pks
self.last_killed_refresh = now
def pre_assign(self, rank, job):
job_occ = 1.0 / job.node_packing_count
self.node_occupancy[rank] += job_occ
self.job_occupancy[job.pk] = job_occ
self.running_locations[job.pk] = rank
def revert_assign(self, rank, job_pk):
job_occ = self.job_occupancy[job_pk]
self.node_occupancy[rank] -= job_occ
if self.node_occupancy[rank] < 0.0001: self.node_occupancy[rank] = 0.0
del self.job_occupancy[job_pk]
del self.running_locations[job_pk]
@transaction.atomic
def allocate_next_jobs(self):
'''Generator: yield (job,rank) pairs and mark the nodes/ranks as busy'''
self.refresh_job_cache()
send_requests = []
pre_assignments = defaultdict(list)
min_packing_count = 1
for job in self.job_cache:
if job.node_packing_count < min_packing_count: continue
job_occ = 1.0 / job.node_packing_count
free_ranks = (i for i in range(1, comm.size)
if self.node_occupancy[i]+job_occ < 1.0001)
rank = next(free_ranks, None)
if rank is None:
logger.debug(f'no free ranks to assign {job.cute_id}')
min_packing_count = job.node_packing_count + 1
else:
pre_assignments[rank].append(job)
self.pre_assign(rank, job)
if len(pre_assignments) == 0: return False
to_acquire = [job.pk for rank in pre_assignments
for job in pre_assignments[rank]]
acquired_pks = self.job_source.acquire(to_acquire)
logger.info(f'Acquired lock on {len(acquired_pks)} out of {len(to_acquire)} jobs marked for running')
# Make actual assignment:
for (rank, pre_jobs) in pre_assignments.items():
runjobs = []
for j in pre_jobs:
if j.pk in acquired_pks:
runjobs.append(j)
self.job_cache.remove(j)
else:
self.revert_assign(rank, j.pk)
if runjobs:
mpiReq = self._send_jobs(runjobs, rank)
logger.info(f"Sent {len(runjobs)} jobs to rank {rank}: occupancy is now {self.node_occupancy[rank]}")
send_requests.append(mpiReq)
BalsamJob.batch_update_state(acquired_pks, 'RUNNING', self.RUN_MESSAGE)
logger.debug("allocate_next_jobs: waiting on all isends...")
MPI.Request.waitall(send_requests)
logger.debug("allocate_next_jobs: all isends completed.")
return len(acquired_pks) > 0
def _send_jobs(self, jobs, rank):
'''Send message to compute rank'''
message = {}
message['tag'] = 'NEW'
for job in jobs:
job_spec = dict(
workdir=job.working_directory,
name=job.name,
cuteid=job.cute_id,
cmd=job.app_cmd,
envs=job.get_envs(),
envscript=job.envscript,
)
message[job.pk] = job_spec
req = comm.isend(message, dest=rank)
return req
def _get_requests(self):
completed_requests = []
stat = MPI.Status()
for rank in self.recv_requests:
req = self.recv_requests[rank]
logger.debug(f"calling req.test() on rank {rank}'s request...")
done, msg = req.test(status = stat)
logger.debug(f"req.test() call completed:\ndone = {done}\nmsg = {msg}")
if done:
completed_requests.append((stat.source, msg))
assert stat.source == rank
for rank,msg in completed_requests:
self.recv_requests[rank] = comm.irecv(MSG_BUFSIZE, source=rank)
return completed_requests
def serve_requests(self):
requests = self._get_requests()
done_jobs = []
error_jobs = []
killed_pks = []
send_reqs = []
for rank, msg in requests:
kill_pks, req = self._handle_ask(rank, msg['ask'])
killed_pks.extend(kill_pks)
send_reqs.append(req)
done_jobs.extend(msg['done'])
error_jobs.extend(msg['error'])
if done_jobs: self._handle_dones(done_jobs)
if error_jobs: self._handle_errors(error_jobs)
if killed_pks: self.job_source.release(killed_pks)
logger.debug("serve_requests: waiting on all isends...")
MPI.Request.waitall(send_reqs)
logger.debug("serve_requests: all isends completed.")
return len(requests)
def _handle_ask(self, rank, ask_pks):
self.refresh_killed_jobs()
response = {'tag': 'CONTINUE', 'kill_pks': []}
for pk in ask_pks:
if pk in self.killed_pks:
response['tag'] = 'KILL'
response['kill_pks'].append(pk)
req = comm.isend(response, dest=rank)
for pk in response['kill_pks']:
self.revert_assign(rank, pk)
if response['tag'] == 'KILL':
logger.info(f"Sent KILL to rank {rank} for {response['kill_pks']}\n"
f"occupancy is now {self.node_occupancy[rank]}")
return response['kill_pks'], req
def _handle_dones(self, done_pks):
for pk in done_pks:
rank = self.running_locations[pk]
self.revert_assign(rank, pk)
BalsamJob.batch_update_state(done_pks, 'RUN_DONE')
self.job_source.release(done_pks)
logger.info(f"RUN_DONE: {len(done_pks)} jobs")
@transaction.atomic
def _handle_errors(self, error_jobs):
error_pks = [j[0] for j in error_jobs]
safe_select(BalsamJob.objects.filter(pk__in=error_pks))
for pk,retcode,tail in error_jobs:
rank = self.running_locations[pk]
self.revert_assign(rank, pk)
job = BalsamJob.objects.get(pk=pk)
state_msg = f"nonzero return {retcode}: {tail}"
job.update_state('RUN_ERROR', state_msg)
self.job_source.release(error_pks)
def send_exit(self):
logger.debug(f"send_exit: waiting on all pending recvs")
active_ranks = list(set(self.running_locations.values()))
requests = [self.recv_requests[i] for i in active_ranks]
MPI.Request.waitall(requests)
reqs = []
logger.debug(f"send_exit: send EXIT tag to all ranks")
for i in range(1, comm.size):
req = comm.isend({'tag': 'EXIT'}, dest=i)
reqs.append(req)
MPI.Request.waitall(reqs)
class Master:
def __init__(self):
self.MAX_IDLE_TIME = 20.0
self.DELAY_PERIOD = 1.0
self.idle_time = 0.0
self.EXIT_FLAG = False
args = self.parse_args()
comm.bcast(args.gpus_per_node, root=0)
self.remaining_timer = remaining_time_minutes(args.time_limit_min)
next(self.remaining_timer)
job_source = BalsamJob.source
job_source.workflow = args.wf_name
job_source.start_tick()
job_source.clear_stale_locks()
self.manager = ResourceManager(job_source)
if job_source.workflow:
logger.info(f'MPI Ensemble pulling jobs with WF {args.wf_name}')
else:
logger.info('MPI Ensemble consuming jobs matching any WF name')
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--wf-name')
parser.add_argument('--time-limit-min', type=float, default=72.*60)
parser.add_argument('--gpus-per-node', type=int, default=0)
return parser.parse_args()
def exit(self):
outstanding_job_pks = list(self.manager.running_locations.keys())
num_timeout = len(outstanding_job_pks)
logger.info(f"Shutting down with {num_timeout} jobs still running..timing out")
BalsamJob.batch_update_state(outstanding_job_pks, 'RUN_TIMEOUT', 'timed out in MPI Ensemble')
self.manager.job_source.release_all_owned()
self.manager.send_exit()
logger.debug("Send_exit: master done")
logger.info(f"master calling MPI Finalize")
MPI.Finalize()
logger.info(f"ensemble master exit gracefully")
sys.exit(0)
def main(self):
for remaining_minutes in self.remaining_timer:
logger.debug(f"{remaining_minutes} minutes remaining")
self._main()
if self.EXIT_FLAG:
logger.info("EXIT_FLAG on; master breaking main loop")
break
if self.idle_time > self.MAX_IDLE_TIME and not self.manager.running_locations:
logger.info(f"Nothing to do for {self.MAX_IDLE_TIME} seconds: quitting")
break
self.exit()
def _main(self):
ran_anything = False
got_requests = 0
ran_anything = self.manager.allocate_next_jobs()
start = time.time()
got_requests = self.manager.serve_requests()
elapsed = time.time() - start
if got_requests: logger.debug(f"Served {got_requests} requests in {elapsed:.3f} seconds")
if not (ran_anything or got_requests):
time.sleep(self.DELAY_PERIOD)
self.idle_time += self.DELAY_PERIOD
else:
self.idle_time = 0.0
class FailedToStartProcess:
returncode = 12345
def wait(self, timeout=0): return 12345
def poll(self, timeout=0): return 12345
def communicate(self, timeout=0): pass
def terminate(self): pass
def kill(self): pass
class Worker:
CHECK_PERIOD=10
RETRY_WINDOW = 20
RETRY_CODES = [-11, 1, 255, 12345]
MAX_RETRY = 3
def __init__(self):
self.processes = {}
self.outfiles = {}
self.cuteids = {}
self.start_times = {}
self.retry_counts = {}
self.job_specs = {}
def _cleanup_proc(self, pk, timeout=0):
self._kill(pk, timeout=timeout)
self.processes[pk].communicate()
self.outfiles[pk].close()
for d in (self.processes, self.outfiles, self.cuteids, self.start_times,
self.retry_counts, self.job_specs):
del d[pk]
def _check_retcode(self, proc, timeout):
try:
retcode = proc.wait(timeout=timeout)
except TimeoutExpired:
retcode = None
return retcode
def _check_retcodes(self):
start = time.time()
pk_retcodes = []
for pk, proc in self.processes.items():
elapsed = time.time() - start
timeout = max(0, self.CHECK_PERIOD - elapsed)
retcode = self._check_retcode(proc, timeout)
pk_retcodes.append((pk, retcode))
return pk_retcodes
def _log_error_tail(self, pk, retcode):
fname = self.outfiles[pk].name
if os.path.exists(fname):
tail = get_tail(self.outfiles[pk].name)
else:
tail = ''
logmsg = self.log_prefix(pk) + f'nonzero return {retcode}:\n {tail}'
logger.error(logmsg)
return tail
def _can_retry(self, pk, retcode):
if retcode in self.RETRY_CODES:
elapsed = time.time() - self.start_times[pk]
retry_count = self.retry_counts[pk]
if elapsed < self.RETRY_WINDOW and retry_count <= self.MAX_RETRY:
logmsg = self.log_prefix(pk)
logmsg += (f'can retry task (err occured after {elapsed:.2f} sec; '
f'attempt {self.retry_counts[pk]}/{self.MAX_RETRY})')
logger.error(logmsg)
return True
return False
def _kill(self, pk, timeout=0):
p = self.processes[pk]
if p.poll() is None:
p.terminate()
logger.debug(f"rank {RANK} sent TERM to {self.cuteids[pk]}...waiting on shutdown")
try: p.wait(timeout=timeout)
except TimeoutExpired: p.kill()
def _launch_proc(self, pk):
job_spec = self.job_specs[pk]
workdir = job_spec['workdir']
name = job_spec['name']
cmd = job_spec['cmd']
envs = job_spec['envs']
envscript = job_spec['envscript']
if envscript:
args = ' '.join(['source', envscript, '&&', cmd])
shell = True
else:
args = shlex.split(cmd)
shell = False
if self.gpus_per_node > 0:
idx = list(self.job_specs.keys()).index(pk)
gpu_device = idx % self.gpus_per_node
envs['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
envs['CUDA_VISIBLE_DEVICES'] = str(gpu_device)
out_name = f'{name}.out'
logger.info(f"{self.log_prefix(pk)} Popen (shell={shell}):\n{args}")
if not os.path.exists(workdir): os.makedirs(workdir)
outfile = open(os.path.join(workdir, out_name), 'wb')
self.outfiles[pk] = outfile
try:
proc = Popen(args, stdout=outfile, stderr=STDOUT,
cwd=workdir, env=envs, shell=shell,)
except Exception as e:
proc = FailedToStartProcess()
logger.error(self.log_prefix(pk) + f"Popen error:\n{str(e)}\n")
sleeptime = 0.5 + 3.5*random.random()
time.sleep(sleeptime)
self.processes[pk] = proc
def _handle_error(self, pk, retcode):
tail = self._log_error_tail(pk, retcode)
if not self._can_retry(pk, retcode):
self._cleanup_proc(pk)
return (retcode, tail)
else:
self.outfiles[pk].close()
self.start_times[pk] = time.time()
self.retry_counts[pk] += 1
self._launch_proc(pk)
return 'running'
def log_prefix(self, pk=None):
prefix = f'rank {RANK} '
if pk: prefix += f'{self.cuteids[pk]} '
return prefix
def write_message(self, job_statuses):
msg = {'ask' : [], 'done' : [], 'error': []}
num_jobs = len(job_statuses)
num_errors = len([
s for s in job_statuses.values()
if s not in ["running","done"]
])
max_tail = (MSG_BUFSIZE - 110*num_jobs) // max(1,num_errors)
for pk, status in job_statuses.items():
if status == 'running':
msg['ask'].append(pk)
elif status == 'done':
msg['done'].append(pk)
else:
retcode, tail = status
tail = tail[-max_tail:]
msg['error'].append((pk, retcode, tail))
return msg
def update_processes(self):
statuses = {}
for pk, retcode in self._check_retcodes():
if retcode is None:
statuses[pk] = 'running'
elif retcode == 0:
statuses[pk] = 'done'
self._cleanup_proc(pk)
else:
statuses[pk] = self._handle_error(pk, retcode)
return statuses
def exit(self):
all_pks = list(self.processes.keys())
for pk in all_pks:
self._cleanup_proc(pk, timeout=self.CHECK_PERIOD)
MPI.Finalize()
sys.exit(0)
def start_jobs(self, msg):
assert msg['tag'] == 'NEW'
for pk in msg:
if pk == 'tag': continue
job_spec = msg[pk]
self.job_specs[pk] = job_spec
self.cuteids[pk] = job_spec['cuteid']
self.start_times[pk] = time.time()
self.retry_counts[pk] = 1
self._launch_proc(pk)
def kill_jobs(self, kill_pks):
for pk in kill_pks: self._cleanup_proc(pk, timeout=0)
def main(self):
tag = None
gpus_per_node = None
self.gpus_per_node = comm.bcast(gpus_per_node, root=0)
while tag != 'EXIT':
logger.debug(f"rank {RANK} waiting on recv from master...")
msg = comm.recv(source=0)
tag = msg['tag']
logger.debug(f"rank {RANK} recv done: got msg tag {tag}")
if tag == 'NEW':
self.start_jobs(msg)
elif tag == 'KILL':
self.kill_jobs(msg['kill_pks'])
elif tag == 'EXIT':
logger.debug(f"rank {RANK} received EXIT")
break
statuses = self.update_processes()
cuteids = ' '.join(self.cuteids.values())
logger.debug(f"rank {RANK} jobs: {cuteids}")
if len(statuses) > 0:
msg = self.write_message(statuses)
logger.debug(f"rank {RANK} sending request to master...")
comm.send(msg, dest=0)
logger.debug(f"rank {RANK} send done")
self.exit()
if __name__ == "__main__":
if RANK == 0:
master = Master()
def handle_term(signum, stack): master.EXIT_FLAG = True
signal.signal(signal.SIGINT, handle_term)
signal.signal(signal.SIGTERM, handle_term)
master.main()
else:
worker = Worker()
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
worker.main()
| [
"logging.getLogger",
"shlex.split",
"time.sleep",
"balsam.launcher.util.remaining_time_minutes",
"sys.exit",
"mpi4py.MPI.Finalize",
"balsam.core.models.BalsamJob.batch_update_state",
"balsam.core.models.BalsamJob.objects.filter",
"os.path.exists",
"argparse.ArgumentParser",
"balsam.launcher.util... | [((443, 450), 'balsam.setup', 'setup', ([], {}), '()\n', (448, 450), False, 'from balsam import config_logging, settings, setup\n'), ((626, 675), 'logging.getLogger', 'logging.getLogger', (['"""balsam.launcher.mpi_ensemble"""'], {}), "('balsam.launcher.mpi_ensemble')\n", (643, 675), False, 'import logging\n'), ((676, 709), 'balsam.config_logging', 'config_logging', (['"""serial-launcher"""'], {}), "('serial-launcher')\n", (690, 709), False, 'from balsam import config_logging, settings, setup\n'), ((776, 799), 'django.db.connections.close_all', 'connections.close_all', ([], {}), '()\n', (797, 799), False, 'from django.db import transaction, connections\n'), ((1824, 1835), 'time.time', 'time.time', ([], {}), '()\n', (1833, 1835), False, 'import time\n'), ((2504, 2515), 'time.time', 'time.time', ([], {}), '()\n', (2513, 2515), False, 'import time\n'), ((3644, 3661), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3655, 3661), False, 'from collections import defaultdict\n'), ((5240, 5311), 'balsam.core.models.BalsamJob.batch_update_state', 'BalsamJob.batch_update_state', (['acquired_pks', '"""RUNNING"""', 'self.RUN_MESSAGE'], {}), "(acquired_pks, 'RUNNING', self.RUN_MESSAGE)\n", (5268, 5311), False, 'from balsam.core.models import BalsamJob, safe_select\n'), ((5389, 5423), 'mpi4py.MPI.Request.waitall', 'MPI.Request.waitall', (['send_requests'], {}), '(send_requests)\n', (5408, 5423), False, 'from mpi4py import MPI\n'), ((6135, 6147), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (6145, 6147), False, 'from mpi4py import MPI\n'), ((7385, 7415), 'mpi4py.MPI.Request.waitall', 'MPI.Request.waitall', (['send_reqs'], {}), '(send_reqs)\n', (7404, 7415), False, 'from mpi4py import MPI\n'), ((8335, 8385), 'balsam.core.models.BalsamJob.batch_update_state', 'BalsamJob.batch_update_state', (['done_pks', '"""RUN_DONE"""'], {}), "(done_pks, 'RUN_DONE')\n", (8363, 8385), False, 'from balsam.core.models import BalsamJob, 
safe_select\n'), ((9232, 9261), 'mpi4py.MPI.Request.waitall', 'MPI.Request.waitall', (['requests'], {}), '(requests)\n', (9251, 9261), False, 'from mpi4py import MPI\n'), ((9472, 9497), 'mpi4py.MPI.Request.waitall', 'MPI.Request.waitall', (['reqs'], {}), '(reqs)\n', (9491, 9497), False, 'from mpi4py import MPI\n'), ((9775, 9818), 'balsam.launcher.util.remaining_time_minutes', 'remaining_time_minutes', (['args.time_limit_min'], {}), '(args.time_limit_min)\n', (9797, 9818), False, 'from balsam.launcher.util import cd, get_tail, remaining_time_minutes\n'), ((10310, 10335), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10333, 10335), False, 'import argparse\n'), ((10794, 10891), 'balsam.core.models.BalsamJob.batch_update_state', 'BalsamJob.batch_update_state', (['outstanding_job_pks', '"""RUN_TIMEOUT"""', '"""timed out in MPI Ensemble"""'], {}), "(outstanding_job_pks, 'RUN_TIMEOUT',\n 'timed out in MPI Ensemble')\n", (10822, 10891), False, 'from balsam.core.models import BalsamJob, safe_select\n'), ((11080, 11094), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (11092, 11094), False, 'from mpi4py import MPI\n'), ((11159, 11170), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (11167, 11170), False, 'import sys\n'), ((11835, 11846), 'time.time', 'time.time', ([], {}), '()\n', (11844, 11846), False, 'import time\n'), ((13331, 13342), 'time.time', 'time.time', ([], {}), '()\n', (13340, 13342), False, 'import time\n'), ((13741, 13762), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (13755, 13762), False, 'import os\n'), ((18030, 18044), 'mpi4py.MPI.Finalize', 'MPI.Finalize', ([], {}), '()\n', (18042, 18044), False, 'from mpi4py import MPI\n'), ((18053, 18064), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (18061, 18064), False, 'import sys\n'), ((19738, 19779), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'handle_term'], {}), '(signal.SIGINT, handle_term)\n', (19751, 19779), False, 'import signal\n'), 
((19788, 19830), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'handle_term'], {}), '(signal.SIGTERM, handle_term)\n', (19801, 19830), False, 'import signal\n'), ((19897, 19941), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_IGN'], {}), '(signal.SIGINT, signal.SIG_IGN)\n', (19910, 19941), False, 'import signal\n'), ((19950, 19995), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'signal.SIG_IGN'], {}), '(signal.SIGTERM, signal.SIG_IGN)\n', (19963, 19995), False, 'import signal\n'), ((8621, 8663), 'balsam.core.models.BalsamJob.objects.filter', 'BalsamJob.objects.filter', ([], {'pk__in': 'error_pks'}), '(pk__in=error_pks)\n', (8645, 8663), False, 'from balsam.core.models import BalsamJob, safe_select\n'), ((8813, 8841), 'balsam.core.models.BalsamJob.objects.get', 'BalsamJob.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (8834, 8841), False, 'from balsam.core.models import BalsamJob, safe_select\n'), ((11918, 11929), 'time.time', 'time.time', ([], {}), '()\n', (11927, 11929), False, 'import time\n'), ((12096, 12125), 'time.sleep', 'time.sleep', (['self.DELAY_PERIOD'], {}), '(self.DELAY_PERIOD)\n', (12106, 12125), False, 'import time\n'), ((13783, 13815), 'balsam.launcher.util.get_tail', 'get_tail', (['self.outfiles[pk].name'], {}), '(self.outfiles[pk].name)\n', (13791, 13815), False, 'from balsam.launcher.util import cd, get_tail, remaining_time_minutes\n'), ((15247, 15263), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (15258, 15263), False, 'import shlex\n'), ((15672, 15695), 'os.path.exists', 'os.path.exists', (['workdir'], {}), '(workdir)\n', (15686, 15695), False, 'import os\n'), ((15697, 15717), 'os.makedirs', 'os.makedirs', (['workdir'], {}), '(workdir)\n', (15708, 15717), False, 'import os\n'), ((15741, 15772), 'os.path.join', 'os.path.join', (['workdir', 'out_name'], {}), '(workdir, out_name)\n', (15753, 15772), False, 'import os\n'), ((15848, 15926), 'subprocess.Popen', 'Popen', (['args'], {'stdout': 'outfile', 
'stderr': 'STDOUT', 'cwd': 'workdir', 'env': 'envs', 'shell': 'shell'}), '(args, stdout=outfile, stderr=STDOUT, cwd=workdir, env=envs, shell=shell)\n', (15853, 15926), False, 'from subprocess import Popen, STDOUT, TimeoutExpired\n'), ((16519, 16530), 'time.time', 'time.time', ([], {}), '()\n', (16528, 16530), False, 'import time\n'), ((18350, 18361), 'time.time', 'time.time', ([], {}), '()\n', (18359, 18361), False, 'import time\n'), ((13438, 13449), 'time.time', 'time.time', ([], {}), '()\n', (13447, 13449), False, 'import time\n'), ((14091, 14102), 'time.time', 'time.time', ([], {}), '()\n', (14100, 14102), False, 'import time\n'), ((16164, 16185), 'time.sleep', 'time.sleep', (['sleeptime'], {}), '(sleeptime)\n', (16174, 16185), False, 'import time\n'), ((16136, 16151), 'random.random', 'random.random', ([], {}), '()\n', (16149, 16151), False, 'import random\n')] |
import numpy as np
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Cividis256 as Pallete
from bokeh.plotting import Figure, figure
from bokeh.transform import factor_cmap
def draw_interactive_scatter_plot(
    texts: np.ndarray,
    xs: np.ndarray,
    ys: np.ndarray,
    values: np.ndarray,
    labels: np.ndarray,
    text_column: str,
    label_column: str,
) -> Figure:
    """Build an interactive Bokeh scatter plot of documents colored by score.

    Args:
        texts: per-point document texts, shown in the hover tooltip.
        xs, ys: 2-D coordinates of each point.
        values: positive per-point scores (e.g. perplexity) used for coloring;
            must be > 0 because ``log10`` is applied to them.
        labels: original labels, shown in the hover tooltip.
        text_column: tooltip title for the text field.
        label_column: tooltip title for the label field.

    Returns:
        A configured ``bokeh.plotting.Figure`` ready for embedding.
    """
    # Smooth down values for coloring, by taking the entropy = log10(perplexity)
    # and multiplying it by 10000 (keeps 4 decimal places as integer ranks).
    values = ((np.log10(values)) * 10000).round().astype(int)
    # Normalize values to range between 0-255, to assign a color for each value
    max_value = values.max()
    min_value = values.min()
    if max_value - min_value == 0:
        # All scores identical: map every point to one palette entry.
        # BUGFIX: dtype=int is required — ``Pallete`` is a plain list and
        # indexing it with the float64 values that np.ones() produces by
        # default raises TypeError.
        values_color = np.ones(len(values), dtype=int)
    else:
        values_color = (
            ((values - min_value) / (max_value - min_value) * 255).round().astype(int)
        )
    values_color_sorted = sorted(values_color)
    values_list = values.astype(str).tolist()
    values_sorted = sorted(values_list)
    labels_list = labels.astype(str).tolist()
    source = ColumnDataSource(
        data=dict(x=xs, y=ys, text=texts, label=values_list, original_label=labels_list)
    )
    hover = HoverTool(
        tooltips=[(text_column, "@text{safe}"), (label_column, "@original_label")]
    )
    p = figure(plot_width=800, plot_height=800, tools=[hover])
    p.circle(
        "x",
        "y",
        size=10,
        source=source,
        # Each distinct score string gets the palette color of its rank.
        fill_color=factor_cmap(
            "label",
            palette=[Pallete[id_] for id_ in values_color_sorted],
            factors=values_sorted,
        ),
    )
    # Hide chrome so the embedding page controls the framing.
    p.axis.visible = False
    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = None
    p.toolbar.logo = None
    return p
| [
"bokeh.transform.factor_cmap",
"numpy.log10",
"bokeh.plotting.figure",
"bokeh.models.HoverTool"
] | [((1245, 1334), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[(text_column, '@text{safe}'), (label_column, '@original_label')]"}), "(tooltips=[(text_column, '@text{safe}'), (label_column,\n '@original_label')])\n", (1254, 1334), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((1353, 1407), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(800)', 'plot_height': '(800)', 'tools': '[hover]'}), '(plot_width=800, plot_height=800, tools=[hover])\n', (1359, 1407), False, 'from bokeh.plotting import Figure, figure\n'), ((1507, 1609), 'bokeh.transform.factor_cmap', 'factor_cmap', (['"""label"""'], {'palette': '[Pallete[id_] for id_ in values_color_sorted]', 'factors': 'values_sorted'}), "('label', palette=[Pallete[id_] for id_ in values_color_sorted],\n factors=values_sorted)\n", (1518, 1609), False, 'from bokeh.transform import factor_cmap\n'), ((530, 546), 'numpy.log10', 'np.log10', (['values'], {}), '(values)\n', (538, 546), True, 'import numpy as np\n')] |
def plot_power_spectra(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf, ax=None):
    '''
    Plot measured baryon/CDM density power spectra on log-log axes and
    overlay the linear CAMB predictions (dotted) from the transfer
    function object `tf` for comparison.

    kbins              : wavenumber bins of the measured spectra [Mpc^-1 h a^-1]
    deltab_2, deltac_2 : measured dimensionless baryon / CDM power
    deltac_2_nodeconv  : CDM power without deconvolution (dashed overlay)
    tf                 : transfer-function object providing TF_Pk()
    ax                 : target axes; defaults to the current axes
    '''
    import numpy as np
    import matplotlib.pylab as plt
    from seren3.cosmology.transfer_function import TF
    if ax is None:
        ax = plt.gca()
    # Linear P(k) for baryons (B) and CDM (C) from the transfer function.
    k, pkb = tf.TF_Pk(TF.B)
    k, pkc = tf.TF_Pk(TF.C)
    ax.loglog(kbins, deltac_2, label="CDM", color="royalblue", linewidth=2.)
    ax.loglog(kbins, deltac_2_nodeconv, color="navy", linestyle='--')
    ax.loglog(kbins, deltab_2, label="Baryons", color="darkorange", linewidth=2.)
    # CAMB
    # Convert P(k) to the dimensionless power Delta^2 = k^3 P(k) / (2 pi^2).
    deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
    deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
    # direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
    # fname = "%s/input_powerspec_baryon.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # k = ps_data[0]
    # P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # fname = "%s/input_powerspec_cdm.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # deltac_2_CAMB = P_cdm * (k ** 3.)
    # deltab_2_CAMB = P_bar * (k ** 3.)
    # Dotted curves: the linear-theory (CAMB) reference spectra.
    ax.loglog(k, deltac_2_CAMB, color="royalblue", linestyle=":")
    ax.loglog(k, deltab_2_CAMB, color="darkorange", linestyle=":")
    ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
    # ax.set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
    ax.legend(loc="upper left", frameon=False, prop={"size" : 18})
    # plt.xlim(0.001, 100)
    ax.set_xlim(0.01, 1e4)
    ax.set_ylim(1e-12, 2)
def plot_velocity_power_spectra(kbins, vdeltab_2, vdeltac_2, tf, ax=None):
    '''
    Plot measured baryon/CDM velocity power spectra on log-log axes and
    overlay the CAMB linear-theory velocity spectra (dotted).

    kbins                : wavenumber bins [Mpc^-1 h a^-1]
    vdeltab_2, vdeltac_2 : measured baryon / CDM velocity power (may
                           contain NaNs in empty bins, which are masked)
    tf                   : transfer-function object providing TF_Pk() and cosmo
    ax                   : target axes; defaults to the current axes
    '''
    import numpy as np
    import matplotlib.pylab as plt
    from seren3.cosmology import linear_velocity_ps
    from seren3.cosmology.transfer_function import TF
    if ax is None:
        ax = plt.gca()
    k, pkb = tf.TF_Pk(TF.B)
    k, pkc = tf.TF_Pk(TF.C)
    # Mask NaN bins; additionally drop the first three (poorly sampled) bins.
    ix = np.where(~np.isnan(vdeltab_2))
    ax.loglog(kbins[ix][3:], vdeltac_2[ix][3:], label="CDM", color="royalblue", linewidth=2.)
    ax.loglog(kbins[ix][3:], vdeltab_2[ix][3:], label="Baryons", color="darkorange", linewidth=2.)
    # CAMB
    # Density Delta^2 from the density transfer functions...
    deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
    deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
    cosmo = tf.cosmo
    # ...used only to derive the density->velocity normalisation factor.
    vdeltab_2_CAMB = linear_velocity_ps(k, np.sqrt(deltab_2_CAMB), **cosmo)**2
    vdeltac_2_CAMB = linear_velocity_ps(k, np.sqrt(deltac_2_CAMB), **cosmo)**2
    vnorm = vdeltab_2_CAMB/deltab_2_CAMB
    # Velocity transfer functions, scaled by vnorm.
    # NOTE(review): the trailing 0.702 looks like a hard-coded little-h
    # factor — confirm it matches cosmo['h'] for the run being plotted.
    k, pkb = tf.TF_Pk(TF.VBARYON)
    k, pkc = tf.TF_Pk(TF.VCDM)
    vdeltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.) * vnorm * 0.702
    vdeltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.) * vnorm * 0.702
    # direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
    # fname = "%s/input_powerspec_baryon.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # k = ps_data[0]
    # P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # fname = "%s/input_powerspec_cdm.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # deltac_2_CAMB = P_cdm * (k ** 3.)
    # deltab_2_CAMB = P_bar * (k ** 3.)
    ax.loglog(k, vdeltac_2_CAMB, color="royalblue", linestyle=":")
    ax.loglog(k, vdeltab_2_CAMB, color="darkorange", linestyle=":")
    ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
    ax.set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
    ax.legend(loc="lower left", frameon=False, prop={"size" : 18})
    # plt.xlim(0.001, 100)
    ax.set_xlim(0.01, 1e4)
    # ax.set_ylim(1e-12, 2)
def plot_velocity(data_9, data_14):
    """Show velocity power spectra for two datasets as side-by-side panels.

    Each argument is a ``(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf)``
    tuple; the first three k bins are dropped before plotting.
    """
    import matplotlib.pylab as plt
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
    for axis, dataset in zip(axes.flatten(), (data_9, data_14)):
        kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf = dataset
        # Skip the first three (poorly sampled) bins of every array.
        plot_velocity_power_spectra(
            kbins[3:], deltab_2[3:], deltac_2[3:], tf, ax=axis
        )
    axes[0].set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
    fig.tight_layout()
    plt.show()
def plot(data_9, data_14):
    """Show density power spectra for two datasets as side-by-side panels.

    Each argument is a ``(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf)``
    tuple, e.g. for refinement levels 9 and 14. The first three k bins of
    every array are dropped before plotting.
    """
    import matplotlib.pylab as plt
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
    for ax, data in zip(axs.flatten(), [data_9, data_14]):
        kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf = data
        # BUGFIX: the original wrote ``kbins = kbins[ix][3:]`` with an
        # undefined ``ix`` (copy/paste residue from plot_power_spectra_bias),
        # which raised NameError on the first iteration.
        kbins = kbins[3:]
        deltab_2 = deltab_2[3:]
        deltac_2 = deltac_2[3:]
        deltac_2_nodeconv = deltac_2_nodeconv[3:]
        plot_power_spectra(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf, ax=ax)
    axs[0].set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
    fig.tight_layout()
    plt.show()
def plot_power_spectra_bias(kbins_bias, deltab_2_bias, deltac_2_bias, kbins, deltab_2, deltac_2, tf, ax=None):
    '''
    Two-panel figure: the lower panel shows biased vs. unbiased density
    power spectra (with the CAMB linear prediction for reference); the
    upper panel shows the bias b(k, v_bc) as the ratio biased/unbiased.

    kbins_bias, deltab_2_bias, deltac_2_bias : measured spectra with the
        v_bc (streaming-velocity) bias applied
    kbins, deltab_2, deltac_2                : unbiased reference spectra
    tf : transfer-function object providing TF_Pk()
    ax : accepted for API symmetry with the sibling plot functions but
         ignored — this function always builds its own figure/axes.
    '''
    import numpy as np
    import matplotlib.pylab as plt
    from seren3.cosmology.transfer_function import TF
    import matplotlib.gridspec as gridspec
    # 5x4 grid: top 2 rows = ratio panel, bottom 3 rows = spectra panel,
    # sharing the x axis with no gap between them.
    fig = plt.figure(figsize=(8,6))
    gs = gridspec.GridSpec(5,4,wspace=0.,hspace=0.)
    ax = fig.add_subplot(gs[2:,:])
    ax2 = fig.add_subplot(gs[:2,:], sharex=ax)
    k, pkb = tf.TF_Pk(TF.B)
    k, pkc = tf.TF_Pk(TF.C)
    # Mask NaN bins of the biased spectra.
    ix = np.where(~np.isnan(deltab_2_bias))
    ax.loglog(kbins_bias[ix], deltac_2_bias[ix], label="CDM", color="royalblue", linewidth=2.)
    ax.loglog(kbins_bias[ix], deltab_2_bias[ix], label="Baryons", color="darkorange", linewidth=2.)
    # Re-mask for the unbiased spectra; this ix is reused below for ratios.
    ix = np.where(~np.isnan(deltab_2))
    ax.loglog(kbins[ix], deltac_2[ix], color="royalblue", linewidth=2., linestyle="--")
    ax.loglog(kbins[ix], deltab_2[ix], color="darkorange", linewidth=2., linestyle="--")
    # Off-plot dummy lines solely to create the solid/dashed legend entries.
    ax.loglog([0.0001, 0.0001], [100, 100], color="k", linewidth=2., linestyle="-", label="Biased")
    ax.loglog([0.0001, 0.0001], [100, 100], color="k", linewidth=2., linestyle="--", label="Unbiased")
    # Upper panel: bias = biased / unbiased power, per species.
    ax2.plot(kbins_bias[ix], deltac_2_bias[ix]/deltac_2[ix], color="royalblue", linewidth=2.)
    ax2.plot(kbins_bias[ix], deltab_2_bias[ix]/deltab_2[ix], color="darkorange", linewidth=2.)
    ax2.plot(np.linspace(0.1, 3000), np.ones(50), linestyle=":", color="k", label="Unity")
    # CAMB
    deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
    deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
    # direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
    # fname = "%s/input_powerspec_baryon.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # k = ps_data[0]
    # P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # fname = "%s/input_powerspec_cdm.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # deltac_2_CAMB = P_cdm * (k ** 3.)
    # deltab_2_CAMB = P_bar * (k ** 3.)
    ax.loglog(k, deltac_2_CAMB, color="royalblue", linestyle=":", alpha=0.5)
    ax.loglog(k, deltab_2_CAMB, color="darkorange", linestyle=":", alpha=0.5)
    ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
    ax.set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
    ax.legend(loc="lower left", ncol=2, frameon=False, prop={"size" : 18})
    # plt.xlim(0.001, 100)
    ax.set_xlim(1, 2000)
    ax.set_ylim(1e-8, 2)
    ax2.set_ylim(-0.2, 1.2)
    ax2.set_ylabel(r"$b(k,v_{bc})$", fontsize=20)
    ax2.set_title(r"$|v_{bc,\mathrm{rec}}|$ = 19.06 km s$^{-1}$", fontsize=20)
    ax2.legend(loc="lower left", frameon=False, prop={"size" : 20})
    # Hide the upper panel's x tick labels; it shares the x axis with ax.
    plt.setp(ax2.get_xticklabels(), visible=False)
| [
"matplotlib.pylab.gca",
"matplotlib.pylab.subplots",
"numpy.sqrt",
"numpy.ones",
"matplotlib.pylab.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.isnan",
"matplotlib.pylab.show"
] | [((3864, 3911), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 6)'}), '(nrows=1, ncols=2, figsize=(12, 6))\n', (3876, 3911), True, 'import matplotlib.pylab as plt\n'), ((4410, 4420), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4418, 4420), True, 'import matplotlib.pylab as plt\n'), ((4501, 4548), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 6)'}), '(nrows=1, ncols=2, figsize=(12, 6))\n', (4513, 4548), True, 'import matplotlib.pylab as plt\n'), ((5186, 5196), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5194, 5196), True, 'import matplotlib.pylab as plt\n'), ((5559, 5585), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (5569, 5585), True, 'import matplotlib.pylab as plt\n'), ((5594, 5641), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(5)', '(4)'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(5, 4, wspace=0.0, hspace=0.0)\n', (5611, 5641), True, 'import matplotlib.gridspec as gridspec\n'), ((310, 319), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (317, 319), True, 'import matplotlib.pylab as plt\n'), ((1990, 1999), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (1997, 1999), True, 'import matplotlib.pylab as plt\n'), ((6643, 6665), 'numpy.linspace', 'np.linspace', (['(0.1)', '(3000)'], {}), '(0.1, 3000)\n', (6654, 6665), True, 'import numpy as np\n'), ((6667, 6678), 'numpy.ones', 'np.ones', (['(50)'], {}), '(50)\n', (6674, 6678), True, 'import numpy as np\n'), ((2077, 2096), 'numpy.isnan', 'np.isnan', (['vdeltab_2'], {}), '(vdeltab_2)\n', (2085, 2096), True, 'import numpy as np\n'), ((2483, 2505), 'numpy.sqrt', 'np.sqrt', (['deltab_2_CAMB'], {}), '(deltab_2_CAMB)\n', (2490, 2505), True, 'import numpy as np\n'), ((2562, 2584), 'numpy.sqrt', 'np.sqrt', (['deltac_2_CAMB'], {}), '(deltac_2_CAMB)\n', (2569, 2584), True, 'import numpy as np\n'), ((5797, 5820), 
'numpy.isnan', 'np.isnan', (['deltab_2_bias'], {}), '(deltab_2_bias)\n', (5805, 5820), True, 'import numpy as np\n'), ((6038, 6056), 'numpy.isnan', 'np.isnan', (['deltab_2'], {}), '(deltab_2)\n', (6046, 6056), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), '../../'))
import os
import random
import argparse
from datetime import datetime
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
import torch
import torch.nn as nn
from torch.autograd import grad
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from learning.model import Generator, Discriminator
from robo_utils.oxford.oxford_dataset import GANDataset
from utils import write_params
from carla_utils import parse_yaml_file_unsafe
# --- Reproducibility and hardware setup -------------------------------
# NOTE(review): random.seed uses the wall clock while torch is fixed to
# 666, so torch ops are reproducible but Python-level randomness is not.
random.seed(datetime.now())
torch.manual_seed(666)
torch.cuda.manual_seed(666)
torch.set_num_threads(16)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# --- Command-line configuration ----------------------------------------
# NOTE(review): argparse's type=bool is a known footgun — any non-empty
# string (including "False") parses as True; only the default is False.
parser = argparse.ArgumentParser()
parser.add_argument('--test_mode', type=bool, default=False, help='test model switch')
parser.add_argument('--dataset_name', type=str, default="train-gan-01", help='name of the dataset')
parser.add_argument('--width', type=int, default=400, help='image width')
parser.add_argument('--height', type=int, default=200, help='image height')
parser.add_argument('--scale', type=float, default=30., help='longitudinal length')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--vector_dim', type=int, default=64, help='vector dim')
parser.add_argument('--points_num', type=int, default=16, help='points number')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='adam: weight_decay')
parser.add_argument('--lr', type=float, default=1e-4, help='adam: learning rate')
parser.add_argument('--n_cpu', type=int, default=16, help='number of cpu threads to use during batch generation')
parser.add_argument('--checkpoint_interval', type=int, default=200, help='interval between model checkpoints')
parser.add_argument('--test_interval', type=int, default=50, help='interval between model test')
parser.add_argument('--max_dist', type=float, default=25., help='max distance')
parser.add_argument('--max_speed', type=float, default=10., help='max speed')
parser.add_argument('--max_t', type=float, default=3., help='max time')
opt = parser.parse_args()
if opt.test_mode: opt.batch_size = 1
# --- Logging and output directories -------------------------------------
description = 'train GAN'
log_path = 'result/log/'+opt.dataset_name+'/'
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)
# --- Models, losses, optimizers ------------------------------------------
# Generator input = condition (1) + time (1) + noise vector.
generator = Generator(input_dim=1+1+opt.vector_dim, output=2).to(device)
# Discriminator sees a flattened trajectory plus its condition scalar.
discriminator = Discriminator(opt.points_num*2+1).to(device)
# generator.load_state_dict(torch.load('result/saved_models/train-gan-data-01/generator_66600.pth'))
# discriminator.load_state_dict(torch.load('result/saved_models/train-gan-data-01/discriminator_66600.pth'))
start_point_criterion = torch.nn.MSELoss()
criterion = torch.nn.BCELoss()
trajectory_criterion = torch.nn.MSELoss()
g_optimizer = torch.optim.RMSprop(generator.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
d_optimizer = torch.optim.RMSprop(discriminator.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
# --- Data ------------------------------------------------------------------
param = parse_yaml_file_unsafe('../../params/param_oxford.yaml')
train_loader = DataLoader(GANDataset(param, mode='train', opt=opt), batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
train_samples = iter(train_loader)
test_loader = DataLoader(GANDataset(param, mode='eval', opt=opt), batch_size=1, shuffle=False, num_workers=1)
test_samples = iter(test_loader)
def show_traj(fake_traj, real_traj, t, step):
    """Plot a generated vs. ground-truth trajectory and save it as a PNG.

    Args:
        fake_traj: (points_num, 2) normalized xy points from the generator.
        real_traj: (points_num, 2) normalized ground-truth xy points.
        t: normalized time stamps; unused for drawing, kept only so the
           existing call sites do not change.
        step: current training step, used in the output file name.
    """
    fake_xy = fake_traj
    x = fake_xy[:,0]*opt.max_dist
    y = fake_xy[:,1]*opt.max_dist
    real_xy = real_traj
    real_x = real_xy[:,0]*opt.max_dist
    real_y = real_xy[:,1]*opt.max_dist
    max_x = 30.
    max_y = 30.
    fig = plt.figure(figsize=(7, 7))
    ax1 = fig.add_subplot(111)
    ax1.plot(x, y, label='trajectory', color = 'r', linewidth=5)
    ax1.plot(real_x, real_y, label='real-trajectory', color = 'b', linewidth=5, linestyle='--')
    ax1.set_xlabel('Forward/(m)')
    ax1.set_ylabel('Sideways/(m)')
    ax1.set_xlim([0., max_x+5])
    ax1.set_ylim([-max_y, max_y])
    # CLEANUP: the original called plt.legend twice (the second call with
    # loc='lower left' silently replaced the first) and computed a dead
    # ``t = max_x*t``. Only the effective legend call is kept.
    plt.legend(loc='lower left')
    plt.savefig('result/output/%s/' % opt.dataset_name+str(step)+'_curve.png')
    plt.close('all')
def set_requires_grad(nets, requires_grad=False):
    """Toggle gradient tracking for every parameter of the given network(s).

    Freezing a network (e.g. the discriminator while the generator is
    updated) avoids computing gradients that would be discarded.

    Parameters:
        nets (network or list of networks) -- networks to update; ``None``
            entries are skipped
        requires_grad (bool) -- whether parameters should track gradients
    """
    net_list = nets if isinstance(nets, list) else [nets]
    for network in net_list:
        if network is None:
            continue
        for parameter in network.parameters():
            parameter.requires_grad = requires_grad
class GradientPaneltyLoss(nn.Module):
    """WGAN-GP gradient penalty: mean((||dy/dx||_2 - 1) ** 2).

    (The class name keeps the original spelling so existing references
    continue to work.)
    """
    def __init__(self):
        super(GradientPaneltyLoss, self).__init__()
    def forward(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        grad_outputs = torch.ones_like(y)
        gradients = torch.autograd.grad(
            outputs=y,
            inputs=x,
            grad_outputs=grad_outputs,
            retain_graph=True,
            create_graph=True,
            only_inputs=True,
        )[0]
        # Flatten per-sample gradients, take the per-sample L2 norm, and
        # penalize its deviation from 1.
        flat = gradients.view(gradients.size(0), -1)
        norms = torch.sqrt(torch.sum(flat ** 2, dim=1))
        return torch.mean((norms - 1) ** 2)
fn_GP = GradientPaneltyLoss().to(device)
# --- WGAN-GP training loop ------------------------------------------------
# Each batch: (1) generate trajectories conditioned on a random initial
# speed, (2) update the discriminator with the Wasserstein loss plus a
# gradient penalty on condition/trajectory interpolates, (3) update the
# generator with the adversarial loss plus start-point / start-velocity
# regularizers.
total_step = 0
for i, batch in enumerate(train_loader):
    total_step += 1
    # Flatten per-point tensors to (batch*points_num, dim) and move to device.
    batch['t'] = batch['t'].view(-1,1).to(device)
    batch['v_0'] = batch['v_0'].view(-1,1).to(device)
    batch['v0_array'] = batch['v0_array'].view(-1,1).to(device)
    batch['xy'] = batch['xy'].view(-1,2).to(device)
    # Gradients w.r.t. time are needed below to derive velocities.
    batch['t'].requires_grad = True
    real_traj = batch['xy'].view(-1, opt.points_num*2)
    real_condition = batch['v_0']
    # Sample a random condition (initial speed in [0,1)) for the generator.
    fake_condition = torch.rand_like(real_condition)
    batch_fake_condition = fake_condition.unsqueeze(1).expand(opt.batch_size, opt.points_num, 1).reshape(opt.batch_size*opt.points_num, 1)#batch['v0_array']
    real_traj_with_condition = torch.cat([real_traj, real_condition], dim=1)
    # for generator
    # One noise vector per trajectory, repeated for each of its points.
    noise = torch.randn(opt.batch_size, opt.vector_dim).to(device)
    noise = noise.unsqueeze(1)
    noise = noise.expand(opt.batch_size, opt.points_num, noise.shape[-1])
    noise = noise.reshape(opt.batch_size * opt.points_num, noise.shape[-1])
    output_xy = generator(batch_fake_condition, noise, batch['t'])
    # ----- Discriminator update ------------------------------------------
    set_requires_grad(discriminator, True)
    discriminator.zero_grad()
    pred_real = discriminator(real_traj_with_condition)
    fake_traj = output_xy.view(-1, opt.points_num*2)
    # Velocities via autograd d(xy)/dt, rescaled from normalized units.
    vx = (opt.max_dist/opt.max_t)*grad(output_xy.view(-1, opt.points_num, 2)[...,0].sum(), batch['t'], create_graph=True)[0]
    vy = (opt.max_dist/opt.max_t)*grad(output_xy.view(-1, opt.points_num, 2)[...,1].sum(), batch['t'], create_graph=True)[0]
    vxy = torch.cat([vx, vy], dim=1)
    start_v = vxy.view(-1, opt.points_num, 2)[:,0]/opt.max_speed
    # start point loss
    # Trajectories must start at the origin with speed equal to the condition.
    start_points = output_xy.view(-1, opt.points_num, 2)[:,0]
    ideal_start_points = torch.zeros(opt.batch_size, 2).to(device)
    start_point_loss = start_point_criterion(start_points, ideal_start_points)
    start_v_loss = start_point_criterion(torch.norm(start_v, dim=1), fake_condition.squeeze(1))
    # detach(): the generator must not receive gradients from the D update.
    fake_traj_with_condition = torch.cat([fake_traj.detach(), fake_condition], dim=1)
    pred_fake = discriminator(fake_traj_with_condition)
    # Gradient penalty on random interpolates of (trajectory, condition).
    alpha = torch.rand(opt.batch_size, 1)
    single_alpha = alpha.to(device)
    interpolated_condition = (single_alpha * real_condition.data + (1 - single_alpha) * fake_condition.data).requires_grad_(True)
    alpha = alpha.expand_as(real_traj)
    alpha = alpha.to(device)
    interpolated = (alpha * real_traj.data + (1 - alpha) * fake_traj.detach().data).requires_grad_(True)
    output_ = torch.cat([interpolated, interpolated_condition], dim=1)
    src_out_ = discriminator(output_)
    loss_D_real = torch.mean(pred_real)
    loss_D_fake = torch.mean(pred_fake)
    loss_D_gp = fn_GP(src_out_, output_)
    # Wasserstein critic loss with gradient-penalty weight 10.
    loss_D = loss_D_fake - loss_D_real + 10*loss_D_gp
    loss_D.backward()
    torch.nn.utils.clip_grad_value_(discriminator.parameters(), clip_value=1)
    d_optimizer.step()
    # ----- Generator update ------------------------------------------------
    set_requires_grad(discriminator, False)
    generator.zero_grad()
    fake_traj_with_condition = torch.cat([fake_traj, fake_condition], dim=1)
    pred_fake = discriminator(fake_traj_with_condition)
    loss_G = -torch.mean(pred_fake) + 10*start_point_loss + 10*start_v_loss
    loss_G.backward()
    torch.nn.utils.clip_grad_value_(generator.parameters(), clip_value=1)
    g_optimizer.step()
    # ----- Logging / visualization / checkpoints ----------------------------
    logger.add_scalar('train/loss_G', loss_G.item(), total_step)
    logger.add_scalar('train/loss_D_real', loss_D_real.item(), total_step)
    logger.add_scalar('train/loss_D_fake', loss_D_fake.item(), total_step)
    logger.add_scalar('train/loss_D_gp', loss_D_gp.item(), total_step)
    if total_step % opt.test_interval == 0:
        show_traj(fake_traj.view(-1, 2)[:,:2].view(opt.batch_size, -1, 2).data.cpu().numpy()[0], batch['xy'].view(opt.batch_size, -1, 2).data.cpu().numpy()[0], batch['t'].view(opt.batch_size, -1).data.cpu().numpy()[0], total_step)
    if total_step % opt.checkpoint_interval == 0:
        torch.save(generator.state_dict(), 'result/saved_models/%s/generator_%d.pth'%(opt.dataset_name, total_step))
        torch.save(discriminator.state_dict(), 'result/saved_models/%s/discriminator_%d.pth'%(opt.dataset_name, total_step))
| [
"carla_utils.parse_yaml_file_unsafe",
"learning.model.Generator",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.sum",
"torch.utils.tensorboard.SummaryWriter",
"robo_utils.oxford.oxford_dataset.GANDataset",
"argparse.ArgumentParser",
"torch.mean",
"torch.set_num_threads",
"matplotlib.pypl... | [((249, 300), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (268, 300), True, 'import matplotlib.pyplot as plt\n'), ((675, 697), 'torch.manual_seed', 'torch.manual_seed', (['(666)'], {}), '(666)\n', (692, 697), False, 'import torch\n'), ((698, 725), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(666)'], {}), '(666)\n', (720, 725), False, 'import torch\n'), ((726, 751), 'torch.set_num_threads', 'torch.set_num_threads', (['(16)'], {}), '(16)\n', (747, 751), False, 'import torch\n'), ((835, 860), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (858, 860), False, 'import argparse\n'), ((2390, 2461), 'os.makedirs', 'os.makedirs', (["('result/saved_models/%s' % opt.dataset_name)"], {'exist_ok': '(True)'}), "('result/saved_models/%s' % opt.dataset_name, exist_ok=True)\n", (2401, 2461), False, 'import os\n'), ((2462, 2527), 'os.makedirs', 'os.makedirs', (["('result/output/%s' % opt.dataset_name)"], {'exist_ok': '(True)'}), "('result/output/%s' % opt.dataset_name, exist_ok=True)\n", (2473, 2527), False, 'import os\n'), ((3015, 3033), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3031, 3033), False, 'import torch\n'), ((3046, 3064), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (3062, 3064), False, 'import torch\n'), ((3088, 3106), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3104, 3106), False, 'import torch\n'), ((3320, 3376), 'carla_utils.parse_yaml_file_unsafe', 'parse_yaml_file_unsafe', (['"""../../params/param_oxford.yaml"""'], {}), "('../../params/param_oxford.yaml')\n", (3342, 3376), False, 'from carla_utils import parse_yaml_file_unsafe\n'), ((659, 673), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (671, 673), False, 'from datetime import datetime\n'), ((2564, 2595), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_path'}), 
'(log_dir=log_path)\n', (2577, 2595), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2600, 2643), 'utils.write_params', 'write_params', (['log_path', 'parser', 'description'], {}), '(log_path, parser, description)\n', (2612, 2643), False, 'from utils import write_params\n'), ((3403, 3443), 'robo_utils.oxford.oxford_dataset.GANDataset', 'GANDataset', (['param'], {'mode': '"""train"""', 'opt': 'opt'}), "(param, mode='train', opt=opt)\n", (3413, 3443), False, 'from robo_utils.oxford.oxford_dataset import GANDataset\n'), ((3571, 3610), 'robo_utils.oxford.oxford_dataset.GANDataset', 'GANDataset', (['param'], {'mode': '"""eval"""', 'opt': 'opt'}), "(param, mode='eval', opt=opt)\n", (3581, 3610), False, 'from robo_utils.oxford.oxford_dataset import GANDataset\n'), ((3982, 4008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (3992, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4372), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4353, 4372), True, 'import matplotlib.pyplot as plt\n'), ((4399, 4427), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (4409, 4427), True, 'import matplotlib.pyplot as plt\n'), ((4511, 4527), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4520, 4527), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6260), 'torch.rand_like', 'torch.rand_like', (['real_condition'], {}), '(real_condition)\n', (6244, 6260), False, 'import torch\n'), ((6450, 6495), 'torch.cat', 'torch.cat', (['[real_traj, real_condition]'], {'dim': '(1)'}), '([real_traj, real_condition], dim=1)\n', (6459, 6495), False, 'import torch\n'), ((7285, 7311), 'torch.cat', 'torch.cat', (['[vx, vy]'], {'dim': '(1)'}), '([vx, vy], dim=1)\n', (7294, 7311), False, 'import torch\n'), ((7874, 7903), 'torch.rand', 'torch.rand', (['opt.batch_size', '(1)'], {}), '(opt.batch_size, 
1)\n', (7884, 7903), False, 'import torch\n'), ((8263, 8319), 'torch.cat', 'torch.cat', (['[interpolated, interpolated_condition]'], {'dim': '(1)'}), '([interpolated, interpolated_condition], dim=1)\n', (8272, 8319), False, 'import torch\n'), ((8377, 8398), 'torch.mean', 'torch.mean', (['pred_real'], {}), '(pred_real)\n', (8387, 8398), False, 'import torch\n'), ((8417, 8438), 'torch.mean', 'torch.mean', (['pred_fake'], {}), '(pred_fake)\n', (8427, 8438), False, 'import torch\n'), ((8766, 8811), 'torch.cat', 'torch.cat', (['[fake_traj, fake_condition]'], {'dim': '(1)'}), '([fake_traj, fake_condition], dim=1)\n', (8775, 8811), False, 'import torch\n'), ((116, 133), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (123, 133), False, 'from os.path import join, dirname\n'), ((787, 812), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (810, 812), False, 'import torch\n'), ((2657, 2710), 'learning.model.Generator', 'Generator', ([], {'input_dim': '(1 + 1 + opt.vector_dim)', 'output': '(2)'}), '(input_dim=1 + 1 + opt.vector_dim, output=2)\n', (2666, 2710), False, 'from learning.model import Generator, Discriminator\n'), ((2734, 2771), 'learning.model.Discriminator', 'Discriminator', (['(opt.points_num * 2 + 1)'], {}), '(opt.points_num * 2 + 1)\n', (2747, 2771), False, 'from learning.model import Generator, Discriminator\n'), ((5249, 5267), 'torch.ones_like', 'torch.ones_like', (['y'], {}), '(y)\n', (5264, 5267), False, 'import torch\n'), ((5700, 5734), 'torch.mean', 'torch.mean', (['((dydx_l2norm - 1) ** 2)'], {}), '((dydx_l2norm - 1) ** 2)\n', (5710, 5734), False, 'import torch\n'), ((7659, 7685), 'torch.norm', 'torch.norm', (['start_v'], {'dim': '(1)'}), '(start_v, dim=1)\n', (7669, 7685), False, 'import torch\n'), ((5283, 5405), 'torch.autograd.grad', 'torch.autograd.grad', ([], {'outputs': 'y', 'inputs': 'x', 'grad_outputs': 'weight', 'retain_graph': '(True)', 'create_graph': '(True)', 'only_inputs': '(True)'}), '(outputs=y, 
inputs=x, grad_outputs=weight, retain_graph=\n True, create_graph=True, only_inputs=True)\n', (5302, 5405), False, 'import torch\n'), ((5656, 5683), 'torch.sum', 'torch.sum', (['(dydx ** 2)'], {'dim': '(1)'}), '(dydx ** 2, dim=1)\n', (5665, 5683), False, 'import torch\n'), ((6529, 6572), 'torch.randn', 'torch.randn', (['opt.batch_size', 'opt.vector_dim'], {}), '(opt.batch_size, opt.vector_dim)\n', (6540, 6572), False, 'import torch\n'), ((7492, 7522), 'torch.zeros', 'torch.zeros', (['opt.batch_size', '(2)'], {}), '(opt.batch_size, 2)\n', (7503, 7522), False, 'import torch\n'), ((8882, 8903), 'torch.mean', 'torch.mean', (['pred_fake'], {}), '(pred_fake)\n', (8892, 8903), False, 'import torch\n')] |
import os
class PathManager:
    """Class-level registry for the app's input/output/import paths.

    All state lives on the class so every caller shares one configuration.
    Optional UI labels, when registered, are refreshed whenever the
    corresponding folder changes.
    """

    # Optional UI label widgets mirroring the configured folders.
    input_folder_label = None
    output_folder_label = None

    _input_folder_path = None
    _output_folder_path = None
    _import_file_path = None
    _import_file_style = None

    @classmethod
    def set_input_folder_label(cls, label):
        """Register the UI label that displays the input folder."""
        cls.input_folder_label = label

    @classmethod
    def set_output_folder_label(cls, label):
        """Register the UI label that displays the output folder."""
        cls.output_folder_label = label

    @classmethod
    def get_input_path(cls, file_name=None):
        """Return the input folder, or ``file_name`` joined onto it."""
        result = cls._input_folder_path
        if file_name is not None:
            result = os.path.join(cls._input_folder_path, file_name)
        return result

    @classmethod
    def get_output_path(cls, file_name=None):
        """Return the output folder, or ``file_name`` joined onto it."""
        result = cls._output_folder_path
        if file_name is not None:
            result = os.path.join(cls._output_folder_path, file_name)
        return result

    @classmethod
    def set_input_path(cls, path):
        """Set the input folder (stored absolute) and refresh its label."""
        cls._input_folder_path = os.path.abspath(path)
        if cls.input_folder_label is not None:
            cls.input_folder_label.text = f"Input folder: {cls._input_folder_path}"

    @classmethod
    def set_output_path(cls, path):
        """Set the output folder (stored absolute) and refresh its label."""
        cls._output_folder_path = os.path.abspath(path)
        if cls.output_folder_label is not None:
            cls.output_folder_label.text = f"Output folder: {cls._output_folder_path}"

    @classmethod
    def input_path_exists(cls, path=None):
        """True if the input folder (or *path* inside it) exists on disk."""
        all_path = cls.get_input_path(path)
        return os.path.exists(all_path)

    @classmethod
    def output_path_exists(cls, path=None):
        """True if the output folder (or *path* inside it) exists on disk.

        CONSISTENCY FIX: *path* now defaults to None like its sibling
        ``input_path_exists`` (backward compatible — existing callers that
        pass a path are unaffected).
        """
        all_path = cls.get_output_path(path)
        return os.path.exists(all_path)

    @classmethod
    def open_input_file(cls, file_name, mode):
        """Open ``file_name`` inside the input folder."""
        full_path = os.path.join(cls.get_input_path(), file_name)
        return cls._open_file(full_path, mode)

    @classmethod
    def open_output_file(cls, file_name, mode):
        """Open ``file_name`` inside the output folder."""
        full_path = os.path.join(cls.get_output_path(), file_name)
        return cls._open_file(full_path, mode)

    @classmethod
    def set_import_file(cls, file_name, style):
        """Remember the file to import (stored absolute) and its style."""
        full_path = os.path.abspath(file_name)
        cls._import_file_path = full_path
        cls._import_file_style = style

    @classmethod
    def get_import_path(cls):
        """Return the absolute path of the registered import file."""
        return cls._import_file_path

    @classmethod
    def get_import_style(cls):
        """Return the style registered for the import file."""
        return cls._import_file_style

    @classmethod
    def _open_file(cls, file_name, mode):
        # CLEANUP: dropped the redundant f-string wrappers around the
        # already-string arguments. UTF-8 text with '\n' newlines throughout.
        return open(file_name, mode, encoding='utf-8', newline='\n')
| [
"os.path.abspath",
"os.path.exists",
"os.path.join"
] | [((845, 866), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (860, 866), False, 'import os\n'), ((1059, 1080), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (1074, 1080), False, 'import os\n'), ((1303, 1327), 'os.path.exists', 'os.path.exists', (['all_path'], {}), '(all_path)\n', (1317, 1327), False, 'import os\n'), ((1427, 1451), 'os.path.exists', 'os.path.exists', (['all_path'], {}), '(all_path)\n', (1441, 1451), False, 'import os\n'), ((1850, 1876), 'os.path.abspath', 'os.path.abspath', (['file_name'], {}), '(file_name)\n', (1865, 1876), False, 'import os\n'), ((506, 553), 'os.path.join', 'os.path.join', (['cls._input_folder_path', 'file_name'], {}), '(cls._input_folder_path, file_name)\n', (518, 553), False, 'import os\n'), ((705, 753), 'os.path.join', 'os.path.join', (['cls._output_folder_path', 'file_name'], {}), '(cls._output_folder_path, file_name)\n', (717, 753), False, 'import os\n')] |
"""Views fo the node settings page."""
# -*- coding: utf-8 -*-
import logging
import httplib as http
from dropbox.rest import ErrorResponse
from dropbox.client import DropboxClient
from urllib3.exceptions import MaxRetryError
from framework.exceptions import HTTPError
from website.addons.dropbox.serializer import DropboxSerializer
from website.addons.base import generic_views
logger = logging.getLogger(__name__)
debug = logger.debug
SHORT_NAME = 'dropbox'
FULL_NAME = 'Dropbox'
dropbox_account_list = generic_views.account_list(
SHORT_NAME,
DropboxSerializer
)
dropbox_import_auth = generic_views.import_auth(
SHORT_NAME,
DropboxSerializer
)
def _get_folders(node_addon, folder_id):
node = node_addon.owner
if folder_id is None:
return [{
'id': '/',
'path': '/',
'addon': 'dropbox',
'kind': 'folder',
'name': '/ (Full Dropbox)',
'urls': {
'folders': node.api_url_for('dropbox_folder_list', folderId='/'),
}
}]
client = DropboxClient(node_addon.external_account.oauth_key)
file_not_found = HTTPError(http.NOT_FOUND, data=dict(message_short='File not found',
message_long='The Dropbox file '
'you requested could not be found.'))
max_retry_error = HTTPError(http.REQUEST_TIMEOUT, data=dict(message_short='Request Timeout',
message_long='Dropbox could not be reached '
'at this time.'))
try:
metadata = client.metadata(folder_id)
except ErrorResponse:
raise file_not_found
except MaxRetryError:
raise max_retry_error
# Raise error if folder was deleted
if metadata.get('is_deleted'):
raise file_not_found
return [
{
'addon': 'dropbox',
'kind': 'folder',
'id': item['path'],
'name': item['path'].split('/')[-1],
'path': item['path'],
'urls': {
'folders': node.api_url_for('dropbox_folder_list', folderId=item['path']),
}
}
for item in metadata['contents']
if item['is_dir']
]
dropbox_folder_list = generic_views.folder_list(
SHORT_NAME,
FULL_NAME,
_get_folders
)
dropbox_get_config = generic_views.get_config(
SHORT_NAME,
DropboxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = folder['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
dropbox_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
DropboxSerializer,
_set_folder
)
dropbox_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
dropbox_root_folder = generic_views.root_folder(
SHORT_NAME
)
| [
"logging.getLogger",
"website.addons.base.generic_views.deauthorize_node",
"website.addons.base.generic_views.import_auth",
"website.addons.base.generic_views.root_folder",
"website.addons.base.generic_views.set_config",
"website.addons.base.generic_views.folder_list",
"website.addons.base.generic_views... | [((391, 418), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (408, 418), False, 'import logging\n'), ((510, 567), 'website.addons.base.generic_views.account_list', 'generic_views.account_list', (['SHORT_NAME', 'DropboxSerializer'], {}), '(SHORT_NAME, DropboxSerializer)\n', (536, 567), False, 'from website.addons.base import generic_views\n'), ((601, 657), 'website.addons.base.generic_views.import_auth', 'generic_views.import_auth', (['SHORT_NAME', 'DropboxSerializer'], {}), '(SHORT_NAME, DropboxSerializer)\n', (626, 657), False, 'from website.addons.base import generic_views\n'), ((2357, 2419), 'website.addons.base.generic_views.folder_list', 'generic_views.folder_list', (['SHORT_NAME', 'FULL_NAME', '_get_folders'], {}), '(SHORT_NAME, FULL_NAME, _get_folders)\n', (2382, 2419), False, 'from website.addons.base import generic_views\n'), ((2456, 2511), 'website.addons.base.generic_views.get_config', 'generic_views.get_config', (['SHORT_NAME', 'DropboxSerializer'], {}), '(SHORT_NAME, DropboxSerializer)\n', (2480, 2511), False, 'from website.addons.base import generic_views\n'), ((2675, 2754), 'website.addons.base.generic_views.set_config', 'generic_views.set_config', (['SHORT_NAME', 'FULL_NAME', 'DropboxSerializer', '_set_folder'], {}), '(SHORT_NAME, FULL_NAME, DropboxSerializer, _set_folder)\n', (2699, 2754), False, 'from website.addons.base import generic_views\n'), ((2801, 2843), 'website.addons.base.generic_views.deauthorize_node', 'generic_views.deauthorize_node', (['SHORT_NAME'], {}), '(SHORT_NAME)\n', (2831, 2843), False, 'from website.addons.base import generic_views\n'), ((2873, 2910), 'website.addons.base.generic_views.root_folder', 'generic_views.root_folder', (['SHORT_NAME'], {}), '(SHORT_NAME)\n', (2898, 2910), False, 'from website.addons.base import generic_views\n'), ((1075, 1127), 'dropbox.client.DropboxClient', 'DropboxClient', (['node_addon.external_account.oauth_key'], {}), 
'(node_addon.external_account.oauth_key)\n', (1088, 1127), False, 'from dropbox.client import DropboxClient\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from unittest.mock import patch
import numpy as np
from ...common import testing
from . import core
@testing.parametrized(
bragg=("bragg", [2.93, 2.18, 2.35, 2.12, 31.53, 15.98, 226.69, 193.11]),
morpho=("morpho", [280.36, 52.96, 208.16, 72.69, 89.92, 60.37, 226.69, 193.11]),
chirped=("chirped", [280.36, 52.96, 104.08, 36.34, 31.53, 15.98, 226.69, 193.11]),
)
def test_photonics_transforms(pb: str, expected: List[float]) -> None:
np.random.seed(24)
with patch("shutil.which", return_value="here"):
func = core.Photonics(pb, 16) # should be 8... but it is actually not allowed. Nevermind here, HACK IT NEXT LINE
func.instrumentation.args[0]._dimension = 8 # type: ignore
x = np.random.normal(0, 1, size=8)
(output,), _ = func.instrumentation.data_to_arguments(x)
np.testing.assert_almost_equal(output, expected, decimal=2)
np.random.seed(24)
x2 = np.random.normal(0, 1, size=8)
np.testing.assert_almost_equal(x, x2, decimal=2, err_msg="x was modified in the process")
def test_morpho_transform_constraints() -> None:
with patch("shutil.which", return_value="here"):
func = core.Photonics("morpho", 60)
x = np.random.normal(0, 5, size=60) # std 5 to play with boundaries
(output,), _ = func.instrumentation.data_to_arguments(x)
assert np.all(output >= 0)
q = len(x) // 4
assert np.all(output[:q] <= 300)
assert np.all(output[q: 3 * q] <= 600)
assert np.all(output[2 * q: 3 * q] >= 30)
assert np.all(output[3 * q:] <= 300)
def test_photonics() -> None:
with patch("shutil.which", return_value="here"):
photo = core.Photonics("bragg", 16)
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
output = photo(np.zeros(16))
np.testing.assert_equal(output, 12)
# check error
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n"):
np.testing.assert_raises(RuntimeError, photo, np.zeros(16).tolist())
np.testing.assert_raises(AssertionError, photo, np.zeros(12).tolist())
| [
"numpy.random.normal",
"numpy.testing.assert_equal",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.random.seed",
"numpy.all",
"unittest.mock.patch"
] | [((674, 692), 'numpy.random.seed', 'np.random.seed', (['(24)'], {}), '(24)\n', (688, 692), True, 'import numpy as np\n'), ((940, 970), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (956, 970), True, 'import numpy as np\n'), ((1036, 1095), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output', 'expected'], {'decimal': '(2)'}), '(output, expected, decimal=2)\n', (1066, 1095), True, 'import numpy as np\n'), ((1100, 1118), 'numpy.random.seed', 'np.random.seed', (['(24)'], {}), '(24)\n', (1114, 1118), True, 'import numpy as np\n'), ((1128, 1158), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (1144, 1158), True, 'import numpy as np\n'), ((1163, 1257), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x2'], {'decimal': '(2)', 'err_msg': '"""x was modified in the process"""'}), "(x, x2, decimal=2, err_msg=\n 'x was modified in the process')\n", (1193, 1257), True, 'import numpy as np\n'), ((1409, 1440), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)'], {'size': '(60)'}), '(0, 5, size=60)\n', (1425, 1440), True, 'import numpy as np\n'), ((1546, 1565), 'numpy.all', 'np.all', (['(output >= 0)'], {}), '(output >= 0)\n', (1552, 1565), True, 'import numpy as np\n'), ((1597, 1622), 'numpy.all', 'np.all', (['(output[:q] <= 300)'], {}), '(output[:q] <= 300)\n', (1603, 1622), True, 'import numpy as np\n'), ((1634, 1664), 'numpy.all', 'np.all', (['(output[q:3 * q] <= 600)'], {}), '(output[q:3 * q] <= 600)\n', (1640, 1664), True, 'import numpy as np\n'), ((1677, 1710), 'numpy.all', 'np.all', (['(output[2 * q:3 * q] >= 30)'], {}), '(output[2 * q:3 * q] >= 30)\n', (1683, 1710), True, 'import numpy as np\n'), ((1723, 1752), 'numpy.all', 'np.all', (['(output[3 * q:] <= 300)'], {}), '(output[3 * q:] <= 300)\n', (1729, 1752), True, 'import numpy as np\n'), ((2139, 2174), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['output', '(12)'], {}), '(output, 12)\n', (2162, 2174), True, 'import numpy as np\n'), ((702, 744), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (707, 744), False, 'from unittest.mock import patch\n'), ((1313, 1355), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (1318, 1355), False, 'from unittest.mock import patch\n'), ((1794, 1836), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (1799, 1836), False, 'from unittest.mock import patch\n'), ((1891, 1988), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n12\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n12\\n')\n", (1896, 1988), False, 'from unittest.mock import patch\n'), ((2202, 2295), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n')\n", (2207, 2295), False, 'from unittest.mock import patch\n'), ((1999, 2096), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n12\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n12\\n')\n", (2004, 2096), False, 'from unittest.mock import patch\n'), ((2121, 2133), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2129, 2133), True, 'import numpy as np\n'), ((2422, 2434), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2430, 2434), True, 'import numpy as np\n'), ((2347, 2359), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2355, 2359), True, 'import numpy as 
np\n')] |
from math import sqrt
from collections import namedtuple
import torch
from e3nn import o3
from e3nn.util import eval_code
def _prod(x):
out = 1
for a in x:
out *= a
return out
class TensorProduct(torch.nn.Module):
r"""Tensor Product with parametrizable paths
Parameters
----------
in1 : `Irreps` or list of tuple
List of first inputs ``(multiplicity, irrep[, variance])``.
in2 : `Irreps` or list of tuple
List of second inputs ``(multiplicity, irrep[, variance])``.
out : `Irreps` or list of tuple
List of outputs ``(multiplicity, irrep[, variance])``.
instructions : list of tuple
List of instructions ``(i_1, i_2, i_out, mode, train[, path_weight])``
it means: Put ``in1[i_1]`` :math:`\otimes` ``in2[i_2]`` into ``out[i_out]``
* mode: determines the way the multiplicities are treated, "uvw" is fully connected
* train: `True` of `False` if this path is weighed by a parameter
* path weight: how much this path should contribute to the output
normalization : {'component', 'norm'}
the way it is assumed the representation are normalized. If it is set to "norm":
.. math::
\| x \| = \| y \| = 1 \Longrightarrow \| x \otimes y \| = 1
internal_weights : bool
does the instance of the class contains the parameters
shared_weights : bool
are the parameters shared among the inputs extra dimensions
* `True` :math:`z_i = w x_i \otimes y_i`
* `False` :math:`z_i = w_i x_i \otimes y_i`
where here :math:`i` denotes a *batch-like* index
Examples
--------
Create a module that computes elementwise the cross-product of 16 vectors with 16 vectors :math:`z_u = x_u \wedge y_u`
>>> module = TensorProduct(
... "16x1o", "16x1o", "16x1e",
... [
... (0, 0, 0, "uuu", False)
... ]
... )
Now mix all 16 vectors with all 16 vectors to makes 16 pseudo-vectors :math:`z_w = \sum_{u,v} w_{uvw} x_u \wedge y_v`
>>> module = TensorProduct(
... [(16, (1, -1))],
... [(16, (1, -1))],
... [(16, (1, 1))],
... [
... (0, 0, 0, "uvw", True)
... ]
... )
With custom input variance and custom path weights:
>>> module = TensorProduct(
... "8x0o + 8x1o",
... [(16, "1o", 1/16)],
... "16x1e",
... [
... (0, 0, 0, "uvw", True, 3),
... (1, 0, 0, "uvw", True, 1),
... ]
... )
Example of a dot product:
>>> irreps = o3.Irreps("3x0e + 4x0o + 1e + 2o + 3o")
>>> module = TensorProduct(irreps, irreps, "0e", [
... (i, i, 0, 'uuw', False)
... for i, (mul, ir) in enumerate(irreps)
... ])
Implement :math:`z_u = x_u \otimes (\sum_v w_{uv} y_v)`
>>> module = TensorProduct(
... "8x0o + 7x1o + 3x2e",
... "10x0e + 10x1e + 10x2e",
... "8x0o + 7x1o + 3x2e",
... [
... # paths for the l=0:
... (0, 0, 0, "uvu", True), # 0x0->0
... # paths for the l=1:
... (1, 0, 1, "uvu", True), # 1x0->1
... (1, 1, 1, "uvu", True), # 1x1->1
... (1, 2, 1, "uvu", True), # 1x2->1
... # paths for the l=2:
... (2, 0, 2, "uvu", True), # 2x0->2
... (2, 1, 2, "uvu", True), # 2x1->2
... (2, 2, 2, "uvu", True), # 2x2->2
... ]
... )
Tensor Product using the xavier uniform initialization:
>>> irreps_1 = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> irreps_2 = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> irreps_out = o3.Irreps("5x0e + 10x1o + 1x2e")
>>> # create a Fully Connected Tensor Product
>>> module = o3.TensorProduct(
... irreps_1,
... irreps_2,
... irreps_out,
... [
... (i_1, i_2, i_out, "uvw", True, mul_1 * mul_2)
... for i_1, (mul_1, ir_1) in enumerate(irreps_1)
... for i_2, (mul_2, ir_2) in enumerate(irreps_2)
... for i_out, (mul_out, ir_out) in enumerate(irreps_out)
... if ir_out in ir_1 * ir_2
... ]
... )
>>> with torch.no_grad():
... for weight in module.parameters():
... # formula from torch.nn.init.xavier_uniform_
... mul_1, mul_2, mul_out = weight.shape
... a = (6 / (mul_1 * mul_2 + mul_out))**0.5
... _ = weight.uniform_(-a, a) # `_ = ` is only here because of pytest
>>> n = 1_000
>>> vars = module(irreps_1.randn(n, -1), irreps_2.randn(n, -1)).var(0)
>>> assert vars.min() > 1 / 3
>>> assert vars.max() < 3
"""
def __init__(
self,
in1,
in2,
out,
instructions,
normalization='component',
internal_weights=None,
shared_weights=None,
_specialized_code=True,
):
super().__init__()
assert normalization in ['component', 'norm'], normalization
if shared_weights is False and internal_weights is None:
internal_weights = False
if shared_weights is None:
shared_weights = True
if internal_weights is None:
internal_weights = True
assert shared_weights or not internal_weights
try:
in1 = o3.Irreps(in1)
except AssertionError:
pass
try:
in2 = o3.Irreps(in2)
except AssertionError:
pass
try:
out = o3.Irreps(out)
except AssertionError:
pass
in1 = [x if len(x) == 3 else x + (1.0,) for x in in1]
in2 = [x if len(x) == 3 else x + (1.0,) for x in in2]
out = [x if len(x) == 3 else x + (1.0,) for x in out]
self.irreps_in1 = o3.Irreps([(mul, ir) for mul, ir, _var in in1])
self.irreps_in2 = o3.Irreps([(mul, ir) for mul, ir, _var in in2])
self.irreps_out = o3.Irreps([(mul, ir) for mul, ir, _var in out])
in1_var = [var for _, _, var in in1]
in2_var = [var for _, _, var in in2]
out_var = [var for _, _, var in out]
self.shared_weights = shared_weights
z = '' if self.shared_weights else 'z'
# == TorchScript main operation templates ==
# The if-else block is needed to avoid an internal TorchScript compiler bug related to the early return.
code_out = f"""
from typing import List
import torch
from e3nn.util import broadcast_tensors
@torch.jit.script
def main(x1: torch.Tensor, x2: torch.Tensor, ws: List[torch.Tensor], w3j: List[torch.Tensor]) -> torch.Tensor:
x1, x2 = broadcast_tensors(x1, x2)
size = x1.shape[:-1]
outsize = size + ({self.irreps_out.dim},)
assert x1.shape[-1] == {self.irreps_in1.dim}, "Incorrect feature dimension for x1"
assert x2.shape[-1] == {self.irreps_in2.dim}, "Incorrect feature dimension for x2"
x1 = x1.reshape(-1, {self.irreps_in1.dim})
x2 = x2.reshape(-1, {self.irreps_in2.dim})
if x1.shape[0] == 0:
return x1.new_zeros(outsize)
else:
batch = x1.shape[0]
out = x1.new_zeros((batch, {self.irreps_out.dim}))
ein = torch.einsum
"""
code_right = f"""
from typing import List
import torch
from e3nn.util import broadcast_tensors
@torch.jit.script
def main(x2: torch.Tensor, ws: List[torch.Tensor], w3j: List[torch.Tensor]) -> torch.Tensor:
size = x2.shape[:-1]
outsize = size + ({self.irreps_in1.dim}, {self.irreps_out.dim},)
assert x2.shape[-1] == {self.irreps_in2.dim}, "Incorrect feature dimension for x2"
x2 = x2.reshape(-1, {self.irreps_in2.dim})
if x2.shape[0] == 0:
return x2.new_zeros(outsize)
else:
batch = x2.shape[0]
out = x2.new_zeros((batch, {self.irreps_in1.dim}, {self.irreps_out.dim}))
ein = torch.einsum
"""
# == end TorchScript templates ==
# Put everything in the else block
base_indent = 2
def indent_for_level(indent_level):
return ((base_indent + indent_level) * 4) * " "
s = indent_for_level(0)
wigners = []
for i_1, (mul_1, (l_1, p_1)) in enumerate(self.irreps_in1):
index_1 = self.irreps_in1[:i_1].dim
dim_1 = mul_1 * (2 * l_1 + 1)
code_out += f"{s}x1_{i_1} = x1[:, {index_1}:{index_1+dim_1}].reshape(batch, {mul_1}, {2 * l_1 + 1})\n"
code_out += "\n"
for i_2, (mul_2, (l_2, p_2)) in enumerate(self.irreps_in2):
index_2 = self.irreps_in2[:i_2].dim
dim_2 = mul_2 * (2 * l_2 + 1)
line = f"{s}x2_{i_2} = x2[:, {index_2}:{index_2+dim_2}].reshape(batch, {mul_2}, {2 * l_2 + 1})\n"
code_out += line
code_right += line
code_out += "\n"
code_right += "\n"
last_ss = None
Instruction = namedtuple("Instruction", "i_in1, i_in2, i_out, connection_mode, has_weight, path_weight, weight_shape")
instructions = [x if len(x) == 6 else x + (1.0,) for x in instructions]
self.instructions = [
Instruction(
i_in1, i_in2, i_out, connection_mode, has_weight, path_weight,
{
'uvw': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul, self.irreps_out[i_out].mul),
'uvu': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
'uvv': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
'uuw': (self.irreps_in1[i_in1].mul, self.irreps_out[i_out].mul),
'uuu': (self.irreps_in1[i_in1].mul,),
'uvuv': (self.irreps_in1[i_in1].mul, self.irreps_in2[i_in2].mul),
}[connection_mode] if has_weight else None
)
for i_in1, i_in2, i_out, connection_mode, has_weight, path_weight in instructions
]
index_w = -1
for ins in self.instructions:
mul_1, (l_1, p_1) = self.irreps_in1[ins.i_in1]
mul_2, (l_2, p_2) = self.irreps_in2[ins.i_in2]
mul_out, (l_out, p_out) = self.irreps_out[ins.i_out]
dim_1 = mul_1 * (2 * l_1 + 1)
dim_2 = mul_2 * (2 * l_2 + 1)
dim_out = mul_out * (2 * l_out + 1)
index_1 = self.irreps_in1[:ins.i_in1].dim
index_2 = self.irreps_in2[:ins.i_in2].dim
index_out = self.irreps_out[:ins.i_out].dim
assert p_1 * p_2 == p_out
assert abs(l_1 - l_2) <= l_out <= l_1 + l_2
if dim_1 == 0 or dim_2 == 0 or dim_out == 0:
continue
alpha = ins.path_weight * out_var[ins.i_out] / sum(in1_var[i.i_in1] * in2_var[i.i_in2] for i in self.instructions if i.i_out == ins.i_out)
s = indent_for_level(0)
line = (
f"{s}with torch.autograd.profiler.record_function("
f"'{self.irreps_in1[ins.i_in1:ins.i_in1+1]} x {self.irreps_in2[ins.i_in2:ins.i_in2+1]} "
f"= {self.irreps_out[ins.i_out:ins.i_out+1]} {ins.connection_mode} {ins.has_weight}'):\n"
)
code_out += line
code_right += line
s = indent_for_level(1)
code_out += f"{s}s1 = x1_{ins.i_in1}\n"
code_right += f"{s}e1 = torch.eye({mul_1}, dtype=x2.dtype, device=x2.device)\n"
line = f"{s}s2 = x2_{ins.i_in2}\n"
code_out += line
code_right += line
assert ins.connection_mode in ['uvw', 'uvu', 'uvv', 'uuw', 'uuu', 'uvuv']
alpha = sqrt(alpha / {
'uvw': (mul_1 * mul_2),
'uvu': mul_2,
'uvv': mul_1,
'uuw': mul_1,
'uuu': 1,
'uvuv': 1,
}[ins.connection_mode])
if ins.has_weight:
index_w += 1
line_out = f"{s}out[:, {index_out}:{index_out+dim_out}] += {alpha} * {{}}.reshape(batch, {dim_out})\n\n"
line_right = f"{s}out[:, {index_1}:{index_1+dim_1}, {index_out}:{index_out+dim_out}] += {alpha} * {{}}.reshape(batch, {dim_1}, {dim_out})\n\n"
if _specialized_code:
# optimized code for special cases:
# 0 x 0 = 0
# 0 x L = L
# L x 0 = L
# L x L = 0
# 1 x 1 = 1
if (l_1, l_2, l_out) == (0, 0, 0) and ins.connection_mode in ['uvw', 'uvu'] and normalization in ['component', 'norm'] and ins.has_weight:
code_out += f"{s}s1 = s1.reshape(batch, {mul_1})\n"
line = f"{s}s2 = s2.reshape(batch, {mul_2})\n"
code_out += line
code_right += line
if ins.connection_mode == 'uvw':
code_out += line_out.format(f"ein('{z}uvw,zu,zv->zw', ws[{index_w}], s1, s2)")
code_right += line_right.format(f"ein('{z}uvw,zv->zuw', ws[{index_w}], s2)")
if ins.connection_mode == 'uvu':
code_out += line_out.format(f"ein('{z}uv,zu,zv->zu', ws[{index_w}], s1, s2)")
code_right += line_right.format(f"ein('{z}uv,uw,zv->zuw', ws[{index_w}], e1, s2)")
continue
if l_1 == 0 and l_2 == l_out and ins.connection_mode in ['uvw', 'uvu'] and normalization == 'component' and ins.has_weight:
code_out += f"{s}s1 = s1.reshape(batch, {mul_1})\n"
if ins.connection_mode == 'uvw':
code_out += line_out.format(f"ein('{z}uvw,zu,zvi->zwi', ws[{index_w}], s1, s2)")
code_right += line_right.format(f"ein('{z}uvw,zvi->zuwi', ws[{index_w}], s2)")
if ins.connection_mode == 'uvu':
code_out += line_out.format(f"ein('{z}uv,zu,zvi->zui', ws[{index_w}], s1, s2)")
code_right += line_right.format(f"ein('{z}uv,uw,zvi->zuwi', ws[{index_w}], e1, s2)")
continue
if l_1 == l_out and l_2 == 0 and ins.connection_mode in ['uvw', 'uvu'] and normalization == 'component' and ins.has_weight:
code_out += f"{s}s2 = s2.reshape(batch, {mul_2})\n"
code_right += f"{s}s2 = s2.reshape(batch, {mul_2})\n"
code_right += f"{s}wig = torch.eye({2 * l_1 + 1}, dtype=x2.dtype, device=x2.device)\n"
if ins.connection_mode == 'uvw':
code_out += line_out.format(f"ein('{z}uvw,zui,zv->zwi', ws[{index_w}], s1, s2)")
code_right += line_right.format(f"ein('{z}uvw,ij,zv->zuiwj', ws[{index_w}], wig, s2)")
if ins.connection_mode == 'uvu':
code_out += line_out.format(f"ein('{z}uv,zui,zv->zui', ws[{index_w}], s1, s2)")
code_right += line_right.format(f"ein('{z}uv,ij,uw,zv->zuiwj', ws[{index_w}], wig, e1, s2)")
continue
if l_1 == l_2 and l_out == 0 and ins.connection_mode == 'uvw' and normalization == 'component' and ins.has_weight:
# Cl_l_0 = eye / sqrt(2L+1)
code_out += line_out.format(f"ein('{z}uvw,zui,zvi->zw', ws[{index_w}] / {sqrt(2 * l_1 + 1)}, s1, s2)")
code_right += line_right.format(f"ein('{z}uvw,zvi->zuiw', ws[{index_w}] / {sqrt(2 * l_1 + 1)}, s2)")
continue
if l_1 == l_2 and l_out == 0 and ins.connection_mode == 'uvu' and normalization == 'component' and ins.has_weight:
# Cl_l_0 = eye / sqrt(2L+1)
code_out += line_out.format(f"ein('{z}uv,zui,zvi->zu', ws[{index_w}] / {sqrt(2 * l_1 + 1)}, s1, s2)")
code_right += line_right.format(f"ein('{z}uv,uw,zvi->zuiw', ws[{index_w}] / {sqrt(2 * l_1 + 1)}, e1, s2)")
continue
if l_1 == l_2 and l_out == 0 and ins.connection_mode == 'uuu' and normalization == 'component' and ins.has_weight:
# Cl_l_0 = eye / sqrt(2L+1)
code_out += line_out.format(f"ein('{z}u,zui,zui->zu', ws[{index_w}] / {sqrt(2 * l_1 + 1)}, s1, s2)")
code_right += line_right.format(f"ein('{z}u,uw,zui->zuiw', ws[{index_w}] / {sqrt(2 * l_1 + 1)}, e1, s2)")
continue
if l_1 == l_2 and l_out == 0 and ins.connection_mode == 'uuu' and normalization == 'component' and not ins.has_weight:
# Cl_l_0 = eye / sqrt(2L+1)
code_out += line_out.format(f"ein('zui,zui->zu', s1, s2).div({sqrt(2 * l_1 + 1)})")
code_right += line_right.format(f"ein('uw,zui->zuiw', e1, s2).div({sqrt(2 * l_1 + 1)})")
continue
if (l_1, l_2, l_out) == (1, 1, 1) and ins.connection_mode == 'uvw' and normalization == 'component' and ins.has_weight:
# C1_1_1 = levi-civita / sqrt(2)
code_out += f"{s}s1 = s1.reshape(batch, {mul_1}, 1, {2 * l_1 + 1})\n"
code_out += f"{s}s2 = s2.reshape(batch, 1, {mul_2}, {2 * l_2 + 1})\n"
code_out += f"{s}s1, s2 = torch.broadcast_tensors(s1, s2)\n"
code_out += line_out.format(f"ein('{z}uvw,zuvi->zwi', ws[{index_w}] / {sqrt(2)}, torch.cross(s1, s2, dim=3))")
if (l_1, l_2, l_out) in wigners:
index_w3j = wigners.index((l_1, l_2, l_out))
else:
index_w3j = len(wigners)
wigners += [(l_1, l_2, l_out)]
code_right += line_right.format(f"ein('{z}uvw,ijk,zvj->zuiwk', ws[{index_w}], w3j[{index_w3j}], s2)")
continue
if (l_1, l_2, l_out) == (1, 1, 1) and ins.connection_mode == 'uvu' and normalization == 'component' and ins.has_weight:
# C1_1_1 = levi-civita / sqrt(2)
code_out += f"{s}s1 = s1.reshape(batch, {mul_1}, 1, {2 * l_1 + 1})\n"
code_out += f"{s}s2 = s2.reshape(batch, 1, {mul_2}, {2 * l_2 + 1})\n"
code_out += f"{s}s1, s2 = torch.broadcast_tensors(s1, s2)\n"
code_out += line_out.format(f"ein('{z}uv,zuvi->zui', ws[{index_w}] / {sqrt(2)}, torch.cross(s1, s2, dim=3))")
if (l_1, l_2, l_out) in wigners:
index_w3j = wigners.index((l_1, l_2, l_out))
else:
index_w3j = len(wigners)
wigners += [(l_1, l_2, l_out)]
code_right += line_right.format(f"ein('{z}uv,ijk,uw,zvj->zuiwk', ws[{index_w}], w3j[{index_w3j}], e1, s2)")
continue
if last_ss != (ins.i_in1, ins.i_in2, ins.connection_mode[:2]):
if ins.connection_mode[:2] == 'uv':
code_out += f"{s}ss = ein('zui,zvj->zuvij', s1, s2)\n"
if ins.connection_mode[:2] == 'uu':
code_out += f"{s}ss = ein('zui,zuj->zuij', s1, s2)\n"
last_ss = (ins.i_in1, ins.i_in2, ins.connection_mode[:2])
if (l_1, l_2, l_out) in wigners:
index_w3j = wigners.index((l_1, l_2, l_out))
else:
index_w3j = len(wigners)
wigners += [(l_1, l_2, l_out)]
if ins.connection_mode == 'uvw':
assert ins.has_weight
code_out += line_out.format(f"ein('{z}uvw,ijk,zuvij->zwk', ws[{index_w}], w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('{z}uvw,ijk,zvj->zuiwk', ws[{index_w}], w3j[{index_w3j}], s2)")
if ins.connection_mode == 'uvu':
assert mul_1 == mul_out
if ins.has_weight:
code_out += line_out.format(f"ein('{z}uv,ijk,zuvij->zuk', ws[{index_w}], w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('{z}uv,ijk,uw,zvj->zuiwk', ws[{index_w}], w3j[{index_w3j}], e1, s2)")
else:
code_out += line_out.format(f"ein('ijk,zuvij->zuk', w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('ijk,uw,zvj->zuiwk', w3j[{index_w3j}], e1, s2)")
if ins.connection_mode == 'uvv':
assert mul_2 == mul_out
if ins.has_weight:
code_out += line_out.format(f"ein('{z}uv,ijk,zuvij->zvk', ws[{index_w}], w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('{z}uv,ijk,zvj->zuivk', ws[{index_w}], w3j[{index_w3j}], s2)")
else:
code_out += line_out.format(f"ein('ijk,zuvij->zvk', w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('u,ijk,zvj->zuivk', s2.new_zeros({mul_1}).fill_(1.0), w3j[{index_w3j}], s2)")
if ins.connection_mode == 'uuw':
assert mul_1 == mul_2
if ins.has_weight:
code_out += line_out.format(f"ein('{z}uw,ijk,zuij->zwk', ws[{index_w}], w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('{z}uw,ijk,zuj->zuiwk', ws[{index_w}], w3j[{index_w3j}], s2)")
else:
assert mul_out == 1
code_out += line_out.format(f"ein('ijk,zuij->zk', w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('ijk,zuj->zuik', w3j[{index_w3j}], s2)")
if ins.connection_mode == 'uuu':
assert mul_1 == mul_2 == mul_out
if ins.has_weight:
code_out += line_out.format(f"ein('{z}u,ijk,zuij->zuk', ws[{index_w}], w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('{z}u,ijk,uw,zuj->zuiwk', ws[{index_w}], w3j[{index_w3j}], e1, s2)")
else:
code_out += line_out.format(f"ein('ijk,zuij->zuk', w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('ijk,uw,zuj->zuiwk', w3j[{index_w3j}], e1, s2)")
if ins.connection_mode == 'uvuv':
assert mul_1 * mul_2 == mul_out
if ins.has_weight:
code_out += line_out.format(f"ein('{z}uv,ijk,zuvij->zuvk', ws[{index_w}], w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('{z}uv,ijk,uw,zvj->zuiwvk', ws[{index_w}], w3j[{index_w3j}], e1, s2)")
else:
code_out += line_out.format(f"ein('ijk,zuvij->zuvk', w3j[{index_w3j}], ss)")
code_right += line_right.format(f"ein('ijk,uw,zvj->zuiwvk', w3j[{index_w3j}], e1, s2)")
code_out += "\n"
code_out += f"{s}return out.reshape(outsize)"
code_right += f"{s}return out.reshape(outsize)"
self.code_out = code_out
self._compiled_main_out = eval_code(self.code_out).main
self.code_right = code_right
self._compiled_main_right = eval_code(self.code_right).main
# w3j
self.wigners = wigners
for i, (l_1, l_2, l_out) in enumerate(self.wigners):
wig = o3.wigner_3j(l_1, l_2, l_out)
if normalization == 'component':
wig *= (2 * l_out + 1) ** 0.5
if normalization == 'norm':
wig *= (2 * l_1 + 1) ** 0.5 * (2 * l_2 + 1) ** 0.5
self.register_buffer(f"C{i}", wig)
# weights
self.weight_numel = sum(_prod(ins.weight_shape) for ins in self.instructions if ins.has_weight)
if internal_weights:
assert self.shared_weights, "Having internal weights impose shared weights"
self.weight = torch.nn.ParameterDict()
for ins in self.instructions:
if ins.has_weight:
name = f'[{ins.i_in1}:{self.irreps_in1[ins.i_in1]}] x [{ins.i_in2}:{self.irreps_in2[ins.i_in2]}] -> [{ins.i_out}:{self.irreps_out[ins.i_out]}]'
self.weight[name] = torch.nn.Parameter(torch.randn(ins.weight_shape))
output_mask = torch.cat([
torch.ones(mul * ir.dim)
if any(i.i_out == i_out and i.path_weight > 0 for i in self.instructions)
else torch.zeros(mul * ir.dim)
for i_out, (mul, ir) in enumerate(self.irreps_out)
])
self.register_buffer('output_mask', output_mask)
self.to(dtype=torch.get_default_dtype())
def __repr__(self):
npath = sum(
{
'uvw': self.irreps_in1[i.i_in1].mul * self.irreps_in2[i.i_in2].mul * self.irreps_out[i.i_out].mul,
'uvu': self.irreps_in1[i.i_in1].mul * self.irreps_in2[i.i_in2].mul,
'uvv': self.irreps_in1[i.i_in1].mul * self.irreps_in2[i.i_in2].mul,
'uuw': self.irreps_in1[i.i_in1].mul * self.irreps_out[i.i_out].mul,
'uuu': self.irreps_in1[i.i_in1].mul,
'uvuv': self.irreps_in1[i.i_in1].mul * self.irreps_in2[i.i_in2].mul,
}[i.connection_mode]
for i in self.instructions
)
return (
f"{self.__class__.__name__}"
f"({self.irreps_in1.simplify()} x {self.irreps_in2.simplify()} "
f"-> {self.irreps_out.simplify()} | {npath} paths | {self.weight_numel} weights)"
)
def prepare_weight_list(self, weight):
if self.weight_numel:
weight_shapes = [ins.weight_shape for ins in self.instructions if ins.has_weight]
if weight is None:
weight = list(self.weight.values())
if torch.is_tensor(weight):
ws = []
i = 0
for shape in weight_shapes:
d = _prod(shape)
if not self.shared_weights:
ws += [weight[..., i:i+d].reshape((-1,) + shape)]
else:
ws += [weight[i:i+d].reshape(shape)]
i += d
weight = ws
if isinstance(weight, list):
if not self.shared_weights:
weight = [w.reshape(-1, *shape) for w, shape in zip(weight, weight_shapes)]
else:
weight = [w.reshape(*shape) for w, shape in zip(weight, weight_shapes)]
else:
weight = []
return weight
def right(self, features_2, weight=None):
r"""evaluate partially :math:`w x \cdot \otimes y`
It returns an operator in the form of a matrix.
Parameters
----------
features_2 : `torch.Tensor`
tensor of shape ``(..., irreps_in2.dim)``
weight : `torch.Tensor` or list of `torch.Tensor`, optional
required if ``internal_weights`` is ``False``
tensor of shape ``(self.weight_numel,)`` if ``shared_weights`` is ``True``
tensor of shape ``(..., self.weight_numel)`` if ``shared_weights`` is ``False``
or list of tensors of shapes ``weight_shape`` / ``(...) + weight_shape``.
Use ``self.instructions`` to know what are the weights used for.
Returns
-------
`torch.Tensor`
tensor of shape ``(..., irreps_in1.dim, irreps_out.dim)``
"""
with torch.autograd.profiler.record_function(repr(self)):
weight = self.prepare_weight_list(weight)
wigners = [getattr(self, f"C{i}") for i in range(len(self.wigners))]
return self._compiled_main_right(features_2, weight, wigners)
def forward(self, features_1, features_2, weight=None):
r"""evaluate :math:`w x \otimes y`
Parameters
----------
features_1 : `torch.Tensor`
tensor of shape ``(..., irreps_in1.dim)``
features_2 : `torch.Tensor`
tensor of shape ``(..., irreps_in2.dim)``
weight : `torch.Tensor` or list of `torch.Tensor`, optional
required if ``internal_weights`` is ``False``
tensor of shape ``(self.weight_numel,)`` if ``shared_weights`` is ``True``
tensor of shape ``(..., self.weight_numel)`` if ``shared_weights`` is ``False``
or list of tensors of shapes ``weight_shape`` / ``(...) + weight_shape``.
Use ``self.instructions`` to know what are the weights used for.
Returns
-------
`torch.Tensor`
tensor of shape ``(..., irreps_out.dim)``
"""
with torch.autograd.profiler.record_function(repr(self)):
weight = self.prepare_weight_list(weight)
wigners = [getattr(self, f"C{i}") for i in range(len(self.wigners))]
return self._compiled_main_out(features_1, features_2, weight, wigners)
# In order to support copy.deepcopy and pickling, we need to not save the compiled TorchScript functions:
# See pickle docs: https://docs.python.org/3/library/pickle.html#pickling-class-instances
# torch.nn.Module does not currently impliment __get/setstate__ but may in the future, which is why we have these hasattr checks.
def __getstate__(self):
if hasattr(super(), "__getstate__"):
out = super().__getstate__().copy()
else:
out = self.__dict__.copy()
del out['_compiled_main_out']
del out['_compiled_main_right']
return out
def __setstate__(self, d):
d = d.copy()
d["_compiled_main_out"] = eval_code(d['code_out']).main
d["_compiled_main_right"] = eval_code(d['code_right']).main
if hasattr(super(), "__setstate__"):
super().__setstate__(d)
else:
self.__dict__.update(d)
class FullyConnectedTensorProduct(TensorProduct):
    r"""Fully-connected weighted tensor product
    All the paths allowed by :math:`|l_1 - l_2| \leq l_{out} \leq l_1 + l_2` are made.
    The output is a sum on different paths:
    .. math::
        z_w = \sum_{u,v} w_{uvw} x_u \otimes y_v + \cdots \text{other paths}
    where :math:`u,v,w` are the indices of the multiplicities.
    Parameters
    ----------
    irreps_in1 : `Irreps`
        representation of the first input
    irreps_in2 : `Irreps`
        representation of the second input
    irreps_out : `Irreps`
        representation of the output
    normalization : {'component', 'norm'}
        see `TensorProduct`
    internal_weights : bool
        see `TensorProduct`
    shared_weights : bool
        see `TensorProduct`
    """
    def __init__(
        self,
        irreps_in1,
        irreps_in2,
        irreps_out,
        normalization='component',
        internal_weights=None,
        shared_weights=None
    ):
        irreps_in1 = o3.Irreps(irreps_in1).simplify()
        irreps_in2 = o3.Irreps(irreps_in2).simplify()
        irreps_out = o3.Irreps(irreps_out).simplify()
        # every entry keeps its multiplicity, with unit path weight
        in1 = [(mul, ir, 1.0) for mul, ir in irreps_in1]
        in2 = [(mul, ir, 1.0) for mul, ir in irreps_in2]
        out = [(mul, ir, 1.0) for mul, ir in irreps_out]
        # one weighted 'uvw' instruction for every triple compatible with the
        # angular-momentum selection rule and parity conservation
        instr = []
        for i_1, (_, (l_1, p_1)) in enumerate(irreps_in1):
            for i_2, (_, (l_2, p_2)) in enumerate(irreps_in2):
                for i_out, (_, (l_out, p_out)) in enumerate(irreps_out):
                    if abs(l_1 - l_2) <= l_out <= l_1 + l_2 and p_1 * p_2 == p_out:
                        instr.append((i_1, i_2, i_out, 'uvw', True, 1.0))
        super().__init__(in1, in2, out, instr, normalization, internal_weights, shared_weights)
class ElementwiseTensorProduct(TensorProduct):
    r"""Elementwise-Connected tensor product
    .. math::
        z_u = x_u \otimes y_u
    where :math:`u` runs over the channels; note that there are no weights.
    Parameters
    ----------
    irreps_in1 : `Irreps`
        representation of the first input
    irreps_in2 : `Irreps`
        representation of the second input
    irreps_out : iterator of `Irrep`, optional
        representations of the output (used as a filter on the products)
    normalization : {'component', 'norm'}
        see `TensorProduct`
    """
    def __init__(
        self,
        irreps_in1,
        irreps_in2,
        irreps_out=None,
        normalization='component',
    ):
        irreps_in1 = o3.Irreps(irreps_in1).simplify()
        irreps_in2 = o3.Irreps(irreps_in2).simplify()
        if irreps_out is not None:
            # normalize the output filter to a list of Irrep objects
            irreps_out = [o3.Irrep(ir) for ir in irreps_out]
        # the product is taken channel-by-channel, so both inputs must carry
        # the same total number of channels (sum of multiplicities)
        assert irreps_in1.num_irreps == irreps_in2.num_irreps
        irreps_in1 = list(irreps_in1)
        irreps_in2 = list(irreps_in2)
        # Align the two multiplicity lists in place: whenever the entries at
        # position i differ in multiplicity, split the larger one so that both
        # lists end up with identical multiplicity patterns, entry for entry.
        i = 0
        while i < len(irreps_in1):
            mul_1, ir_1 = irreps_in1[i]
            mul_2, ir_2 = irreps_in2[i]
            if mul_1 < mul_2:
                irreps_in2[i] = (mul_1, ir_2)
                irreps_in2.insert(i + 1, (mul_2 - mul_1, ir_2))
            if mul_2 < mul_1:
                irreps_in1[i] = (mul_2, ir_1)
                irreps_in1.insert(i + 1, (mul_1 - mul_2, ir_1))
            i += 1
        # One unweighted diagonal ('uuu') instruction per aligned pair and per
        # admissible output irrep of the product ir_1 * ir_2.
        out = []
        instr = []
        for i, ((mul, ir_1), (mul_2, ir_2)) in enumerate(zip(irreps_in1, irreps_in2)):
            assert mul == mul_2
            for ir in ir_1 * ir_2:
                if irreps_out is not None and ir not in irreps_out:
                    continue
                i_out = len(out)
                out.append((mul, ir))
                instr += [
                    (i, i, i_out, 'uuu', False)
                ]
        super().__init__(irreps_in1, irreps_in2, out, instr, normalization, internal_weights=False)
class FullTensorProduct(TensorProduct):
    r"""Full tensor product between two irreps
    .. math::
        z_{uv} = x_u \otimes y_v
    where :math:`u` and :math:`v` run over the channels; note that there are no weights.
    Parameters
    ----------
    irreps_in1 : `Irreps`
        representation of the first input
    irreps_in2 : `Irreps`
        representation of the second input
    irreps_out : iterator of `Irrep`, optional
        representations of the output (used as a filter on the products)
    normalization : {'component', 'norm'}
        see `TensorProduct`
    """
    def __init__(
        self,
        irreps_in1,
        irreps_in2,
        irreps_out=None,
        normalization='component',
    ):
        irreps_in1 = o3.Irreps(irreps_in1).simplify()
        irreps_in2 = o3.Irreps(irreps_in2).simplify()
        if irreps_out is not None:
            # normalize the output filter to a list of Irrep objects
            irreps_out = [o3.Irrep(ir) for ir in irreps_out]
        out = []
        instr = []
        # one unweighted 'uvuv' instruction per admissible pairwise product
        for i_1, (mul_1, ir_1) in enumerate(irreps_in1):
            for i_2, (mul_2, ir_2) in enumerate(irreps_in2):
                for ir_out in ir_1 * ir_2:
                    if irreps_out is not None and ir_out not in irreps_out:
                        continue
                    instr.append((i_1, i_2, len(out), 'uvuv', False))
                    out.append((mul_1 * mul_2, ir_out))
        sorted_out, perm, _ = o3.Irreps(out).sort()
        # re-target each instruction at its slot in the sorted output
        instr = [
            (a, b, perm[c], mode, train)
            for a, b, c, mode, train in instr
        ]
        super().__init__(irreps_in1, irreps_in2, sorted_out, instr, normalization, internal_weights=False)
class Linear(TensorProduct):
    r"""Linear operation equivariant to :math:`O(3)`
    Mixes the multiplicities of each irrep of the input independently;
    implemented as a weighted tensor product with the scalar irrep ``0e``.
    Parameters
    ----------
    irreps_in : `Irreps`
        representation of the input
    irreps_out : `Irreps`
        representation of the output
    internal_weights : bool
        see `TensorProduct`
    shared_weights : bool
        see `TensorProduct`
    Examples
    --------
    Linearly combines 4 scalars into 8 scalars and 16 vectors into 8 vectors.
    >>> lin = Linear("4x0e+16x1o", "8x0e+8x1o")
    >>> lin.weight_numel
    160
    """
    def __init__(
        self,
        irreps_in,
        irreps_out,
        internal_weights=None,
        shared_weights=None,
    ):
        irreps_in = o3.Irreps(irreps_in).simplify()
        irreps_out = o3.Irreps(irreps_out).simplify()
        # one fully-connected ('uvw') path per pair of matching irreps
        instr = []
        for i_in, (_, ir_in) in enumerate(irreps_in):
            for i_out, (_, ir_out) in enumerate(irreps_out):
                if ir_in == ir_out:
                    instr.append((i_in, 0, i_out, 'uvw', True, 1.0))
        super().__init__(irreps_in, "0e", irreps_out, instr, internal_weights=internal_weights, shared_weights=shared_weights)
        self.irreps_in = irreps_in
        self.irreps_out = irreps_out
    def __repr__(self):
        return f"{self.__class__.__name__}({self.irreps_in} -> {self.irreps_out} | {self.weight_numel} weights)"
    def forward(self, features, weight=None):
        """evaluate
        Parameters
        ----------
        features : `torch.Tensor`
            tensor of shape ``(..., irreps_in.dim)``
        weight : `torch.Tensor`, optional
            required if ``internal_weights`` is `False`
        Returns
        -------
        `torch.Tensor`
            tensor of shape ``(..., irreps_out.dim)``
        """
        # feed a constant scalar "1" as the second input of the tensor product
        ones = features.new_ones(features.shape[:-1] + (1,))
        return super().forward(features, ones, weight)
class Norm(TensorProduct):
    r"""Norm operation
    Computes one scalar per channel: the norm of that channel's irrep vector.
    Parameters
    ----------
    irreps_in : `Irreps`
        representation of the input
    Examples
    --------
    Compute the norms of 17 vectors.
    >>> norm = Norm("17x1o")
    >>> norm(torch.randn(17 * 3)).shape
    torch.Size([17])
    """
    def __init__(
        self,
        irreps_in,
    ):
        irreps_in = o3.Irreps(irreps_in).simplify()
        # one scalar output channel per input channel
        irreps_out = o3.Irreps([(mul, "0e") for mul, _ in irreps_in])
        # diagonal ('uuu') self-product of every entry; path weight ir.dim
        # compensates the 'component' normalization
        instr = []
        for slot, (mul, ir) in enumerate(irreps_in):
            instr.append((slot, slot, slot, 'uuu', False, ir.dim))
        super().__init__(irreps_in, irreps_in, irreps_out, instr, 'component')
        self.irreps_in = irreps_in
        self.irreps_out = irreps_out.simplify()
    def __repr__(self):
        return f"{self.__class__.__name__}({self.irreps_in})"
    def forward(self, features):
        """evaluate
        Parameters
        ----------
        features : `torch.Tensor`
            tensor of shape ``(..., irreps_in.dim)``
        Returns
        -------
        `torch.Tensor`
            tensor of shape ``(..., irreps_out.dim)``
        """
        # the self tensor product yields squared norms; take the square root
        return super().forward(features, features).sqrt()
| [
"e3nn.util.eval_code",
"torch.nn.ParameterDict",
"torch.get_default_dtype",
"collections.namedtuple",
"e3nn.o3.Irrep",
"e3nn.o3.Irreps",
"e3nn.o3.wigner_3j",
"math.sqrt",
"torch.randn",
"torch.is_tensor",
"torch.zeros",
"torch.ones"
] | [((5868, 5915), 'e3nn.o3.Irreps', 'o3.Irreps', (['[(mul, ir) for mul, ir, _var in in1]'], {}), '([(mul, ir) for mul, ir, _var in in1])\n', (5877, 5915), False, 'from e3nn import o3\n'), ((5942, 5989), 'e3nn.o3.Irreps', 'o3.Irreps', (['[(mul, ir) for mul, ir, _var in in2]'], {}), '([(mul, ir) for mul, ir, _var in in2])\n', (5951, 5989), False, 'from e3nn import o3\n'), ((6016, 6063), 'e3nn.o3.Irreps', 'o3.Irreps', (['[(mul, ir) for mul, ir, _var in out]'], {}), '([(mul, ir) for mul, ir, _var in out])\n', (6025, 6063), False, 'from e3nn import o3\n'), ((8916, 9029), 'collections.namedtuple', 'namedtuple', (['"""Instruction"""', '"""i_in1, i_in2, i_out, connection_mode, has_weight, path_weight, weight_shape"""'], {}), "('Instruction',\n 'i_in1, i_in2, i_out, connection_mode, has_weight, path_weight, weight_shape'\n )\n", (8926, 9029), False, 'from collections import namedtuple\n'), ((35102, 35116), 'e3nn.o3.Irreps', 'o3.Irreps', (['out'], {}), '(out)\n', (35111, 35116), False, 'from e3nn import o3\n'), ((37823, 37871), 'e3nn.o3.Irreps', 'o3.Irreps', (["[(mul, '0e') for mul, _ in irreps_in]"], {}), "([(mul, '0e') for mul, _ in irreps_in])\n", (37832, 37871), False, 'from e3nn import o3\n'), ((5403, 5417), 'e3nn.o3.Irreps', 'o3.Irreps', (['in1'], {}), '(in1)\n', (5412, 5417), False, 'from e3nn import o3\n'), ((5497, 5511), 'e3nn.o3.Irreps', 'o3.Irreps', (['in2'], {}), '(in2)\n', (5506, 5511), False, 'from e3nn import o3\n'), ((5591, 5605), 'e3nn.o3.Irreps', 'o3.Irreps', (['out'], {}), '(out)\n', (5600, 5605), False, 'from e3nn import o3\n'), ((11622, 11746), 'math.sqrt', 'sqrt', (["(alpha / {'uvw': mul_1 * mul_2, 'uvu': mul_2, 'uvv': mul_1, 'uuw': mul_1,\n 'uuu': 1, 'uvuv': 1}[ins.connection_mode])"], {}), "(alpha / {'uvw': mul_1 * mul_2, 'uvu': mul_2, 'uvv': mul_1, 'uuw':\n mul_1, 'uuu': 1, 'uvuv': 1}[ins.connection_mode])\n", (11626, 11746), False, 'from math import sqrt\n'), ((22999, 23023), 'e3nn.util.eval_code', 'eval_code', (['self.code_out'], {}), 
'(self.code_out)\n', (23008, 23023), False, 'from e3nn.util import eval_code\n'), ((23102, 23128), 'e3nn.util.eval_code', 'eval_code', (['self.code_right'], {}), '(self.code_right)\n', (23111, 23128), False, 'from e3nn.util import eval_code\n'), ((23259, 23288), 'e3nn.o3.wigner_3j', 'o3.wigner_3j', (['l_1', 'l_2', 'l_out'], {}), '(l_1, l_2, l_out)\n', (23271, 23288), False, 'from e3nn import o3\n'), ((23803, 23827), 'torch.nn.ParameterDict', 'torch.nn.ParameterDict', ([], {}), '()\n', (23825, 23827), False, 'import torch\n'), ((25694, 25717), 'torch.is_tensor', 'torch.is_tensor', (['weight'], {}), '(weight)\n', (25709, 25717), False, 'import torch\n'), ((29538, 29562), 'e3nn.util.eval_code', 'eval_code', (["d['code_out']"], {}), "(d['code_out'])\n", (29547, 29562), False, 'from e3nn.util import eval_code\n'), ((29604, 29630), 'e3nn.util.eval_code', 'eval_code', (["d['code_right']"], {}), "(d['code_right'])\n", (29613, 29630), False, 'from e3nn.util import eval_code\n'), ((24514, 24539), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (24537, 24539), False, 'import torch\n'), ((30841, 30862), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in1'], {}), '(irreps_in1)\n', (30850, 30862), False, 'from e3nn import o3\n'), ((30895, 30916), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in2'], {}), '(irreps_in2)\n', (30904, 30916), False, 'from e3nn import o3\n'), ((30949, 30970), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_out'], {}), '(irreps_out)\n', (30958, 30970), False, 'from e3nn import o3\n'), ((32332, 32353), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in1'], {}), '(irreps_in1)\n', (32341, 32353), False, 'from e3nn import o3\n'), ((32386, 32407), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in2'], {}), '(irreps_in2)\n', (32395, 32407), False, 'from e3nn import o3\n'), ((32480, 32492), 'e3nn.o3.Irrep', 'o3.Irrep', (['ir'], {}), '(ir)\n', (32488, 32492), False, 'from e3nn import o3\n'), ((34392, 34413), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in1'], {}), 
'(irreps_in1)\n', (34401, 34413), False, 'from e3nn import o3\n'), ((34446, 34467), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in2'], {}), '(irreps_in2)\n', (34455, 34467), False, 'from e3nn import o3\n'), ((34540, 34552), 'e3nn.o3.Irrep', 'o3.Irrep', (['ir'], {}), '(ir)\n', (34548, 34552), False, 'from e3nn import o3\n'), ((36115, 36135), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in'], {}), '(irreps_in)\n', (36124, 36135), False, 'from e3nn import o3\n'), ((36168, 36189), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_out'], {}), '(irreps_out)\n', (36177, 36189), False, 'from e3nn import o3\n'), ((37770, 37790), 'e3nn.o3.Irreps', 'o3.Irreps', (['irreps_in'], {}), '(irreps_in)\n', (37779, 37790), False, 'from e3nn import o3\n'), ((24206, 24230), 'torch.ones', 'torch.ones', (['(mul * ir.dim)'], {}), '(mul * ir.dim)\n', (24216, 24230), False, 'import torch\n'), ((24334, 24359), 'torch.zeros', 'torch.zeros', (['(mul * ir.dim)'], {}), '(mul * ir.dim)\n', (24345, 24359), False, 'import torch\n'), ((24128, 24157), 'torch.randn', 'torch.randn', (['ins.weight_shape'], {}), '(ins.weight_shape)\n', (24139, 24157), False, 'import torch\n'), ((15350, 15367), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (15354, 15367), False, 'from math import sqrt\n'), ((15475, 15492), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (15479, 15492), False, 'from math import sqrt\n'), ((15802, 15819), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (15806, 15819), False, 'from math import sqrt\n'), ((15929, 15946), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (15933, 15946), False, 'from math import sqrt\n'), ((16259, 16276), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (16263, 16276), False, 'from math import sqrt\n'), ((16385, 16402), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (16389, 16402), False, 'from math import sqrt\n'), ((16710, 16727), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), 
'(2 * l_1 + 1)\n', (16714, 16727), False, 'from math import sqrt\n'), ((16819, 16836), 'math.sqrt', 'sqrt', (['(2 * l_1 + 1)'], {}), '(2 * l_1 + 1)\n', (16823, 16836), False, 'from math import sqrt\n'), ((17412, 17419), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (17416, 17419), False, 'from math import sqrt\n'), ((18398, 18405), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (18402, 18405), False, 'from math import sqrt\n')] |
import functools
from itertools import zip_longest
from Bio import Phylo
def memoize(func):
    """Cache *func* results keyed on the stringified positional and keyword args.
    The cache dict is also exposed as ``func.cache`` on the wrapped function.
    """
    func.cache = {}
    store = func.cache
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = "{}{}".format(args, kwargs)
        try:
            return store[key]
        except KeyError:
            store[key] = func(*args, **kwargs)
            return store[key]
    return wrapper
class Tree(object):
    """Rooted newick taxonomy tree with cached clade and path lookups."""
    def __init__(self, newick_tree):
        self.tree = Phylo.read(
            newick_tree, "newick", values_are_confidence=True, rooted=True
        )
    @memoize
    def find_clades(self, tx):
        """Return the first clade matching *tx* (cached per taxonomy string)."""
        return list(self.tree.find_clades(tx))[0]
    @memoize
    def get_path(self, clade):
        """Return the root-to-*clade* path, root included (cached per clade)."""
        full_path = self.tree.get_path(clade)
        full_path.insert(0, self.tree.root)
        return full_path
    def get_clade(self, tx, percent_id):
        """Return the clade for *tx*, truncated to the rank allowed by *percent_id*."""
        if not tx:
            return None
        # alignment-identity thresholds -> shallowest rank we may report
        bounds = ((0.80, "k"), (0.85, "p"), (0.90, "c"), (0.95, "o"), (0.97, "f"), (0.99, "g"))
        cutoff = next((rank for bound, rank in bounds if percent_id < bound), "s")
        clade = self.find_clades(tx)
        for node in self.get_path(clade):
            if node.name.startswith(cutoff):
                # truncated due to the alignment-percentage cutoff
                return node
        # the target itself was less specific than the cutoff
        return clade
    @staticmethod
    def tax_str(tx):
        """Pad *tx* (in place) to the full k/p/c/o/f/g/s lineage, filling gaps with '?'."""
        if isinstance(tx, str):
            tx = [tx]
        for rank, entry in zip_longest("kpcofgs", tx):
            if entry:
                assert entry.startswith("%s__" % rank)
            else:
                tx.append("%s__?" % rank)
        return tx
    def lca(self, tx, percent_id):
        """Return the padded lineage of the lowest common ancestor of *tx*."""
        if isinstance(tx, str):
            tx = [tx]
        clades = {self.get_clade(name, percent_id) for name in tx}
        ancestor = self.tree.common_ancestor(clades)
        if ancestor == self.tree.root:
            return self.tax_str([self.tree.root.name])
        else:
            path = self.get_path(ancestor)
            return self.tax_str([node.name for node in path])
| [
"itertools.zip_longest",
"Bio.Phylo.read",
"functools.wraps"
] | [((129, 150), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (144, 150), False, 'import functools\n'), ((448, 522), 'Bio.Phylo.read', 'Phylo.read', (['newick_tree', '"""newick"""'], {'values_are_confidence': '(True)', 'rooted': '(True)'}), "(newick_tree, 'newick', values_are_confidence=True, rooted=True)\n", (458, 522), False, 'from Bio import Phylo\n'), ((1890, 1916), 'itertools.zip_longest', 'zip_longest', (['"""kpcofgs"""', 'tx'], {}), "('kpcofgs', tx)\n", (1901, 1916), False, 'from itertools import zip_longest\n')] |
#!env python
import collections
import queue
import logging
import enum
import functools
import json
import time
import os
import gzip
import shutil
import random # ONLY USED FOR RANDOM DELAY AT BEGINNING.
import numpy as np
import argparse
import sys
sys.path.append("../src-testbed")
import events
import common
import placement_controller
class LocalController(object):
  """Routes each inference request to a worker that already hosts the model,
  falling back to reactive placement (or rejection) when none does."""
  def __init__(self, simulation):
    self.simulation = simulation
  def requestInference(self, curr_time, request):
    """Dispatch *request* and return the list of (time, event) tuples it spawns."""
    new_events = []
    candidates = self.simulation.model_placements.getWorkersFromModel(request.model)
    if candidates:
      # The model is already resident somewhere: dispatch directly.
      chosen = self.selectWorker(candidates)
      new_events.extend(chosen.assignRequest(curr_time, request, model_miss=False))
    elif self.simulation.flags.do_reactive:
      # No placement yet: ask the placement controller to create one.
      new_events.extend(self.simulation.placement_controller.requestPlacement(curr_time, request))
    else:
      logging.error("No available workers found")
      request.markRejected()
      new_events.append((curr_time, events.RequestCompletionEvent(self.simulation, request)))
    return new_events
  def selectWorker(self, possible_workers):
    """Pick one of *possible_workers* uniformly at random."""
    return self.simulation.rng.choice(possible_workers)
class PlacementController(object):
  """Bridges the simulator to the real placement-controller implementation.
  Wraps ``placement_controller.PlacementController``, feeding it per-model
  statistics gathered from the simulation's metrics and translating its
  placement decisions (removals then additions) into simulation events.
  """
  def __init__(self, simulation, flags):
    self.simulation = simulation
    self.flags = flags
    self.model_placements = self.simulation.model_placements
    self.placement_controller = placement_controller.PlacementController(flags)
    self.placement_controller.model_placements = self.model_placements # Overwrite with our model_placements
  def requestPlacement(self, curr_time, request):
    """Ask the controller to place ``request.model`` and return the resulting
    (time, event) tuples: model unloads first, then loads, then either the
    request assignment or its rejection if no placement materialized."""
    new_events = []
    # Snapshot of the per-model statistics the placement policy needs.
    model_info = {
      model: {
        "open_requests" : (self.simulation.metrics.per_model_requests[model] - self.simulation.metrics.per_model_responses[model]),
        "last_used" : model.last_used,
        "requests_submitted": self.simulation.metrics.per_model_requests[model],
        "placement_count" : len(self.model_placements.getWorkersFromModel(model)),
        "load_latency" : model.getLoadLatency(),
        "exec_latency" : model.getExecLatency(),
        "loaded_size" : model.getSize(),
      }
      for model in self.model_placements.getModels()
    }
    self.placement_controller.setModelInfo(model_info)
    self.placement_controller.requestToAddModels([request.model], request.id)
    # TODO: Figure out the proper logic on these. Specifically, this should be negotiated through the local controller
    while not self.placement_controller.model_placements.removals.empty():
      # First we schedule all removals
      worker, model = self.model_placements.removals.get()
      new_events.extend(worker.removeModel(curr_time, model))
      # Any eviction means the cluster is out of room: flag saturation.
      self.simulation.mark_as_saturated()
    while not self.placement_controller.model_placements.additions.empty():
      # Next we schedule all additions
      worker, model = self.model_placements.additions.get()
      new_events.extend(worker.addModel(curr_time, model))
    # Next we schedule the model on the chosen worker (or see what worker can now take it and assign it)
    if len(self.simulation.model_placements.getWorkersFromModel(request.model)) > 0:
      worker = self.simulation.local_controller.selectWorker(self.simulation.model_placements.getWorkersFromModel(request.model))
      new_events.extend(worker.assignRequest(curr_time, request, model_miss=True))
    else:
      # The controller produced no placement for this model: reject.
      request.markRejected()
      new_events.append( (curr_time, events.RequestCompletionEvent(self.simulation, request)) )
    return new_events
@functools.total_ordering
class Worker(object):
  """A serving node that executes queued work items (requests, model loads,
  model unloads) one at a time in FIFO order.
  Identity, hashing and ordering are all by ``name``.
  """
  class QueueItem(object):
    """Pairs a queued work item with the simulated latency it will take."""
    def __init__(self, item, latency):
      self.item = item
      self.latency = latency
    def getLatency(self):
      return self.latency
  def __init__(self, simulation, worker_name, *args, **kwargs):
    self.simulation = simulation
    self.name = worker_name
    # True while a queue item is in flight; presumably cleared by the
    # WorkerQueueCompletionEvent handler in the events module -- confirm.
    self.executing = False
    self.queue = queue.Queue()
    self.models_loaded = set()
  def __str__(self):
    return self.name
  def __hash__(self):
    return hash(self.name)
  def __lt__(self, other):
    return self.name < other.name
  def __eq__(self, other):
    # Allows comparison both against other Workers and against bare names.
    if isinstance(other, self.__class__):
      return self.name == other.name
    else:
      return self.name == other
  def assignRequest(self, curr_time, request, model_miss):
    """Enqueue *request* for execution; kick the queue if it was idle."""
    new_events = []
    request.assignToWorker(curr_time, model_miss)
    self.queue.put(self.__class__.QueueItem(request, request.model.getExecLatency()))
    if not self.executing:
      new_events.extend(self.startExecuting(curr_time))
    return new_events
  def removeModel(self, curr_time, model):
    """Enqueue an unload of *model*; the actual removal happens on completion."""
    new_events = []
    event_to_add = events.ModelRemovalEvent(self.simulation, self, model)
    self.queue.put(self.QueueItem(event_to_add, model.getUnloadLatency()))
    if not self.executing:
      new_events.extend(self.startExecuting(curr_time))
    return new_events
  def addModel(self, curr_time, model):
    """Enqueue a load of *model*; the actual addition happens on completion."""
    new_events = []
    self.queue.put(self.QueueItem(events.ModelAdditionEvent(self.simulation, self, model), model.getLoadLatency()))
    if not self.executing:
      new_events.extend(self.startExecuting(curr_time))
    return new_events
  def _removeModel(self, curr_time, model):
    # Actual state change, invoked when the queued removal event fires.
    new_events = []
    print(f"({curr_time:0.3f}) Removing {model} from {self}")
    self.models_loaded.remove(model)
    return new_events
  def _addModel(self, curr_time, model):
    # Actual state change, invoked when the queued addition event fires.
    new_events = []
    print(f"({curr_time:0.3f}) Adding {model} to {self}")
    self.models_loaded.add(model)
    return new_events
  def startExecuting(self, curr_time):
    """Pop the next queue item (if idle and non-empty) and schedule its completion."""
    new_events = []
    if self.executing:
      return new_events
    if self.queue.empty():
      return new_events
    self.executing = True
    next_queue_item = self.queue.get()
    if isinstance(next_queue_item.item, self.simulation.Request):
      # Requests additionally touch the model and record their start time.
      new_events.extend(next_queue_item.item.model.executeRequest(curr_time))
      next_queue_item.item.startExecution(curr_time)
    completion_event = events.WorkerQueueCompletionEvent(self.simulation, self, next_queue_item)
    new_events.append((curr_time + next_queue_item.getLatency(), completion_event))
    return new_events
@functools.total_ordering
class Model(common.ModelPlacements.Model):
  """Simulation-side model wrapper that records when it was last executed."""
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
  def executeRequest(self, curr_time):
    """Mark the model as used at *curr_time*; produces no follow-up events."""
    self.last_used = curr_time
    return []
class Simulation(object):
  """Discrete-event simulation of a model-serving cluster.
  Builds the models/workers/placement state from parsed flags, then drains a
  time-ordered event queue until empty.  Results are streamed to a gzipped
  log file in ``flags.results_dir``.
  """
  class Metrics(object):
    """Aggregates request counts and per-model latency samples."""
    def __init__(self, simulation):
      self.simulation = simulation
      self.general_metrics = {
        "requests_in" : 0,
        "requests_out" : 0,
      }
      self.per_model_requests = collections.defaultdict(int)
      self.per_model_responses = collections.defaultdict(int)
      self.per_model_latency = collections.defaultdict(list)
    def markRequestIn(self, model_name):
      """Count an arriving request for *model_name*."""
      self.general_metrics["requests_in"] += 1
      self.per_model_requests[model_name] += 1
    def markRequestOut(self, model_name, latency):
      """Count a completed request and record its end-to-end latency."""
      self.general_metrics["requests_out"] += 1
      self.per_model_responses[model_name] += 1
      self.per_model_latency[model_name].append(latency)
    def reportMetrics(self):
      """Print totals plus per-model mean latency (absolute and relative to load latency)."""
      print(f"Requests: {self.general_metrics['requests_out']} / {self.general_metrics['requests_in']} completed")
      for model in sorted(self.per_model_latency.keys()):
        print(f"{model} : {np.average(self.per_model_latency[model]):0.3f} : {np.average(self.per_model_latency[model]) / self.simulation.models_by_name[model].load_latency:%}")
  class Request(object):
    """A single inference request with timestamps for each lifecycle stage."""
    class Status(enum.Enum):
      INIT = 1
      ACCEPTED = 2
      REJECTED = 3
      EXECUTING = 4
      COMPLETED = 5
    def __init__(self, simulation, request_id, arrival_time, model_requested, *args, **kwargs):
      self.simulation = simulation
      self.status = self.__class__.Status.INIT
      self.id = int(request_id)
      self.model_requested = model_requested
      self.model = self.simulation.models_by_name[model_requested]
      self.arrival_time = float(arrival_time)
      # Stage timestamps stay at +inf until the stage is actually reached.
      self.assignment_time = float('inf')
      self.execution_time = float('inf')
      self.completion_time = float('inf')
      self.model_miss = False
      self.is_saturated = False
    def __str__(self):
      return f"R({self.id}, {self.arrival_time}, {self.model_requested}, {self.status})"
    def markRejected(self):
      self.status = self.__class__.Status.REJECTED
    def markComplete(self, curr_time):
      """Finalize the request and feed its latency into the metrics."""
      self.completion_time = curr_time
      self.status = self.__class__.Status.COMPLETED
      self.simulation.metrics.markRequestOut(self.model_requested, (curr_time-self.arrival_time))
    def assignToWorker(self, curr_time, model_miss):
      self.assignment_time = curr_time
      self.model_miss = model_miss
    def startExecution(self, curr_time):
      self.execution_time = curr_time
    def getResponse(self):
      """Serialize the request outcome (stage delays included) to a JSON string."""
      response_dict = {
        "request_id" : self.id,
        "model" : self.model_requested,
        "response" : f"{self.status}",
        "placement_delay" : self.assignment_time - self.arrival_time,
        "queue_delay" : self.execution_time - self.assignment_time,
        "execution_delay" : self.completion_time - self.execution_time,
        "overall_latency" : self.completion_time - self.arrival_time,
        "model_miss" : self.model_miss,
        "saturated" : self.is_saturated,
      }
      return json.dumps(response_dict)
    @classmethod
    def fromLine(cls, simulation, line):
      """Parse one whitespace-separated workload line: id, arrival time, model name."""
      return cls(simulation, *(line.split()))
  def __init__(self, flags, models_to_be_requested, rng_seed=None, *args, **kwargs):
    self.flags = flags
    self.rng = np.random.default_rng(rng_seed)
    self.results_fid = gzip.open(os.path.join(flags.results_dir, f"{flags.run_identifier}.log.gz"), 'wt')
    self.cache_size = flags.max_concurrent_models
    self.is_saturated = False
    model_descriptions = common.getModelInfo(json_file=flags.model_description_file)
    # Random startup delay so concurrent runs don't race each other when
    # copying the shared workload/model files below.
    time.sleep(10*random.random())
    if not os.path.exists(os.path.join(flags.results_dir, os.path.basename(flags.model_description_file))):
      shutil.copy(flags.model_description_file, flags.results_dir)
      shutil.copy(flags.workload_file, flags.results_dir)
    # Internally important data
    self.models_by_name = {
      model_name : Model(model_name, model_descriptions[model_name])
      for model_name in models_to_be_requested
    }
    self.workers_by_name = {
      worker_name : Worker(self, worker_name)
      for worker_name in [f"worker_{i:02d}" for i in range(flags.num_workers_to_add)]
    }
    self.model_placements = common.ModelPlacements()
    for model in self.models_by_name.values():
      self.model_placements.addModel(model)
    for worker in self.workers_by_name.values():
      self.model_placements.addWorker(worker)
    self.metrics = self.Metrics(self)
    # Components
    self.local_controller = LocalController(self)
    self.placement_controller = PlacementController(self, self.flags)
    # Event Queue
    self.event_queue = queue.PriorityQueue()
    # Setup some models in cache, because why not
    #for worker in sorted(self.workers_by_name.values()):
    #  for model in self.rng.choice(sorted(self.models_by_name.values()), size=self.cache_size, replace=False):
    #    self.model_placements.addModelToWorker(worker, model)
    #self.model_placements.sync()
  def run(self):
    """Drain the event queue in time order; each event may spawn further events."""
    logging.info("Starting simulation")
    while not self.event_queue.empty():
      curr_time, next_event = self.event_queue.get()
      # NOTE(review): log message is missing its closing parenthesis.
      logging.debug(f"NextEvent -> ({curr_time} : {next_event}")
      events_to_add = next_event.run(curr_time)
      for event_tuple in events_to_add:
        self.event_queue.put(event_tuple)
    logging.info("Simulation complete")
    self.metrics.reportMetrics()
    self.results_fid.close()
  def mark_as_saturated(self):
    # Set once the placement controller has to evict a model to make room.
    self.is_saturated = True
  def recordExit(self, request):
    """Append the request's JSON outcome to the gzipped results log."""
    self.results_fid.write(f"{request.getResponse()}\n")
def getFlags():
  """Build the argument parser (on top of the shared common and
  placement-controller parsers) and return the parsed flags.
  Also derives ``run_identifier`` and the results/logging directories,
  creating the results directory if needed.
  """
  parser = argparse.ArgumentParser(
      parents=[
        common.getParser(add_help=False),
        placement_controller.getParser(add_help=False, include_parents=False)
      ],
      conflict_handler='resolve'
  )
  # type=int so values given on the command line are parsed as integers
  # (previously "--cache_size 5" produced the string "5").
  parser.add_argument("--cache_size", default=3, type=int)
  parser.add_argument('--workload_file', default="../workload/workload.txt")
  parser.add_argument('--model_description_file', default="../workload/models.json")
  parser.add_argument('--stop_after', default=float('inf'), type=float)
  parser.add_argument('--run_identifier', default=None,
      help="Identifier for saving data logs")
  parser.add_argument('--results_dir', default="results/")
  parser.add_argument('--show_debug', action='store_true')
  parser.add_argument('--base_logging_dir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs/simulation')) )
  parser.add_argument('--run_series', default=None)
  flags = parser.parse_args()
  if flags.run_identifier is None:
    flags.run_identifier = flags.model_eviction_algorithm
  # Timestamp suffix keeps concurrent runs from clobbering each other's logs.
  flags.run_identifier = f"{flags.run_identifier}.{int(time.time())}"
  if flags.run_series is not None:
    flags.base_logging_dir = os.path.join(flags.base_logging_dir, flags.run_series)
  else:
    flags.base_logging_dir = os.path.join(flags.base_logging_dir, flags.run_identifier)
  flags.results_dir = flags.base_logging_dir
  if not os.path.exists(flags.results_dir):
    os.makedirs(flags.results_dir)
  return flags
def main():
  """Entry point: build the simulation from flags and replay the workload."""
  flags = getFlags()
  common.getLogger(hide_debug=(not flags.show_debug))
  # First pass over the workload just collects the set of models requested.
  with open(flags.workload_file) as workload_fid:
    models_to_be_requested = {l.split(' ')[2].strip() for l in workload_fid}
  simulation = Simulation(flags, models_to_be_requested, cache_size=flags.cache_size, rng_seed=flags.rng_seed)
  # The workload file must stay open for the whole run: RequestArrival events
  # stream further requests from it.  The context manager guarantees it gets
  # closed even if simulation.run() raises (the original leaked it on error).
  with open(flags.workload_file) as workload_fid:
    line = workload_fid.readline()
    first_request = simulation.Request.fromLine(simulation, line)
    simulation.event_queue.put( (first_request.arrival_time, events.RequestArrival(simulation, first_request, workload_fid)) )
    simulation.run()
if __name__ == '__main__':
  main()
"numpy.random.default_rng",
"events.WorkerQueueCompletionEvent",
"common.getLogger",
"logging.debug",
"events.ModelAdditionEvent",
"logging.info",
"sys.path.append",
"logging.error",
"os.path.exists",
"common.getParser",
"json.dumps",
"events.ModelRemovalEvent",
"events.RequestCompletionEven... | [((254, 287), 'sys.path.append', 'sys.path.append', (['"""../src-testbed"""'], {}), "('../src-testbed')\n", (269, 287), False, 'import sys\n'), ((14014, 14063), 'common.getLogger', 'common.getLogger', ([], {'hide_debug': '(not flags.show_debug)'}), '(hide_debug=not flags.show_debug)\n', (14030, 14063), False, 'import common\n'), ((1527, 1574), 'placement_controller.PlacementController', 'placement_controller.PlacementController', (['flags'], {}), '(flags)\n', (1567, 1574), False, 'import placement_controller\n'), ((4208, 4221), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (4219, 4221), False, 'import queue\n'), ((4973, 5027), 'events.ModelRemovalEvent', 'events.ModelRemovalEvent', (['self.simulation', 'self', 'model'], {}), '(self.simulation, self, model)\n', (4997, 5027), False, 'import events\n'), ((6303, 6376), 'events.WorkerQueueCompletionEvent', 'events.WorkerQueueCompletionEvent', (['self.simulation', 'self', 'next_queue_item'], {}), '(self.simulation, self, next_queue_item)\n', (6336, 6376), False, 'import events\n'), ((10047, 10078), 'numpy.random.default_rng', 'np.random.default_rng', (['rng_seed'], {}), '(rng_seed)\n', (10068, 10078), True, 'import numpy as np\n'), ((10302, 10361), 'common.getModelInfo', 'common.getModelInfo', ([], {'json_file': 'flags.model_description_file'}), '(json_file=flags.model_description_file)\n', (10321, 10361), False, 'import common\n'), ((11031, 11055), 'common.ModelPlacements', 'common.ModelPlacements', ([], {}), '()\n', (11053, 11055), False, 'import common\n'), ((11473, 11494), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (11492, 11494), False, 'import queue\n'), ((11848, 11883), 'logging.info', 'logging.info', (['"""Starting simulation"""'], {}), "('Starting simulation')\n", (11860, 11883), False, 'import logging\n'), ((12176, 12211), 'logging.info', 'logging.info', (['"""Simulation complete"""'], {}), "('Simulation complete')\n", (12188, 12211), False, 'import 
logging\n'), ((13676, 13730), 'os.path.join', 'os.path.join', (['flags.base_logging_dir', 'flags.run_series'], {}), '(flags.base_logging_dir, flags.run_series)\n', (13688, 13730), False, 'import os\n'), ((13768, 13826), 'os.path.join', 'os.path.join', (['flags.base_logging_dir', 'flags.run_identifier'], {}), '(flags.base_logging_dir, flags.run_identifier)\n', (13780, 13826), False, 'import os\n'), ((13884, 13917), 'os.path.exists', 'os.path.exists', (['flags.results_dir'], {}), '(flags.results_dir)\n', (13898, 13917), False, 'import os\n'), ((13923, 13953), 'os.makedirs', 'os.makedirs', (['flags.results_dir'], {}), '(flags.results_dir)\n', (13934, 13953), False, 'import os\n'), ((7009, 7037), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (7032, 7037), False, 'import collections\n'), ((7071, 7099), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (7094, 7099), False, 'import collections\n'), ((7131, 7160), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7154, 7160), False, 'import collections\n'), ((9777, 9802), 'json.dumps', 'json.dumps', (['response_dict'], {}), '(response_dict)\n', (9787, 9802), False, 'import json\n'), ((10118, 10183), 'os.path.join', 'os.path.join', (['flags.results_dir', 'f"""{flags.run_identifier}.log.gz"""'], {}), "(flags.results_dir, f'{flags.run_identifier}.log.gz')\n", (10130, 10183), False, 'import os\n'), ((10511, 10571), 'shutil.copy', 'shutil.copy', (['flags.model_description_file', 'flags.results_dir'], {}), '(flags.model_description_file, flags.results_dir)\n', (10522, 10571), False, 'import shutil\n'), ((10578, 10629), 'shutil.copy', 'shutil.copy', (['flags.workload_file', 'flags.results_dir'], {}), '(flags.workload_file, flags.results_dir)\n', (10589, 10629), False, 'import shutil\n'), ((11983, 12041), 'logging.debug', 'logging.debug', (['f"""NextEvent -> ({curr_time} : {next_event}"""'], {}), "(f'NextEvent -> ({curr_time} : 
{next_event}')\n", (11996, 12041), False, 'import logging\n'), ((14529, 14591), 'events.RequestArrival', 'events.RequestArrival', (['simulation', 'first_request', 'workload_fid'], {}), '(simulation, first_request, workload_fid)\n', (14550, 14591), False, 'import events\n'), ((999, 1042), 'logging.error', 'logging.error', (['"""No available workers found"""'], {}), "('No available workers found')\n", (1012, 1042), False, 'import logging\n'), ((5303, 5358), 'events.ModelAdditionEvent', 'events.ModelAdditionEvent', (['self.simulation', 'self', 'model'], {}), '(self.simulation, self, model)\n', (5328, 5358), False, 'import events\n'), ((10380, 10395), 'random.random', 'random.random', ([], {}), '()\n', (10393, 10395), False, 'import random\n'), ((12519, 12551), 'common.getParser', 'common.getParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (12535, 12551), False, 'import common\n'), ((12571, 12640), 'placement_controller.getParser', 'placement_controller.getParser', ([], {'add_help': '(False)', 'include_parents': '(False)'}), '(add_help=False, include_parents=False)\n', (12601, 12640), False, 'import placement_controller\n'), ((13594, 13605), 'time.time', 'time.time', ([], {}), '()\n', (13603, 13605), False, 'import time\n'), ((3726, 3781), 'events.RequestCompletionEvent', 'events.RequestCompletionEvent', (['self.simulation', 'request'], {}), '(self.simulation, request)\n', (3755, 3781), False, 'import events\n'), ((10455, 10501), 'os.path.basename', 'os.path.basename', (['flags.model_description_file'], {}), '(flags.model_description_file)\n', (10471, 10501), False, 'import os\n'), ((13306, 13331), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13321, 13331), False, 'import os\n'), ((1109, 1164), 'events.RequestCompletionEvent', 'events.RequestCompletionEvent', (['self.simulation', 'request'], {}), '(self.simulation, request)\n', (1138, 1164), False, 'import events\n'), ((7734, 7775), 'numpy.average', 'np.average', 
(['self.per_model_latency[model]'], {}), '(self.per_model_latency[model])\n', (7744, 7775), True, 'import numpy as np\n'), ((7785, 7826), 'numpy.average', 'np.average', (['self.per_model_latency[model]'], {}), '(self.per_model_latency[model])\n', (7795, 7826), True, 'import numpy as np\n')] |
# Copyright (c) 2018, <NAME> <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
"""
Utility module to locate filesystem directories relevant to ps4rp. Conforms to
the XDG spec on Linux. MacOS and Windows support are TODO.
"""
import functools
from xdg import BaseDirectory
# XDG resource name under which ps4rp stores its cache and config directories.
_XDG_RESOURCE = 'ps4-remote-play'
@functools.lru_cache()
def cache():
    """Return (creating it if needed) the user's XDG cache directory for ps4rp."""
    path = BaseDirectory.save_cache_path(_XDG_RESOURCE)
    return path
@functools.lru_cache()
def config():
    """Return (creating it if needed) the user's XDG config directory for ps4rp."""
    path = BaseDirectory.save_config_path(_XDG_RESOURCE)
    return path
| [
"functools.lru_cache",
"xdg.BaseDirectory.save_config_path",
"xdg.BaseDirectory.save_cache_path"
] | [((310, 331), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (329, 331), False, 'import functools\n'), ((404, 425), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (423, 425), False, 'import functools\n'), ((356, 400), 'xdg.BaseDirectory.save_cache_path', 'BaseDirectory.save_cache_path', (['_XDG_RESOURCE'], {}), '(_XDG_RESOURCE)\n', (385, 400), False, 'from xdg import BaseDirectory\n'), ((451, 496), 'xdg.BaseDirectory.save_config_path', 'BaseDirectory.save_config_path', (['_XDG_RESOURCE'], {}), '(_XDG_RESOURCE)\n', (481, 496), False, 'from xdg import BaseDirectory\n')] |
import os, confuse
# Read the launcher configuration and boot the Streamlit app on the
# configured port.
config = confuse.Configuration('RecLauncher')
config.set_file('config-st.yaml')
port = config['streamlit']['server_port'].get()
os.system(f"streamlit run app.py --server.port {port}")
"confuse.Configuration",
"os.system"
] | [((29, 65), 'confuse.Configuration', 'confuse.Configuration', (['"""RecLauncher"""'], {}), "('RecLauncher')\n", (50, 65), False, 'import os, confuse\n'), ((155, 217), 'os.system', 'os.system', (['f"""streamlit run app.py --server.port {server_port}"""'], {}), "(f'streamlit run app.py --server.port {server_port}')\n", (164, 217), False, 'import os, confuse\n')] |
from django.db import models
from apps.website.models.article import Article
# (database value, display label) pairs for Comments.status.
STATUS_CHOICES = (
    ("SH", "Show"),
    ("HD", "Hide"),
)
class Comments(models.Model):
    """A reader comment attached to a blog Article.

    ``status`` defaults to 'SH' (Show); valid values come from the
    module-level STATUS_CHOICES tuple.
    """
    # Deleting an Article cascades and removes its comments.
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    name = models.CharField(max_length=50, null=False)
    email = models.CharField(max_length=50, null=False)
    # The comment body itself.
    comments = models.TextField(max_length=2500, null=False)
    # Set once, automatically, when the row is first created.
    submitted_on = models.DateTimeField(auto_now_add=True)
    status = models.CharField(choices=STATUS_CHOICES, max_length=2,
                              default='SH')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = "Comments"
| [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((186, 238), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Article'], {'on_delete': 'models.CASCADE'}), '(Article, on_delete=models.CASCADE)\n', (203, 238), False, 'from django.db import models\n'), ((250, 293), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(False)'}), '(max_length=50, null=False)\n', (266, 293), False, 'from django.db import models\n'), ((306, 349), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(False)'}), '(max_length=50, null=False)\n', (322, 349), False, 'from django.db import models\n'), ((365, 410), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(2500)', 'null': '(False)'}), '(max_length=2500, null=False)\n', (381, 410), False, 'from django.db import models\n'), ((430, 469), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (450, 469), False, 'from django.db import models\n'), ((483, 551), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'STATUS_CHOICES', 'max_length': '(2)', 'default': '"""SH"""'}), "(choices=STATUS_CHOICES, max_length=2, default='SH')\n", (499, 551), False, 'from django.db import models\n')] |
# the simplex projection algorithm implemented as a layer, while using the saliency maps to obtain object size estimates
import sys
sys.path.insert(0,'/home/briq/libs/caffe/python')
import caffe
import random
import numpy as np
import scipy.misc
import imageio
import cv2
import scipy.ndimage as nd
import os.path
import scipy.io as sio
class SimplexProjectionLayer(caffe.Layer):
    """Caffe layer that projects each predicted class heatmap onto the set
    {x : sum(x) = nu}, where nu is an object-size estimate read from a
    precomputed saliency map (one .mat file per training image).
    """
    # Hard-coded dataset locations; adjust for a different setup.
    saliency_path = '/media/VOC/saliency/thresholded_saliency_images/'
    input_list_path = '/home/briq/libs/CSPN/training/input_list.txt'

    def simplexProjectionLinear(self, data_ind, class_ind, V_im, nu):
        """Project heatmap ``V_im`` onto {x : sum(x) = nu} using a randomized
        pivot search over candidate shift values (expected linear time).

        NOTE(review): ``data_ind`` and ``class_ind`` are currently unused.
        """
        if(nu<1):
            return V_im
        heatmap_size = V_im.shape[0]*V_im.shape[1]
        theta = np.sum(V_im)
        if(theta ==nu): # the size constrain is already satisfied
            return V_im
        if(theta < nu):
            # Total mass too small: raise every pixel by a uniform amount.
            pi = V_im+(nu-theta)/heatmap_size
            return pi
        # Total mass too large: search for the uniform shift theta such that
        # the shifted heatmap sums to nu, narrowing the candidate set U by a
        # randomly chosen pivot each iteration.
        V = V_im.flatten()
        s = 0.0
        p = 0.0
        U=V
        while(len(U) > 0):
            k = random.randint(0, len(U)-1)
            uk = U[k]
            UG = U[U>=uk]
            delta_p = len(UG)
            delta_s = np.sum(UG)
            if ((s+delta_s)-(p+delta_p)*uk<nu):
                # Pivot is feasible: accumulate the values >= uk and keep
                # searching among the smaller candidates.
                s = s+delta_s
                p = p+delta_p
                U = U[U<uk]
            else:
                # Overshoot: drop the pivot and keep only values above it.
                U = UG
                U = np.delete(U, np.where(U==uk))
        if(p<0.000001):
            raise ValueError('rho is too small, apparently something went wrong in the CNN') # happens when nu<1 or V_im=infinity for example
        theta = (s-nu)/p
        pi = V_im-theta
        return pi

    def setup(self, bottom, top):
        # bottom[0]: heatmaps (num, num_labels, H, W); bottom[2]: image
        # indices into input_list_path — TODO confirm against the prototxt.
        self.num_labels = bottom[0].shape[1]
        with open(self.input_list_path) as fp:
            self.images = fp.readlines()
        random.seed()

    def reshape(self, bottom, top):
        # Output mirrors the shape of the incoming heatmaps.
        top[0].reshape(*bottom[0].data.shape)

    def forward(self, bottom, top):
        for i in range(bottom[0].num):
            im_id = int(bottom[2].data[i])
            im_name = self.images[im_id].split(' ')[0].split('.')[0]
            # Default behaviour: pass heatmaps through unchanged.
            top[0].data[i] = bottom[0].data[i]
            saliency_name = self.saliency_path+im_name+'.mat'
            if (not os.path.isfile(saliency_name)):
                continue
            saliency_im = sio.loadmat(saliency_name, squeeze_me=True)['data']
            for c in range(self.num_labels):
                if(c==0):
                    # Skip class 0 (presumably background).
                    continue
                if(bottom[1].data[i,0,0,c]>0.5): # the label is there
                    instance = bottom[0].data[i][c]
                    # Size estimate: count of saliency pixels labelled c.
                    nu = np.sum(saliency_im==c)
                    if(nu>1):
                        # NOTE(review): this assignment repeats the one above.
                        instance = bottom[0].data[i][c]
                        top[0].data[i][c]= self.simplexProjectionLinear(i, c, instance, nu)

    def backward(self, top, propagate_down, bottom):
        # The projection is treated as non-differentiable: no gradient flows.
        pass
"sys.path.insert",
"numpy.where",
"scipy.io.loadmat",
"random.seed",
"numpy.sum"
] | [((132, 182), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/briq/libs/caffe/python"""'], {}), "(0, '/home/briq/libs/caffe/python')\n", (147, 182), False, 'import sys\n'), ((711, 723), 'numpy.sum', 'np.sum', (['V_im'], {}), '(V_im)\n', (717, 723), True, 'import numpy as np\n'), ((1802, 1815), 'random.seed', 'random.seed', ([], {}), '()\n', (1813, 1815), False, 'import random\n'), ((1151, 1161), 'numpy.sum', 'np.sum', (['UG'], {}), '(UG)\n', (1157, 1161), True, 'import numpy as np\n'), ((2336, 2379), 'scipy.io.loadmat', 'sio.loadmat', (['saliency_name'], {'squeeze_me': '(True)'}), '(saliency_name, squeeze_me=True)\n', (2347, 2379), True, 'import scipy.io as sio\n'), ((1372, 1389), 'numpy.where', 'np.where', (['(U == uk)'], {}), '(U == uk)\n', (1380, 1389), True, 'import numpy as np\n'), ((2635, 2659), 'numpy.sum', 'np.sum', (['(saliency_im == c)'], {}), '(saliency_im == c)\n', (2641, 2659), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
import sklearn.preprocessing as skp
import data_util as du
import training
class FXLSTM(nn.Module):
    """LSTM encoder followed by a linear mixing layer.

    Takes a (seq_len, batch, input_dim) tensor, keeps the first
    ``output_seq_len`` timesteps of the LSTM output, mixes them through a
    fully connected layer, and returns a
    (output_seq_len, batch, hidden_size) tensor.
    """

    def __init__(self, input_dim, hidden_size, num_layers, output_seq_len,
                 bias=True, dropout=0,
                 batch_first=False):
        super(FXLSTM, self).__init__()
        assert(num_layers > 0)
        assert(output_seq_len > 0)
        self.output_seq_len = output_seq_len
        # Indices of the leading timesteps that feed the linear layer.
        self.out_idx = Variable(torch.arange(output_seq_len).long())
        self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_size,
                            num_layers=num_layers, bias=bias,
                            dropout=dropout, batch_first=batch_first)
        self.linear = nn.Linear(hidden_size * output_seq_len,
                                hidden_size * output_seq_len)

    def forward(self, x):
        outputs, _state = self.lstm(x)
        _, batch, hidden = outputs.shape
        # Keep the index tensor on the same device as the activations.
        if outputs.is_cuda and not self.out_idx.is_cuda:
            self.out_idx = self.out_idx.cuda()
        leading = torch.index_select(outputs, 0, self.out_idx)
        # (L, N, H) -> (N, H*L) so the linear layer mixes across timesteps.
        flat = leading.permute([1, 2, 0]).contiguous().view((batch, -1))
        mixed = self.linear(flat)
        # Restore the (L, N, H) layout expected downstream.
        return (mixed.view((batch, hidden, self.output_seq_len))
                .permute([2, 0, 1]).contiguous())
class FXGRU(nn.Module):
    """GRU counterpart of FXLSTM: a GRU encoder plus a linear mixing layer.

    Takes a (seq_len, batch, input_dim) tensor, keeps the first
    ``output_seq_len`` timesteps of the GRU output, mixes them through a
    fully connected layer, and returns a
    (output_seq_len, batch, hidden_size) tensor.
    """

    def __init__(self, input_dim, hidden_size, num_layers, output_seq_len,
                 bias=True, dropout=0,
                 batch_first=False):
        super(FXGRU, self).__init__()
        assert(num_layers > 0)
        assert(output_seq_len > 0)
        self.output_seq_len = output_seq_len
        # Indices of the leading timesteps that feed the linear layer.
        self.out_idx = Variable(torch.arange(output_seq_len).long())
        self.lstm = nn.GRU(input_size=input_dim, hidden_size=hidden_size,
                           num_layers=num_layers, bias=bias,
                           dropout=dropout, batch_first=batch_first)
        self.linear = nn.Linear(hidden_size * output_seq_len,
                                hidden_size * output_seq_len)

    def forward(self, x):
        outputs, _state = self.lstm(x)
        _, batch, hidden = outputs.shape
        # Keep the index tensor on the same device as the activations.
        if outputs.is_cuda and not self.out_idx.is_cuda:
            self.out_idx = self.out_idx.cuda()
        leading = torch.index_select(outputs, 0, self.out_idx)
        # (L, N, H) -> (N, H*L) so the linear layer mixes across timesteps.
        flat = leading.permute([1, 2, 0]).contiguous().view((batch, -1))
        mixed = self.linear(flat)
        # Restore the (L, N, H) layout expected downstream.
        return (mixed.view((batch, hidden, self.output_seq_len))
                .permute([2, 0, 1]).contiguous())
def reshape_rnn(array, inplace=True):
    """Move a (batch, input_dim, seq_len) array into the (seq_len, batch,
    input_dim) layout that PyTorch RNN modules expect by default.

    A single axis permutation replaces the original pair of swaps:
    ``swapaxes(0, 2)`` then ``swapaxes(1, 2)`` == ``transpose(2, 0, 1)``.

    Parameters
    ----------
    array : np.ndarray
        Three-dimensional input array.
    inplace : bool, optional
        Accepted for backward compatibility but ignored: numpy axis
        permutations always return a view and never modify the input.

    Returns
    -------
    np.ndarray
        A view of ``array`` with axes reordered to (2, 0, 1).
    """
    return array.transpose(2, 0, 1)
def load_data(test_size=.2):
    """Load the FX dataset and arrange it for PyTorch recurrent models.

    Features are standardised (fit on train, applied to test) and every
    array is returned in the (seq_len, batch, input_dim) layout that
    ``nn.LSTM``/``nn.GRU`` expect by default.

    Parameters
    ----------
    test_size : float, optional
        Fraction of the data held out for testing.

    Returns
    -------
    tuple of np.ndarray
        (train_x, test_x, train_y, test_y)
    """
    train_x, test_x, train_y, test_y = du.load_fx_10m_xy(test_size=test_size,
                                                       y_shape_mode=1)
    # Flatten (batch, dim, seq) -> (batch*seq, dim) so the scaler sees one
    # row per timestep.
    flat_train = train_x.swapaxes(1, 2).reshape((-1, train_x.shape[1]))
    flat_test = test_x.swapaxes(1, 2).reshape((-1, test_x.shape[1]))
    scaler = skp.StandardScaler()
    flat_train = scaler.fit_transform(flat_train)
    flat_test = scaler.transform(flat_test)  # reuse the train statistics
    print('Swap axes to fit LSTM...')
    # Restore (batch, dim, seq) from the flattened, scaled features.
    train_x = (flat_train
               .reshape((-1, train_x.shape[-1], flat_train.shape[-1]))
               .swapaxes(1, 2))
    test_x = (flat_test
              .reshape((-1, test_x.shape[-1], flat_test.shape[-1]))
              .swapaxes(1, 2))
    # Finally move everything into (seq_len, batch, input_dim).
    train_x = reshape_rnn(train_x)
    train_y = reshape_rnn(train_y)
    test_x = reshape_rnn(test_x)
    test_y = reshape_rnn(test_y)
    return train_x, test_x, train_y, test_y
def run_model(mode='LSTM'):
    """Train and evaluate one of the recurrent forecasters.

    Parameters
    ----------
    mode : str, optional
        'LSTM' selects :class:`FXLSTM`; any other value selects
        :class:`FXGRU`.

    Returns
    -------
    The loss history produced by ``training.run_training``.
    """
    print('Load data...')
    train_x, test_x, train_y, test_y = load_data(.2)
    input_dim = train_x.shape[-1]
    output_seq = train_y.shape[0]
    # Hyper-parameters.
    num_layers = 4
    hidden_size = input_dim
    dropout = 0.2
    print('Running %s Model...' % mode)
    model_cls = FXLSTM if mode == 'LSTM' else FXGRU
    model = model_cls(input_dim, hidden_size, num_layers, output_seq,
                      dropout=dropout)
    epochs = 500
    lr = .01
    loss_func = nn.L1Loss()  # mean absolute error
    loss = training.run_training(model, (train_x, test_x, train_y, test_y),
                                 loss_func, lr=lr, epochs=epochs,
                                 print_every=100,
                                 test_loss_func=torch.nn.functional.l1_loss)
    return loss
if __name__ == '__main__':
    # Train both recurrent variants back to back.
    for variant in ('LSTM', 'GRU'):
        run_model(mode=variant)
| [
"data_util.load_fx_10m_xy",
"training.run_training",
"torch.index_select",
"torch.nn.LSTM",
"torch.nn.L1Loss",
"sklearn.preprocessing.StandardScaler",
"torch.nn.Linear",
"torch.arange",
"torch.nn.GRU"
] | [((3384, 3438), 'data_util.load_fx_10m_xy', 'du.load_fx_10m_xy', ([], {'test_size': 'test_size', 'y_shape_mode': '(1)'}), '(test_size=test_size, y_shape_mode=1)\n', (3401, 3438), True, 'import data_util as du\n'), ((3704, 3724), 'sklearn.preprocessing.StandardScaler', 'skp.StandardScaler', ([], {}), '()\n', (3722, 3724), True, 'import sklearn.preprocessing as skp\n'), ((5598, 5609), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (5607, 5609), True, 'import torch.nn as nn\n'), ((5622, 5789), 'training.run_training', 'training.run_training', (['model', '(train_x, test_x, train_y, test_y)', 'loss_func'], {'lr': 'lr', 'epochs': 'epochs', 'print_every': '(100)', 'test_loss_func': 'torch.nn.functional.l1_loss'}), '(model, (train_x, test_x, train_y, test_y), loss_func,\n lr=lr, epochs=epochs, print_every=100, test_loss_func=torch.nn.\n functional.l1_loss)\n', (5643, 5789), False, 'import training\n'), ((571, 706), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_dim', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'bias': 'bias', 'dropout': 'dropout', 'batch_first': 'batch_first'}), '(input_size=input_dim, hidden_size=hidden_size, num_layers=\n num_layers, bias=bias, dropout=dropout, batch_first=batch_first)\n', (578, 706), True, 'import torch.nn as nn\n'), ((836, 905), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * output_seq_len)', '(hidden_size * output_seq_len)'], {}), '(hidden_size * output_seq_len, hidden_size * output_seq_len)\n', (845, 905), True, 'import torch.nn as nn\n'), ((1188, 1226), 'torch.index_select', 'torch.index_select', (['z', '(0)', 'self.out_idx'], {}), '(z, 0, self.out_idx)\n', (1206, 1226), False, 'import torch\n'), ((1943, 2076), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'input_dim', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'bias': 'bias', 'dropout': 'dropout', 'batch_first': 'batch_first'}), '(input_size=input_dim, hidden_size=hidden_size, num_layers=num_layers,\n bias=bias, dropout=dropout, 
batch_first=batch_first)\n', (1949, 2076), True, 'import torch.nn as nn\n'), ((2203, 2272), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * output_seq_len)', '(hidden_size * output_seq_len)'], {}), '(hidden_size * output_seq_len, hidden_size * output_seq_len)\n', (2212, 2272), True, 'import torch.nn as nn\n'), ((2555, 2593), 'torch.index_select', 'torch.index_select', (['z', '(0)', 'self.out_idx'], {}), '(z, 0, self.out_idx)\n', (2573, 2593), False, 'import torch\n'), ((513, 541), 'torch.arange', 'torch.arange', (['output_seq_len'], {}), '(output_seq_len)\n', (525, 541), False, 'import torch\n'), ((1885, 1913), 'torch.arange', 'torch.arange', (['output_seq_len'], {}), '(output_seq_len)\n', (1897, 1913), False, 'import torch\n')] |
import collections
from contextlib import contextmanager
import json
import os
import pytest
import consul.base
# Local shorthands for the callback helper and response type under test.
CB = consul.base.CB
Response = consul.base.Response

# Immutable record of one HTTP call captured by the fake HTTPClient below.
Request = collections.namedtuple(
    'Request', ['method', 'path', 'params', 'data'])
class HTTPClient(object):
    """Fake transport that records each HTTP call as a Request tuple
    instead of performing any network I/O."""

    def __init__(self, base_uri, verify=True, cert=None, auth=None):
        self.base_uri = base_uri
        self.verify = verify
        self.cert = cert
        self.auth = auth

    def get(self, callback, path, params=None):
        return Request(method='get', path=path, params=params, data=None)

    def put(self, callback, path, params=None, data=''):
        return Request(method='put', path=path, params=params, data=data)

    def delete(self, callback, path, params=None):
        return Request(method='delete', path=path, params=params, data=None)
class Consul(consul.base.Consul):
    # Test double: swaps the real network transport for the recording
    # HTTPClient so issued requests can be inspected instead of sent.
    def connect(self, base_uri, verify=True, cert=None, auth=None):
        return HTTPClient(base_uri, verify=verify, cert=cert, auth=auth)
class TestEnvironment(object):
    """Consul.from_env() must pick up configuration from the CONSUL_*
    environment variables."""

    @contextmanager
    def environ(self, **env):
        """Temporarily set the given environment variables, restoring (or
        deleting) each one on exit."""
        original_env = {}
        for key in env:
            original_env[key] = os.getenv(key)
        os.environ.update(env)
        try:
            yield
        finally:
            for key, value in original_env.items():
                if value is None:
                    # Variable did not exist before: remove it again.
                    del os.environ[key]
                else:
                    os.environ[key] = value

    def test_CONSUL_HTTP_ADDR(self):
        # A full URI is taken as-is.
        CONSUL_HTTP_ADDR = 'http://127.0.0.23:4242'
        with self.environ(CONSUL_HTTP_ADDR=CONSUL_HTTP_ADDR):
            c = Consul.from_env()
        assert c.http.base_uri == CONSUL_HTTP_ADDR

    def test_CONSUL_HTTP_ADDR_scheme_http(self):
        # A bare host:port defaults to the http scheme.
        CONSUL_HTTP_ADDR = '127.0.0.23:4242'
        with self.environ(CONSUL_HTTP_ADDR=CONSUL_HTTP_ADDR):
            c = Consul.from_env()
        assert c.http.base_uri == 'http://' + CONSUL_HTTP_ADDR

    def test_CONSUL_HTTP_ADDR_with_CONSUL_HTTP_SSL(self):
        # CONSUL_HTTP_SSL=true upgrades a bare address to https.
        CONSUL_HTTP_ADDR = '127.0.0.23:4242'
        with self.environ(CONSUL_HTTP_ADDR=CONSUL_HTTP_ADDR,
                          CONSUL_HTTP_SSL='true'):
            c = Consul.from_env()
        assert c.http.base_uri == 'https://' + CONSUL_HTTP_ADDR

    def test_CONSUL_HTTP_TOKEN(self):
        CONSUL_HTTP_TOKEN = '<PASSWORD>'
        with self.environ(CONSUL_HTTP_TOKEN=CONSUL_HTTP_TOKEN):
            c = Consul.from_env()
        assert c.token == CONSUL_HTTP_TOKEN

    def test_cert(self):
        # Client cert and key are combined into the (cert, key) pair.
        CONSUL_CLIENT_CERT = '/path/to/client.crt'
        CONSUL_CLIENT_KEY = '/path/to/client.key'
        with self.environ(CONSUL_CLIENT_CERT=CONSUL_CLIENT_CERT,
                          CONSUL_CLIENT_KEY=CONSUL_CLIENT_KEY):
            c = Consul.from_env()
        assert c.http.cert == (CONSUL_CLIENT_CERT, CONSUL_CLIENT_KEY)

    def test_CONSUL_HTTP_AUTH(self):
        # "user:password" is split into a two-element list.
        CONSUL_HTTP_AUTH = 'username:s3cr3t'
        with self.environ(CONSUL_HTTP_AUTH=CONSUL_HTTP_AUTH):
            c = Consul.from_env()
        assert c.http.auth == CONSUL_HTTP_AUTH.split(':')

    def test_CONSUL_HTTP_SSL_VERIFY_True(self):
        CONSUL_HTTP_SSL_VERIFY = 'true'
        with self.environ(CONSUL_HTTP_SSL_VERIFY=CONSUL_HTTP_SSL_VERIFY):
            c = Consul.from_env()
        assert c.http.verify is True

    def test_CONSUL_HTTP_SSL_VERIFY_False(self):
        CONSUL_HTTP_SSL_VERIFY = 'false'
        with self.environ(CONSUL_HTTP_SSL_VERIFY=CONSUL_HTTP_SSL_VERIFY):
            c = Consul.from_env()
        assert c.http.verify is False

    def test_CONSUL_CACERT(self):
        # A CA bundle path is handed to the transport as the verify value.
        CONSUL_CACERT = '/path/to/ca.crt'
        with self.environ(CONSUL_CACERT=CONSUL_CACERT):
            c = Consul.from_env()
        assert c.http.verify == CONSUL_CACERT
def _should_support(c):
return (
# kv
lambda **kw: c.kv.get('foo', **kw),
# catalog
c.catalog.nodes,
c.catalog.services,
lambda **kw: c.catalog.node('foo', **kw),
lambda **kw: c.catalog.service('foo', **kw),
# session
c.session.list,
lambda **kw: c.session.info('foo', **kw),
lambda **kw: c.session.node('foo', **kw),
)
def _should_support_node_meta(c):
return (
# catalog
c.catalog.nodes,
c.catalog.services,
lambda **kw: c.catalog.service('foo', **kw),
lambda **kw: c.catalog.register('foo', 'bar', **kw),
# health
lambda **kw: c.health.service('foo', **kw),
lambda **kw: c.health.checks('foo', **kw),
lambda **kw: c.health.state('unknown', **kw),
)
def _should_support_meta(c):
return (
# agent
lambda **kw: c.agent.service.register('foo', **kw),
lambda **kw: c.agent.service.register('foo', 'bar', **kw),
)
class TestIndex(object):
    """Read endpoints must accept an ``index`` kwarg for blocking queries."""

    def test_index(self):
        client = Consul()
        for endpoint in _should_support(client):
            assert endpoint().params == []
            assert endpoint(index='5').params == [('index', '5')]
class TestConsistency(object):
    """Read endpoints must honour per-call and client-default consistency
    modes."""

    def test_explict(self):
        client = Consul()
        for endpoint in _should_support(client):
            assert endpoint().params == []
            assert endpoint(consistency='default').params == []
            assert endpoint(consistency='consistent').params == [
                ('consistent', '1')]
            assert endpoint(consistency='stale').params == [('stale', '1')]

    def test_implicit(self):
        # A client-level default applies unless a call overrides it.
        client = Consul(consistency='consistent')
        for endpoint in _should_support(client):
            assert endpoint().params == [('consistent', '1')]
            assert endpoint(consistency='default').params == []
            assert endpoint(consistency='consistent').params == [
                ('consistent', '1')]
            assert endpoint(consistency='stale').params == [('stale', '1')]
class TestNodemeta(object):
    """Endpoints taking ``node_meta`` must encode each pair as
    ``('node-meta', 'key:value')`` query params."""

    def test_node_meta(self):
        client = Consul()
        for endpoint in _should_support_node_meta(client):
            assert endpoint().params == []
            got = sorted(endpoint(node_meta={'env': 'prod', 'net': 1}).params)
            expected = sorted([('node-meta', 'net:1'),
                               ('node-meta', 'env:prod')])
            assert got == expected
class TestMeta(object):
    """Endpoints taking ``meta`` must serialise it into the JSON body."""

    def test_meta(self):
        client = Consul()
        for endpoint in _should_support_meta(client):
            body = json.loads(endpoint(meta={'env': 'prod', 'net': 1}).data)
            assert sorted(body['meta']) == sorted({'env': 'prod', 'net': 1})
class TestCB(object):
    """CB._status must map HTTP status codes onto library exceptions."""

    def test_status_200_passes(self):
        # 2xx responses pass through without raising.
        response = consul.base.Response(200, None, None)
        CB._status(response)

    @pytest.mark.parametrize(
        'response, expected_exception',
        [
            (Response(400, None, None), consul.base.BadRequest),
            (Response(401, None, None), consul.base.ACLDisabled),
            (Response(403, None, None), consul.base.ACLPermissionDenied),
        ])
    def test_status_4xx_raises_error(self, response, expected_exception):
        # Each known 4xx code maps to its dedicated exception type.
        with pytest.raises(expected_exception):
            CB._status(response)

    def test_status_404_allow_404(self):
        # 404 can be tolerated when explicitly allowed.
        response = Response(404, None, None)
        CB._status(response, allow_404=True)

    def test_status_404_dont_allow_404(self):
        response = Response(404, None, None)
        with pytest.raises(consul.base.NotFound):
            CB._status(response, allow_404=False)

    def test_status_405_raises_generic_ClientError(self):
        # Unmapped 4xx codes fall back to the generic ClientError.
        response = Response(405, None, None)
        with pytest.raises(consul.base.ClientError):
            CB._status(response)

    @pytest.mark.parametrize(
        'response',
        [
            Response(500, None, None),
            Response(599, None, None),
        ])
    def test_status_5xx_raises_error(self, response):
        # Server-side errors raise the base ConsulException.
        with pytest.raises(consul.base.ConsulException):
            CB._status(response)
class TestChecks(object):
    """
    Check constructor helpers return valid check configurations.
    """
    # Cases: minimal, +timeout, +deregister, both, and with a custom header.
    @pytest.mark.parametrize(
        'url, interval, timeout, deregister, header, want', [
            ('http://example.com', '10s', None, None, None, {
                'http': 'http://example.com',
                'interval': '10s',
            }),
            ('http://example.com', '10s', '1s', None, None, {
                'http': 'http://example.com',
                'interval': '10s',
                'timeout': '1s',
            }),
            ('http://example.com', '10s', None, '1m', None, {
                'http': 'http://example.com',
                'interval': '10s',
                'DeregisterCriticalServiceAfter': '1m',
            }),
            ('http://example.com', '10s', '1s', '1m', None, {
                'http': 'http://example.com',
                'interval': '10s',
                'timeout': '1s',
                'DeregisterCriticalServiceAfter': '1m',
            }),
            ('http://example.com', '10s', '1s', '1m',
             {'X-Test-Header': ['TestHeaderValue']},
             {
                 'http': 'http://example.com',
                 'interval': '10s',
                 'timeout': '1s',
                 'DeregisterCriticalServiceAfter': '1m',
                 'header': {'X-Test-Header': ['TestHeaderValue']}
             }
             ),
        ])
    def test_http_check(self, url, interval, timeout, deregister, header,
                        want):
        # Optional arguments must only appear in the dict when provided.
        ch = consul.base.Check.http(url, interval, timeout=timeout,
                                    deregister=deregister, header=header)
        assert ch == want

    # Cases: minimal, +timeout, +deregister, both.
    @pytest.mark.parametrize(
        'host, port, interval, timeout, deregister, want',
        [
            ('localhost', 1234, '10s', None, None, {
                'tcp': 'localhost:1234',
                'interval': '10s',
            }),
            ('localhost', 1234, '10s', '1s', None, {
                'tcp': 'localhost:1234',
                'interval': '10s',
                'timeout': '1s',
            }),
            ('localhost', 1234, '10s', None, '1m', {
                'tcp': 'localhost:1234',
                'interval': '10s',
                'DeregisterCriticalServiceAfter': '1m',
            }),
            ('localhost', 1234, '10s', '1s', '1m', {
                'tcp': 'localhost:1234',
                'interval': '10s',
                'timeout': '1s',
                'DeregisterCriticalServiceAfter': '1m',
            }),
        ])
    def test_tcp_check(self, host, port, interval, timeout, deregister, want):
        # host and port are joined into a single "host:port" string.
        ch = consul.base.Check.tcp(host, port, interval, timeout=timeout,
                                   deregister=deregister)
        assert ch == want

    # Cases: minimal, +deregister.
    @pytest.mark.parametrize(
        'container_id, shell, script, interval, deregister, want',
        [
            ('wandering_bose', '/bin/sh', '/bin/true', '10s', None, {
                'docker_container_id': 'wandering_bose',
                'shell': '/bin/sh',
                'script': '/bin/true',
                'interval': '10s',
            }),
            ('wandering_bose', '/bin/sh', '/bin/true', '10s', '1m', {
                'docker_container_id': 'wandering_bose',
                'shell': '/bin/sh',
                'script': '/bin/true',
                'interval': '10s',
                'DeregisterCriticalServiceAfter': '1m',
            }),
        ])
    def test_docker_check(self, container_id, shell, script, interval,
                          deregister, want):
        ch = consul.base.Check.docker(container_id, shell, script, interval,
                                      deregister=deregister)
        assert ch == want

    def test_ttl_check(self):
        # TTL checks need no other configuration.
        ch = consul.base.Check.ttl('1m')
        assert ch == {'ttl': '1m'}
| [
"collections.namedtuple",
"os.getenv",
"pytest.mark.parametrize",
"os.environ.update",
"pytest.raises"
] | [((179, 250), 'collections.namedtuple', 'collections.namedtuple', (['"""Request"""', "['method', 'path', 'params', 'data']"], {}), "('Request', ['method', 'path', 'params', 'data'])\n", (201, 250), False, 'import collections\n'), ((8105, 8989), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""url, interval, timeout, deregister, header, want"""', "[('http://example.com', '10s', None, None, None, {'http':\n 'http://example.com', 'interval': '10s'}), ('http://example.com', '10s',\n '1s', None, None, {'http': 'http://example.com', 'interval': '10s',\n 'timeout': '1s'}), ('http://example.com', '10s', None, '1m', None, {\n 'http': 'http://example.com', 'interval': '10s',\n 'DeregisterCriticalServiceAfter': '1m'}), ('http://example.com', '10s',\n '1s', '1m', None, {'http': 'http://example.com', 'interval': '10s',\n 'timeout': '1s', 'DeregisterCriticalServiceAfter': '1m'}), (\n 'http://example.com', '10s', '1s', '1m', {'X-Test-Header': [\n 'TestHeaderValue']}, {'http': 'http://example.com', 'interval': '10s',\n 'timeout': '1s', 'DeregisterCriticalServiceAfter': '1m', 'header': {\n 'X-Test-Header': ['TestHeaderValue']}})]"], {}), "('url, interval, timeout, deregister, header, want',\n [('http://example.com', '10s', None, None, None, {'http':\n 'http://example.com', 'interval': '10s'}), ('http://example.com', '10s',\n '1s', None, None, {'http': 'http://example.com', 'interval': '10s',\n 'timeout': '1s'}), ('http://example.com', '10s', None, '1m', None, {\n 'http': 'http://example.com', 'interval': '10s',\n 'DeregisterCriticalServiceAfter': '1m'}), ('http://example.com', '10s',\n '1s', '1m', None, {'http': 'http://example.com', 'interval': '10s',\n 'timeout': '1s', 'DeregisterCriticalServiceAfter': '1m'}), (\n 'http://example.com', '10s', '1s', '1m', {'X-Test-Header': [\n 'TestHeaderValue']}, {'http': 'http://example.com', 'interval': '10s',\n 'timeout': '1s', 'DeregisterCriticalServiceAfter': '1m', 'header': {\n 'X-Test-Header': ['TestHeaderValue']}})])\n", 
(8128, 8989), False, 'import pytest\n'), ((9713, 10276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""host, port, interval, timeout, deregister, want"""', "[('localhost', 1234, '10s', None, None, {'tcp': 'localhost:1234',\n 'interval': '10s'}), ('localhost', 1234, '10s', '1s', None, {'tcp':\n 'localhost:1234', 'interval': '10s', 'timeout': '1s'}), ('localhost', \n 1234, '10s', None, '1m', {'tcp': 'localhost:1234', 'interval': '10s',\n 'DeregisterCriticalServiceAfter': '1m'}), ('localhost', 1234, '10s',\n '1s', '1m', {'tcp': 'localhost:1234', 'interval': '10s', 'timeout':\n '1s', 'DeregisterCriticalServiceAfter': '1m'})]"], {}), "('host, port, interval, timeout, deregister, want',\n [('localhost', 1234, '10s', None, None, {'tcp': 'localhost:1234',\n 'interval': '10s'}), ('localhost', 1234, '10s', '1s', None, {'tcp':\n 'localhost:1234', 'interval': '10s', 'timeout': '1s'}), ('localhost', \n 1234, '10s', None, '1m', {'tcp': 'localhost:1234', 'interval': '10s',\n 'DeregisterCriticalServiceAfter': '1m'}), ('localhost', 1234, '10s',\n '1s', '1m', {'tcp': 'localhost:1234', 'interval': '10s', 'timeout':\n '1s', 'DeregisterCriticalServiceAfter': '1m'})])\n", (9736, 10276), False, 'import pytest\n'), ((10819, 11298), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""container_id, shell, script, interval, deregister, want"""', "[('wandering_bose', '/bin/sh', '/bin/true', '10s', None, {\n 'docker_container_id': 'wandering_bose', 'shell': '/bin/sh', 'script':\n '/bin/true', 'interval': '10s'}), ('wandering_bose', '/bin/sh',\n '/bin/true', '10s', '1m', {'docker_container_id': 'wandering_bose',\n 'shell': '/bin/sh', 'script': '/bin/true', 'interval': '10s',\n 'DeregisterCriticalServiceAfter': '1m'})]"], {}), "(\n 'container_id, shell, script, interval, deregister, want', [(\n 'wandering_bose', '/bin/sh', '/bin/true', '10s', None, {\n 'docker_container_id': 'wandering_bose', 'shell': '/bin/sh', 'script':\n '/bin/true', 'interval': '10s'}), 
('wandering_bose', '/bin/sh',\n '/bin/true', '10s', '1m', {'docker_container_id': 'wandering_bose',\n 'shell': '/bin/sh', 'script': '/bin/true', 'interval': '10s',\n 'DeregisterCriticalServiceAfter': '1m'})])\n", (10842, 11298), False, 'import pytest\n'), ((1143, 1165), 'os.environ.update', 'os.environ.update', (['env'], {}), '(env)\n', (1160, 1165), False, 'import os\n'), ((1120, 1134), 'os.getenv', 'os.getenv', (['key'], {}), '(key)\n', (1129, 1134), False, 'import os\n'), ((7115, 7148), 'pytest.raises', 'pytest.raises', (['expected_exception'], {}), '(expected_exception)\n', (7128, 7148), False, 'import pytest\n'), ((7420, 7455), 'pytest.raises', 'pytest.raises', (['consul.base.NotFound'], {}), '(consul.base.NotFound)\n', (7433, 7455), False, 'import pytest\n'), ((7624, 7662), 'pytest.raises', 'pytest.raises', (['consul.base.ClientError'], {}), '(consul.base.ClientError)\n', (7637, 7662), False, 'import pytest\n'), ((7914, 7956), 'pytest.raises', 'pytest.raises', (['consul.base.ConsulException'], {}), '(consul.base.ConsulException)\n', (7927, 7956), False, 'import pytest\n')] |
'''
This file implements JPP-Net for human parsing and pose detection.
'''
import tensorflow as tf
import os
from tensorflow.python.framework import graph_util
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow.python.platform import gfile
import time
class JPP(object):
    """Wrapper around a frozen JPP-Net graph for joint human parsing and pose estimation."""
    # Magic numbers are for normalization. You can get details from original JPP-Net repo.
    # Per-channel (BGR) means subtracted from input images before inference.
    IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
def __init__(self, pb_path):
options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=options))
self.sess = tf.Session()
with gfile.FastGFile(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
self.sess.graph.as_default()
tf.import_graph_def(graph_def, name='') # import compute graph
self.sess.run(tf.global_variables_initializer())
self.img_tensor = sess.graph.get_tensor_by_name('img_1:0')
self.pose_tensor = sess.graph.get_tensor_by_name('pose:0')
self.parse_tensor = sess.graph.get_tensor_by_name('parse:0')
    def predict(self, img):
        '''
        img is a human image array with shape (any,any,3)
        return a list, [pose, parse]
        '''
        # Subtract the channel means before feeding the network, then run
        # both output heads (pose and parse) in a single session call.
        ret = self.sess.run([self.pose_tensor,self.parse_tensor], feed_dict={self.img_tensor: img-JPP.IMG_MEAN})
return ret | [
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.import_graph_def",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions"
] | [((423, 493), 'numpy.array', 'np.array', (['(104.00698793, 116.66876762, 122.67891434)'], {'dtype': 'np.float32'}), '((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)\n', (431, 493), True, 'import numpy as np\n'), ((548, 580), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (561, 580), True, 'import tensorflow as tf\n'), ((672, 684), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (682, 684), True, 'import tensorflow as tf\n'), ((698, 728), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['pb_path', '"""rb"""'], {}), "(pb_path, 'rb')\n", (713, 728), False, 'from tensorflow.python.platform import gfile\n'), ((759, 772), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (770, 772), True, 'import tensorflow as tf\n'), ((874, 913), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (893, 913), True, 'import tensorflow as tf\n'), ((959, 992), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (990, 992), True, 'import tensorflow as tf\n'), ((614, 649), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'options'}), '(gpu_options=options)\n', (628, 649), True, 'import tensorflow as tf\n')] |
import itertools
import subprocess
import sys
import pytest
from arca import Arca, Task, CurrentEnvironmentBackend
from arca.utils import logger
from arca.exceptions import BuildError
from common import BASE_DIR, RETURN_COLORAMA_VERSION_FUNCTION, SECOND_RETURN_STR_FUNCTION, TEST_UNICODE
def _pip_action(action, package):
if action not in ["install", "uninstall"]:
raise ValueError(f"{action} is invalid value for action")
cmd = [sys.executable, "-m", "pip", action]
if action == "uninstall":
cmd += ["-y"]
cmd += [package]
logger.info("Installing requirements with command: %s", cmd)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_stream, err_stream = process.communicate()
out_stream = out_stream.decode("utf-8")
err_stream = err_stream.decode("utf-8")
logger.debug("Return code is %s", process.returncode)
logger.debug(out_stream)
logger.debug(err_stream)
@pytest.mark.parametrize(
    ["requirements_location", "file_location"], list(itertools.product(
        (None, "requirements/requirements.txt"),
        (None, "test_package"),
    ))
)
def test_current_environment_backend(temp_repo_func, requirements_location, file_location):
    """ Tests the basic stuff around backends, if it can install requirements from more locations,
        launch stuff with correct cwd, works well with multiple branches, etc
    """
    # Only pass non-default locations through so defaults are also exercised.
    kwargs = {}
    if requirements_location is not None:
        kwargs["requirements_location"] = requirements_location
    if file_location is not None:
        kwargs["cwd"] = file_location
    backend = CurrentEnvironmentBackend(verbosity=2, **kwargs)
    arca = Arca(backend=backend, base_dir=BASE_DIR)
    # When a cwd is requested, move the test file into that subfolder and commit.
    if file_location is None:
        filepath = temp_repo_func.file_path
    else:
        filepath = temp_repo_func.repo_path / file_location / "test_file.py"
        filepath.parent.mkdir(exist_ok=True, parents=True)
        temp_repo_func.file_path.replace(filepath)
        temp_repo_func.repo.index.remove([str(temp_repo_func.file_path)])
        temp_repo_func.repo.index.add([str(filepath)])
        temp_repo_func.repo.index.commit("Initial")
    task = Task("test_file:return_str_function")
    assert arca.run(temp_repo_func.url, temp_repo_func.branch, task).output == "Some string"
    # Change the function on master, but keep a branch and a tag at the old commit.
    filepath.write_text(SECOND_RETURN_STR_FUNCTION)
    temp_repo_func.repo.create_head("new_branch")
    temp_repo_func.repo.create_tag("test_tag")
    temp_repo_func.repo.index.add([str(filepath)])
    temp_repo_func.repo.index.commit("Updated function")
    assert arca.run(temp_repo_func.url, temp_repo_func.branch, task).output == TEST_UNICODE
    # in the other branch there's still the original
    assert arca.run(temp_repo_func.url, "new_branch", task).output == "Some string"
    # test that tags work as well
    assert arca.run(temp_repo_func.url, "test_tag", task).output == "Some string"
    # Add a requirements file asking for colorama and switch the function to
    # one that reports the installed colorama version.
    temp_repo_func.repo.branches.master.checkout()
    requirements_path = temp_repo_func.repo_path / backend.requirements_location
    requirements_path.parent.mkdir(exist_ok=True, parents=True)
    requirements_path.write_text("colorama==0.3.9")
    filepath.write_text(RETURN_COLORAMA_VERSION_FUNCTION)
    temp_repo_func.repo.index.add([str(filepath), str(requirements_path)])
    temp_repo_func.repo.index.commit("Added requirements, changed to version")
    # check that it's not installed from previous tests
    _pip_action("uninstall", "colorama")
    with pytest.raises(ModuleNotFoundError):
        import colorama  # noqa
    # CurrentEnv fails because it ignores requirements
    with pytest.raises(BuildError):
        assert arca.run(temp_repo_func.url, temp_repo_func.branch, task).output == "0.3.9"
    # but when it's installed locally then it succeeds
    _pip_action("install", "colorama==0.3.9")
    assert arca.run(temp_repo_func.url, temp_repo_func.branch, task).output == "0.3.9"
    # cleanup
    _pip_action("uninstall", "colorama")
    with pytest.raises(ModuleNotFoundError):
        import colorama  # noqa
| [
"arca.Task",
"subprocess.Popen",
"arca.utils.logger.debug",
"itertools.product",
"arca.Arca",
"pytest.raises",
"arca.CurrentEnvironmentBackend",
"arca.utils.logger.info"
] | [((568, 628), 'arca.utils.logger.info', 'logger.info', (['"""Installing requirements with command: %s"""', 'cmd'], {}), "('Installing requirements with command: %s', cmd)\n", (579, 628), False, 'from arca.utils import logger\n'), ((644, 713), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (660, 713), False, 'import subprocess\n'), ((860, 913), 'arca.utils.logger.debug', 'logger.debug', (['"""Return code is %s"""', 'process.returncode'], {}), "('Return code is %s', process.returncode)\n", (872, 913), False, 'from arca.utils import logger\n'), ((918, 942), 'arca.utils.logger.debug', 'logger.debug', (['out_stream'], {}), '(out_stream)\n', (930, 942), False, 'from arca.utils import logger\n'), ((947, 971), 'arca.utils.logger.debug', 'logger.debug', (['err_stream'], {}), '(err_stream)\n', (959, 971), False, 'from arca.utils import logger\n'), ((1650, 1698), 'arca.CurrentEnvironmentBackend', 'CurrentEnvironmentBackend', ([], {'verbosity': '(2)'}), '(verbosity=2, **kwargs)\n', (1675, 1698), False, 'from arca import Arca, Task, CurrentEnvironmentBackend\n'), ((1711, 1751), 'arca.Arca', 'Arca', ([], {'backend': 'backend', 'base_dir': 'BASE_DIR'}), '(backend=backend, base_dir=BASE_DIR)\n', (1715, 1751), False, 'from arca import Arca, Task, CurrentEnvironmentBackend\n'), ((2218, 2255), 'arca.Task', 'Task', (['"""test_file:return_str_function"""'], {}), "('test_file:return_str_function')\n", (2222, 2255), False, 'from arca import Arca, Task, CurrentEnvironmentBackend\n'), ((3527, 3561), 'pytest.raises', 'pytest.raises', (['ModuleNotFoundError'], {}), '(ModuleNotFoundError)\n', (3540, 3561), False, 'import pytest\n'), ((3660, 3685), 'pytest.raises', 'pytest.raises', (['BuildError'], {}), '(BuildError)\n', (3673, 3685), False, 'import pytest\n'), ((4035, 4069), 'pytest.raises', 'pytest.raises', (['ModuleNotFoundError'], {}), '(ModuleNotFoundError)\n', 
(4048, 4069), False, 'import pytest\n'), ((1053, 1139), 'itertools.product', 'itertools.product', (["(None, 'requirements/requirements.txt')", "(None, 'test_package')"], {}), "((None, 'requirements/requirements.txt'), (None,\n 'test_package'))\n", (1070, 1139), False, 'import itertools\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import re
import json
import os
import time
import openpyxl as opx
def parse_arguments():
    """Read CLI arguments for the report generator.

    Expects sys.argv to carry: me_report_path, log_path, n_iter, out.

    Returns:
        tuple: (me_report_path, log_path, n_iter as int, out).

    Raises:
        ValueError: if n_iter is not a non-negative integer literal.
        (An ``assert`` was used before, but asserts are stripped under -O,
        so validation is now explicit.)
    """
    print(sys.argv)
    me_report_path = sys.argv[1]
    log_path = sys.argv[2]
    n_iter = sys.argv[3]
    out = sys.argv[4]
    if not n_iter.isdigit():
        raise ValueError("n_iter must be a non-negative integer, got %r" % n_iter)
    return me_report_path, log_path, int(n_iter), out
def extract_by_keyword(doc, keyword, pattern):
    """Collect all regex matches of `pattern` from lines of `doc` containing `keyword`."""
    matches = []
    for idx, line in enumerate(doc):
        if keyword not in line:
            continue
        found = re.findall(pattern, line)
        print("L%d: extracted %s from '%s'" % (idx, found, line.strip()))
        matches.extend(found)
    return matches
def process_log(fname, log_path, n_iter, keyword, pattern):
    """Extract `pattern` matches near `keyword` from log files fname % 1..n_iter.

    Returns a dict mapping each log file name to its list of matched strings.
    """
    results = {}
    for run in range(1, 1 + n_iter):
        log_name = fname % run
        full_path = os.path.join(log_path, log_name)
        with open(full_path) as handle:
            print("\nLoading %s" % full_path)
            results[log_name] = extract_by_keyword(handle, keyword, pattern)
    return results
def summarize(func):
    """Decorator: augment a {file: time} dict with min/avg summary keys.

    The wrapped function must return a dict mapping file names to times.
    The wrapper adds "min_time", "min_file" and "avg_time" entries
    (average computed over the original entries only) and returns the
    same dict. functools.wraps preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        log = func(*args, **kwargs)
        # min() replaces the sort-then-take-first pattern; ties resolve to
        # the first-inserted key, matching the old stable sort.
        min_file, min_time = min(log.items(), key=lambda item: item[1])
        avg = sum(log.values()) / len(log)
        log["min_time"] = min_time
        log["min_file"] = min_file
        log["avg_time"] = avg
        return log
    return wrapper
@summarize
def process_bert_log(log_path, n_iter):
    """Per-run bert time: TotalTime minus task_emit, extracted from bert%d.log."""
    pattern = "bert%d.log"
    totals = process_log(pattern, log_path, n_iter, "TotalTime", r"\d+.\d+")
    tasks = process_log(pattern, log_path, n_iter, "task_emit", r"\d+.\d+")
    return {name: float(totals[name][0]) - float(tasks[name][0]) for name in totals}
@summarize
def process_resnet_log(log_path, n_iter):
    """Per-run resnet time: TotalTime minus task_emit, extracted from resnet%d.log."""
    pattern = "resnet%d.log"
    totals = process_log(pattern, log_path, n_iter, "TotalTime", r"\d+.\d+")
    tasks = process_log(pattern, log_path, n_iter, "task_emit", r"\d+.\d+")
    return {name: float(totals[name][0]) - float(tasks[name][0]) for name in totals}
@summarize
def process_gpt_log(log_path, n_iter):
    """Per-run gpt time: TotalTime minus task_emit, extracted from gpt%d.log."""
    pattern = "gpt%d.log"
    totals = process_log(pattern, log_path, n_iter, "TotalTime", r"\d+.\d+")
    tasks = process_log(pattern, log_path, n_iter, "task_emit", r"\d+.\d+")
    return {name: float(totals[name][0]) - float(tasks[name][0]) for name in totals}
@summarize
def process_reid_log(log_path, n_iter):
    """Per-run reid times across 8 device logs: TotalTime minus task_emit each."""
    log = {}
    for device in range(8):
        pattern = "reid_%d_" + str(device) + ".log"
        totals = process_log(pattern, log_path, n_iter, "TotalTime", r"\d+.\d+")
        tasks = process_log(pattern, log_path, n_iter, "task_emit", r"\d+.\d+")
        for name in totals:
            log[name] = float(totals[name][0]) - float(tasks[name][0])
    return log
def write_to_me_report(log, me_report_path):
    """Append today's minimum times for each network as a new row of the ME report."""
    workbook = opx.load_workbook(me_report_path)
    sheet = workbook["Sheet"]
    row = sheet.max_row + 1
    today = time.strftime('%m%d', time.localtime())
    sheet['A%d' % row] = today
    # Column order in the report: reid, bert, resnet, gpt.
    for column, model in zip(('B', 'C', 'D', 'E'), ('reid', 'bert', 'resnet', 'gpt')):
        sheet['%s%d' % (column, row)] = round(log[model]["min_time"], 2)
    workbook.save(me_report_path)
def generate_report():
    """Parse all model logs, dump results to JSON and append them to the ME report."""
    me_report_path, log_path, n_iter, out = parse_arguments()
    # Computation order (bert, resnet, gpt, reid) matches the original flow.
    log_data = {
        "bert": process_bert_log(log_path, n_iter),
        "resnet": process_resnet_log(log_path, n_iter),
        "gpt": process_gpt_log(log_path, n_iter),
        "reid": process_reid_log(log_path, n_iter),
    }
    with open(out, "w") as f:
        json.dump(log_data, f, indent=2)
    write_to_me_report(log_data, me_report_path)
if __name__ == "__main__":
    # Script entry point: parse CLI args, process logs, emit JSON + xlsx report.
    generate_report()
| [
"time.localtime",
"openpyxl.load_workbook",
"os.path.join",
"re.findall",
"json.dump"
] | [((3489, 3522), 'openpyxl.load_workbook', 'opx.load_workbook', (['me_report_path'], {}), '(me_report_path)\n', (3506, 3522), True, 'import openpyxl as opx\n'), ((1376, 1409), 'os.path.join', 'os.path.join', (['log_path', '(fname % i)'], {}), '(log_path, fname % i)\n', (1388, 1409), False, 'import os\n'), ((3608, 3624), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3622, 3624), False, 'import time\n'), ((4396, 4428), 'json.dump', 'json.dump', (['log_data', 'f'], {'indent': '(2)'}), '(log_data, f, indent=2)\n', (4405, 4428), False, 'import json\n'), ((1114, 1136), 're.findall', 're.findall', (['pattern', 's'], {}), '(pattern, s)\n', (1124, 1136), False, 'import re\n')] |
#
# GeomProc: geometry processing library in python + numpy
#
# Copyright (c) 2008-2021 <NAME> <<EMAIL>>
# under the MIT License.
#
# See file LICENSE.txt for details on the copyright license.
#
"""This module contains the implicit function class of the GeomProc
geometry processing library used for defining implicit functions and
performing surface reconstruction.
"""
import numpy as np
import math
import random
# Implicit surface class
class impsurf:
"""A class that defines an implicit function
Attributes
----------
evaluate = pointer to a function(array_like x) : float
Function used for evaluating the implicit function at a 3D point
x, returning the signed distance of the surface to point x.
Notes
-----
An implicit function can be setup by calling one of the setup_<name>
methods. After that, the implicit function can be evaluated by
simply calling the impsurf.evaluate(x) method.
"""
def __init__(self):
self.evaluate = None
def compute_displaced_samples(self, pc, epsilon):
"""Create a set of samples displaced along point normals
Parameters
----------
pc : geomproc.pcloud
Input point cloud stored as a point cloud object. Note that
the point cloud needs to have normal vectors associated to
the points
epsilon : float
Amount of displacement to perform along normals
Returns
-------
None
Notes
-----
Given an input point cloud, this method creates a set of samples
that can be used for RBF surface reconstruction. Given an input
point cloud with n points, the method creates a sample set with
n*2 points, where n points are the original points from the
input point cloud, and another n points are created by
displacing each original sample along its normal by a value of
epsilon. The samples are stored in the temporary attribute of
the class called "sample", which is of shape (n*2, 3). Moreover,
the method also creates a vector of displacements called
"displacement", which is of shape (n*2, 1). The vector stores
the displacement of each sample, which is zero for the original
samples and epsilon for the new samples.
See Also
--------
geomproc.impsurf.impsurf.setup_rbf
"""
# Check if points have normals
if pc.normal.shape[0] == 0:
raise RuntimeError('point cloud does not have normals')
# Get number of points in cloud
n = pc.point.shape[0]
# Initialize samples and their displacements
self.sample = np.zeros((n*2, 3))
self.displacement = np.zeros((n*2, 1))
# The first portion of the samples are simply the points in the
# point cloud with displacement 0
self.sample[0:n, :] = pc.point
# Add additional samples displaced from the surface by epsilon. The
# samples are displaced along the normal direction
for i in range(n):
self.sample[n+i, :] = pc.point[i, :] + pc.normal[i, :]*epsilon
self.displacement[n+i] = epsilon
def compute_rbf(self, kernel, vectorized=False):
"""Reconstruct an implicit function from a set of point samples
Parameters
----------
kernel : function
Kernel function of the form kernel(x, y) : float that
computes the dissimilarity between two 3D points x and y,
e.g., kernel = lambda x, y: math.pow(np.linalg.norm(x - y), 3)
vectorized : boolean
If vectorized is True, the method assumes that the kernel
supplied function applies the kernel function to two sets of
points, resulting in a matrix of shape (m, n) for sets of
samples with m and n points. The default value of vectorized
is False
Returns
-------
None
Notes
-----
The method reconstructs an implicit function from a set of point
samples using the RBF method. The method assumes that a set of
samples and displacements have been stored in the temporary
attributes "sample" and "displacement", as described in the help
of method surfrec.impsurf.compute_displaced_samples. The method
then stores a temporary attribute "w" that represents the
weights of radial basis functions (RBFs). The weights define the
implicit function in the form phi(x) = \sum_{i=1}^n
w(i)*kernel(x, sample(i)). The method also stores the given
kernel in the temporary attribute "kernel".
See Also
--------
geomproc.impsurf.impsurf.compute_displaced_samples
geomproc.impsurf.impsurf.setup_rbf
"""
# Check the type of kernel we are using
if vectorized:
# Apply vectorized kernel
self.K = kernel(self.sample, self.sample)
if self.K.shape != (self.sample.shape[0], self.sample.shape[0]):
raise RuntimeError('vectorized kernel returns output of invalid size '+str(self.K.shape))
else:
# Get number of samples
n = self.sample.shape[0]
# Initialize matrix
self.K = np.zeros((n, n))
# Fill matrix entries
for i in range(n):
for j in range(n):
self.K[i, j] = kernel(self.sample[i, :], self.sample[j, :])
# Solve linear system
self.w = np.linalg.solve(self.K, self.displacement)
# Save kernel
self.kernel = kernel
# Remember kernel type
self.vectorized = vectorized
def evaluate_rbf(self, x):
"""Evaluate an implicit function encoded as an RBF
Parameters
----------
x : array_like
3D point where the RBF should be evaluated
Returns
-------
y : float
Scalar value of the implicit function at point x
Notes
-----
The method returns the value of the implicit function at a given
point x. The value is typically the signed distance of the point
to the surface. The method assumes that temporary attributes
"sample", "kernel", and "w" have been stored in the class, as
described in the help of methods
surfrec.impsurf.compute_displaced_samples and surfrec.impsurf.compute_rbf.
See Also
--------
geomproc.impsurf.impsurf.compute_displaced_samples
geomproc.impsurf.impsurf.compute_rbf
geomproc.impsurf.impsurf.setup_rbf
"""
if self.vectorized:
# Make sure input point is a row vector
inx = np.array(x)
if inx.shape[0] > 1:
inx = x[np.newaxis, :]
# Call kernel with all samples
diff = self.kernel(inx, self.sample)
# RBF
y = np.sum(self.w*diff.T)
else:
y = 0.0
for i in range(self.sample.shape[0]):
y += self.w[i]*self.kernel(x, self.sample[i, :])
return y
def setup_rbf(self, pc, epsilon, kernel, vectorized=False):
"""Setup an implicit function based on a set of point samples
Parameters
----------
pc : geomproc.pcloud
Input point cloud stored as a point cloud object. Note that
the point cloud needs to have normal vectors associated to
the points
epsilon : float
Amount of displacement to perform along normals
kernel : function
Kernel function of the form kernel(x, y) : float that
computes the dissimilarity between two 3D points x and y,
e.g., kernel = lambda x, y: math.pow(np.linalg.norm(x - y), 3)
vectorized : boolean
If vectorized is True, the method assumes that the kernel
supplied function applies the kernel function to two sets of
points, resulting in a matrix of shape (m, n) for sets of
samples with m and n points. The default value of vectorized
is False
Returns
-------
None
Notes
-----
Setup an implicit function by reconstructing the function from a
set of point samples using the RBF method. The method first
displaces the original point samples by a certain amount
epsilon, to create additional samples that help avoid a trivial
solution to the surface reconstruction problem. Then, the method
reconstructs a surface with the RBF method based on the given
kernel and solving a linear system. Once the implicit function
is setup, it can be evaluated with the "evaluate" method of the
class, which is a pointer to surfrec.impsurf.evalute_rbf.
See Also
--------
geomproc.impsurf.impsurf.compute_displaced_samples
geomproc.impsurf.impsurf.compute_rbf
geomproc.impsurf.impsurf.evaluate_rbf
"""
self.compute_displaced_samples(pc, epsilon)
self.compute_rbf(kernel, vectorized)
self.evaluate = self.evaluate_rbf
def evaluate_sphere(self, p):
"""Evaluate the implicit function of a sphere
Parameters
----------
p : array_like
3D point where the sphere should be evaluated
Returns
-------
y : float
Scalar value of the implicit function at point p
Notes
-----
The method evaluates the implicit function of a sphere at a
given point. The method assumes that the center and radius of
the sphere have been stored in the temporary attributes "center"
and "sphere" by the method surfrec.impsurf.setup_sphere.
See Also
--------
geomproc.impsurf.impsurf.setup_sphere
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_sphere(0.5)
>>> val = surf.evaluate([0, 0, 0])
"""
return ((p[0] - self.center[0])*(p[0] - self.center[0]) +
(p[1] - self.center[1])*(p[1] - self.center[1]) +
(p[2] - self.center[2])*(p[2] - self.center[2]) -
self.radius*self.radius)
def setup_sphere(self, radius=1.0, center=[0.0, 0.0, 0.0]):
"""Setup the implicit function of a sphere
Parameters
----------
radius : float
Scalar representing the radius of the sphere (the default
value is 1)
center : array_like
3D point representing the center of the sphere (the default
value is the origin)
Returns
-------
None
Notes
-----
The method sets up the implicit function for a sphere with a
given center and radius. Once the implicit function is setup, it
can be evaluated with the "evaluate" method of the class, which
is a pointer to surfrec.evaluate_sphere.
See Also
--------
geomproc.impsurf.impsurf.evaluate_sphere
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_sphere(0.5)
>>> val = surf.evaluate([0, 0, 0])
"""
self.center = center
self.radius = radius
self.evaluate = self.evaluate_sphere
def evaluate_torus(self, p):
"""Evaluate the implicit function of a torus
Parameters
----------
p : array_like
3D point where the sphere should be evaluated
Returns
-------
y : float
Scalar value of the implicit function at point p
Notes
-----
The method evaluates the implicit function of a torus at a given
point. The method assumes that the two scalars "radius1" and
"radius2" that describe the torus have been saved into temporary
attributes of the class by the method
surfrec.impsurf.setup_torus.
See Also
--------
geomproc.impsurf.impsurf.setup_torus
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_torus(0.6, 0.3)
>>> val = surf.evaluate([0, 0, 0])
"""
return math.pow(math.sqrt(p[0]*p[0] + p[1]*p[1]) - self.radius1, 2) + p[2]*p[2] - self.radius2*self.radius2
def setup_torus(self, radius1, radius2):
"""Setup the implicit function of a torus
Parameters
----------
radius1 : float
The distance from the center of the tube to the center of the torus
radius2: float
Radius of the tube
Returns
-------
None
Notes
-----
The method sets up the implicit function for a torus which is
radially symmetric about the z-axis. Once the implicit function
is setup, it can be evaluated with the "evaluate" method of the
class, which is a pointer to surfrec.evaluate_torus.
See Also
--------
geomproc.impsurf.impsurf.evaluate_torus
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_torus(0.6, 0.3)
>>> val = surf.evaluate([0, 0, 0])
"""
self.radius1 = radius1
self.radius2 = radius2
self.evaluate = self.evaluate_torus
| [
"numpy.linalg.solve",
"math.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.sum"
] | [((2726, 2746), 'numpy.zeros', 'np.zeros', (['(n * 2, 3)'], {}), '((n * 2, 3))\n', (2734, 2746), True, 'import numpy as np\n'), ((2773, 2793), 'numpy.zeros', 'np.zeros', (['(n * 2, 1)'], {}), '((n * 2, 1))\n', (2781, 2793), True, 'import numpy as np\n'), ((5602, 5644), 'numpy.linalg.solve', 'np.linalg.solve', (['self.K', 'self.displacement'], {}), '(self.K, self.displacement)\n', (5617, 5644), True, 'import numpy as np\n'), ((5355, 5371), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (5363, 5371), True, 'import numpy as np\n'), ((6825, 6836), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6833, 6836), True, 'import numpy as np\n'), ((7035, 7058), 'numpy.sum', 'np.sum', (['(self.w * diff.T)'], {}), '(self.w * diff.T)\n', (7041, 7058), True, 'import numpy as np\n'), ((12509, 12545), 'math.sqrt', 'math.sqrt', (['(p[0] * p[0] + p[1] * p[1])'], {}), '(p[0] * p[0] + p[1] * p[1])\n', (12518, 12545), False, 'import math\n')] |
import os
import csv
import subprocess
import matplotlib.pyplot as plt
from math import ceil
from tqdm import tqdm
from pandas import read_csv
from netCDF4 import Dataset, num2date
from multiprocessing import cpu_count, Process
from .plot import plot_filtered_profiles_data
def download_data(files, storage_path):
    """Fetch every Argo profile file from the rsync server into storage_path."""
    progress = tqdm(total=files.shape[0])
    with progress:
        for file in files:
            subprocess.call(["rsync", "-azh", f"vdmzrs.ifremer.fr::argo/{file}", storage_path])
            progress.update(1)
def filter_point_in_polygon(data, start, end, polygon, thread, storage_path, file_name, source_path):
    """Append rows data[start:end] whose (lat, lon) lies inside polygon to a per-thread CSV."""
    vertex_count = len(polygon)
    with open(f'{storage_path}/{file_name}-{thread}.csv', 'a', newline='') as out:
        writer = csv.writer(out)
        for row in range(start, end):
            point = [data.latitude.values[row], data.longitude.values[row]]
            # Point-in-polygon filter
            if is_inside_the_polygon(polygon, vertex_count, point):
                writer.writerow(data.values[row])
def get_data_from_nc(data, start, end, polygon, thread, storage_path, file_name, source_path):
    """Extract measurements from NetCDF files data[start:end] into a per-thread CSV.

    For each profile, real-time ("R") DATA_MODE uses the raw PRES/TEMP/PSAL
    variables; any other mode uses the *_ADJUSTED ones. Only rows where
    pressure, temperature and salinity are all positive are kept.
    The `polygon` argument is unused here; it exists so the signature
    matches the worker interface of exec_parallel_computation.
    """
    with open(f'{storage_path}/{file_name}-{thread}.csv', 'a', newline='') as file:
        writer = csv.writer(file)
        measurements = []
        for k in range(start, end):
            try:
                # Extract data from NetCDF files
                nc = Dataset(f"{source_path}/{data.values[k]}")
                platform = str(nc.variables['PLATFORM_NUMBER'][:][0], 'utf-8').strip()
                cycle = nc.variables['CYCLE_NUMBER'][:][0]
                mode = str(nc.variables['DATA_MODE'][:][0], 'utf-8').strip()
                juld = nc.variables['JULD']
                date = num2date(juld[:], juld.units)[0]
                latitude = nc.variables['LATITUDE'][:][0]
                longitude = nc.variables['LONGITUDE'][:][0]
                # Pick raw or adjusted variables once, instead of
                # duplicating the row-building logic per branch.
                if mode == 'R':
                    pres = nc.variables['PRES'][:]
                    temp = nc.variables['TEMP'][:]
                    psal = nc.variables['PSAL'][:]
                else:
                    pres = nc.variables['PRES_ADJUSTED'][:]
                    temp = nc.variables['TEMP_ADJUSTED'][:]
                    psal = nc.variables['PSAL_ADJUSTED'][:]
                for j in range(pres.shape[1]):
                    # Keep only physically valid (positive) measurements.
                    if pres[0][j] > 0 and temp[0][j] > 0 and psal[0][j] > 0:
                        measurements.append([platform, cycle, mode, date, latitude, longitude, pres[0][j], temp[0][j], psal[0][j]])
            except Exception:
                # Bare `except:` was too broad (it also swallowed
                # KeyboardInterrupt/SystemExit); a corrupt file should
                # not abort the whole batch, but only real errors are
                # caught now.
                print(f"File [error]: {data.values[k]}")
        writer.writerows(measurements)
def get_data_from_source(files, source_path, storage_path):
    """Extract measurements from all NetCDF files in parallel into measurements.csv."""
    columns = ['PLATFORM_NUMBER','CYCLE_NUMBER','DATA_MODE','DATE','LATITUDE','LONGITUDE','PRES','TEMP','PSAL']
    # Execute parallel computation with function "get_data_from_nc"
    exec_parallel_computation(files, columns, get_data_from_nc, storage_path, "measurements", source_path=source_path)
def get_index(storage_path):
    """Download the global Argo profile index file into storage_path via rsync."""
    subprocess.call(["rsync", "-azh", "vdmzrs.ifremer.fr::argo-index/ar_index_global_prof.txt", storage_path])
def get_profiles_within_polygon(data, polygon, storage_path):
    """Filter profiles to those inside polygon, plot the result, and return them.

    A cheap bounding-box pre-filter runs first; the exact point-in-polygon
    test is then executed in parallel worker processes.
    """
    # Maximum and minimum filter (bounding box of the polygon)
    lat_min, lat_max = polygon.latitude.min(), polygon.latitude.max()
    lon_min, lon_max = polygon.longitude.min(), polygon.longitude.max()
    in_bbox = ((data.latitude > lat_min) & (data.latitude < lat_max) &
               (data.longitude > lon_min) & (data.longitude < lon_max))
    filtered_data = data[in_bbox].reset_index()
    # Execute parallel computation
    exec_parallel_computation(filtered_data, filtered_data.columns, filter_point_in_polygon, storage_path, "filtered_profiles", polygon)
    filtered_profiles = read_csv(f"{storage_path}/filtered_profiles.csv")
    # Plot study area
    plot_filtered_profiles_data(polygon, filtered_profiles, data, storage_path)
    return filtered_profiles
def is_inside_the_polygon(polygon, N, p):
    """Even-odd ray casting: True when point p = [lat, lon] lies inside polygon.

    `polygon` is an indexable table of N vertices where column 0 is
    latitude and column 1 is longitude.
    """
    crossings = 0
    # xinters deliberately persists across edges, matching the classic
    # even-odd implementation for vertical-edge cases.
    xinters = 0
    prev = polygon.iloc[0]
    for idx in range(1, N + 1):
        curr = polygon.iloc[idx % N]
        if min(prev[0], curr[0]) < p[0] <= max(prev[0], curr[0]) and p[1] <= max(prev[1], curr[1]):
            if prev[0] != curr[0]:
                xinters = (p[0] - prev[0]) * (curr[1] - prev[1]) / (curr[0] - prev[0]) + prev[1]
            if prev[1] == curr[1] or p[1] <= xinters:
                crossings += 1
        prev = curr
    return crossings % 2 != 0
def exec_parallel_computation(data, columns, function, storage_path, file_name, polygon=[], source_path=""):
    # Split data into one contiguous row range per CPU, run `function` on each
    # range in a separate process writing to "<file_name>-<i>.csv", then merge
    # the partial files into "<file_name>.csv" and delete them.
    # NOTE(review): mutable default `polygon=[]` is only read, never mutated
    # here, but a tuple or None would be safer — confirm before changing it.
    # Get number of CPUs in the system
    processes = []
    cpucount = cpu_count()
    # Rows per worker; the last worker's range is clamped to the data size.
    r_range = ceil(data.shape[0]/cpucount)
    # Parallel computation
    for i in range(cpucount):
        # Create each worker's output CSV containing only the header row;
        # workers append their rows afterwards.
        with open(f"{storage_path}/{file_name}-{i}.csv", 'w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(columns)
        start = i * r_range
        end = start + r_range
        if(end > data.shape[0]):
            end = data.shape[0]
        p = Process(target=function, args=(data, start, end, polygon, i, storage_path, file_name, source_path))
        processes.append(p)
        p.start()
    # Block threads until the process join() method is called
    for p in processes:
        p.join()
    # Collect parallel compute data
    filtered_profiles_path = f"{storage_path}/{file_name}.csv"
    with open(filtered_profiles_path, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(columns)
        for i in range(cpucount):
            writer.writerows(read_csv(f"{storage_path}/{file_name}-{i}.csv").values)
            os.remove(f"{storage_path}/{file_name}-{i}.csv")
| [
"math.ceil",
"pandas.read_csv",
"netCDF4.num2date",
"multiprocessing.Process",
"tqdm.tqdm",
"csv.writer",
"netCDF4.Dataset",
"multiprocessing.cpu_count",
"subprocess.call",
"os.remove"
] | [((3054, 3164), 'subprocess.call', 'subprocess.call', (["['rsync', '-azh', 'vdmzrs.ifremer.fr::argo-index/ar_index_global_prof.txt',\n storage_path]"], {}), "(['rsync', '-azh',\n 'vdmzrs.ifremer.fr::argo-index/ar_index_global_prof.txt', storage_path])\n", (3069, 3164), False, 'import subprocess\n'), ((3667, 3716), 'pandas.read_csv', 'read_csv', (['f"""{storage_path}/filtered_profiles.csv"""'], {}), "(f'{storage_path}/filtered_profiles.csv')\n", (3675, 3716), False, 'from pandas import read_csv\n'), ((4486, 4497), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4495, 4497), False, 'from multiprocessing import cpu_count, Process\n'), ((4512, 4542), 'math.ceil', 'ceil', (['(data.shape[0] / cpucount)'], {}), '(data.shape[0] / cpucount)\n', (4516, 4542), False, 'from math import ceil\n'), ((371, 397), 'tqdm.tqdm', 'tqdm', ([], {'total': 'files.shape[0]'}), '(total=files.shape[0])\n', (375, 397), False, 'from tqdm import tqdm\n'), ((770, 786), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (780, 786), False, 'import csv\n'), ((1164, 1180), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (1174, 1180), False, 'import csv\n'), ((4891, 4994), 'multiprocessing.Process', 'Process', ([], {'target': 'function', 'args': '(data, start, end, polygon, i, storage_path, file_name, source_path)'}), '(target=function, args=(data, start, end, polygon, i, storage_path,\n file_name, source_path))\n', (4898, 4994), False, 'from multiprocessing import cpu_count, Process\n'), ((5320, 5336), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (5330, 5336), False, 'import csv\n'), ((446, 533), 'subprocess.call', 'subprocess.call', (["['rsync', '-azh', f'vdmzrs.ifremer.fr::argo/{file}', storage_path]"], {}), "(['rsync', '-azh', f'vdmzrs.ifremer.fr::argo/{file}',\n storage_path])\n", (461, 533), False, 'import subprocess\n'), ((4702, 4718), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (4712, 4718), False, 'import csv\n'), ((5501, 5549), 
'os.remove', 'os.remove', (['f"""{storage_path}/{file_name}-{i}.csv"""'], {}), "(f'{storage_path}/{file_name}-{i}.csv')\n", (5510, 5549), False, 'import os\n'), ((1285, 1327), 'netCDF4.Dataset', 'Dataset', (['f"""{source_path}/{data.values[k]}"""'], {}), "(f'{source_path}/{data.values[k]}')\n", (1292, 1327), False, 'from netCDF4 import Dataset, num2date\n'), ((1524, 1553), 'netCDF4.num2date', 'num2date', (['JULD[:]', 'JULD.units'], {}), '(JULD[:], JULD.units)\n', (1532, 1553), False, 'from netCDF4 import Dataset, num2date\n'), ((5433, 5480), 'pandas.read_csv', 'read_csv', (['f"""{storage_path}/{file_name}-{i}.csv"""'], {}), "(f'{storage_path}/{file_name}-{i}.csv')\n", (5441, 5480), False, 'from pandas import read_csv\n')] |
from bearlibterminal import terminal as term
from spaceship.engine import Engine
from spaceship.menus.main import Main
def test_engine_init():
    """A freshly constructed Engine should start on the Main menu scene."""
    engine = Engine()
    assert isinstance(engine.scene, Main)
def test_engine_run():
    """Smoke test: starting the engine's main loop should not raise."""
    engine = Engine()
    engine.run()

if __name__ == "__main__":
    test_engine_run()
"spaceship.engine.Engine"
] | [((152, 160), 'spaceship.engine.Engine', 'Engine', ([], {}), '()\n', (158, 160), False, 'from spaceship.engine import Engine\n'), ((230, 238), 'spaceship.engine.Engine', 'Engine', ([], {}), '()\n', (236, 238), False, 'from spaceship.engine import Engine\n')] |
# coding=utf-8
import os
import re
from collections import OrderedDict
from xml.dom import minidom
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from letterparser import build, parse, utils, zip_lib
# max level of recursion adding content blocks supported
MAX_LEVEL = 5
def set_if_value(element, name, value):
    """Set attribute *name* on *element*, but only when *value* is non-empty."""
    if not value:
        return
    element.set(name, value)
def generate_xml_from_file(
    file_name, root_tag="root", pretty=False, indent="", config=None, temp_dir="tmp"
):
    """Generate JATS XML from *file_name*, dispatching on the extension:
    .zip archives are unpacked first, anything else is treated as docx."""
    handler = (
        generate_xml_from_zip
        if re.match(r".*\.[Zz][Ii][Pp]$", file_name)
        else generate_xml_from_docx
    )
    return handler(
        file_name,
        root_tag=root_tag,
        pretty=pretty,
        indent=indent,
        config=config,
        temp_dir=temp_dir,
    )
def generate_xml_from_zip(
    file_name, root_tag="root", pretty=False, indent="", config=None, temp_dir="tmp"
):
    """Unzip the archive into *temp_dir*, then generate JATS XML from the
    docx file found inside it."""
    docx_file_name, _asset_file_names = zip_lib.unzip_zip(file_name, temp_dir)
    return generate_xml_from_docx(
        docx_file_name,
        root_tag=root_tag,
        pretty=pretty,
        indent=indent,
        config=config,
        temp_dir=temp_dir,
    )
def generate_xml_from_docx(
    file_name, root_tag="root", pretty=False, indent="", config=None, temp_dir="tmp"
):
    """Build Article objects from the docx, assemble the JATS tree, and
    serialise it to a string."""
    articles = docx_to_articles(file_name, root_tag, config, temp_dir)
    root = generate(articles, root_tag, temp_dir)
    return output_xml(root, pretty, indent)
def docx_to_articles(file_name, root_tag="root", config=None, temp_dir="tmp"):
    """Parse the docx into JATS content, then build Article objects from it."""
    jats_content = parse.best_jats(file_name, root_tag, config=config, temp_dir=temp_dir)
    return build.build_articles(jats_content, file_name=file_name, config=config)
def generate(articles, root_tag="root", temp_dir="tmp"):
    """Build the final JATS XML tree for *articles* and return the root
    Element: one <sub-article> per article, with deterministic asset ids,
    <xref> wrappers around asset label mentions, and asset hrefs renamed
    to match the files present in *temp_dir*."""
    # Create the root XML node
    root = Element(root_tag)
    # set namespaces needed by formulae (mml) and hyperlinks (xlink)
    root.set("xmlns:ali", "http://www.niso.org/schemas/ali/1.0/")
    root.set("xmlns:mml", "http://www.w3.org/1998/Math/MathML")
    root.set("xmlns:xlink", "http://www.w3.org/1999/xlink")
    for article in articles:
        sub_article_tag = SubElement(root, "sub-article")
        set_if_value(sub_article_tag, "article-type", article.article_type)
        set_if_value(sub_article_tag, "id", article.id)
        set_front_stub(sub_article_tag, article)
        set_body(sub_article_tag, article)
        # set tag id attributes per sub-article (prefix depends on tag name)
        set_id_attributes(sub_article_tag, "mml:math", article.id)
        set_id_attributes(sub_article_tag, "disp-formula", article.id)
        set_id_attributes(sub_article_tag, "fig", article.id)
        set_id_attributes(sub_article_tag, "table-wrap", article.id)
        set_id_attributes(sub_article_tag, "media", article.id)
        # highlight mentions of fig, media, table with an xref tag
        asset_xref_tags(sub_article_tag)
    # rename asset files in the XML to the actual file names on disk
    rename_assets(root, temp_dir)
    return root
def rename_assets(root, temp_dir="tmp"):
    """Rewrite graphic/media xlink:href values that match the base name of
    a file present in *temp_dir* to the file's full name."""
    # map each file's base name (without extension) to its full file name
    base_to_file = OrderedDict()
    for asset_file in sorted(os.listdir(temp_dir)):
        base = utils.get_file_name_file(asset_file).split(".")[0]
        if base:
            base_to_file[base] = asset_file
    # rewrite hrefs on the tags that can reference asset files
    for xpath in (".//graphic", ".//media"):
        for tag in root.findall(xpath):
            href = tag.get("xlink:href")
            if href and href in base_to_file:
                tag.set("xlink:href", base_to_file[href])
def id_prefix(tag_name):
    """Return the id attribute prefix used for *tag_name* asset tags.

    Unknown tag names yield the string "None" (preserving historic
    behaviour of str() over a missing dict entry).
    """
    prefix_by_tag = {
        "mml:math": "m",
        "disp-formula": "equ",
        "fig": "fig",
        "table-wrap": "table",
        "media": "video",
    }
    return str(prefix_by_tag.get(tag_name))
def set_id_attributes(root, tag_name, article_id):
    """Set an id attribute on every *tag_name* tag that lacks one.

    The counter advances for every tag encountered (including ones that
    already have an id), so generated ids reflect document order.
    """
    prefix = id_prefix(tag_name)
    for index, tag in enumerate(root.iter(tag_name), start=1):
        if tag.get("id") is None:
            tag.set("id", "%s%s%s" % (article_id, prefix, index))
def set_front_stub(parent, article):
    """Append a <front-stub> tag to *parent* holding the article's DOI,
    title, and any related-object links (e.g. an Editor's evaluation)."""
    front_stub_tag = SubElement(parent, "front-stub")
    if article.doi:
        doi_tag = SubElement(front_stub_tag, "article-id")
        doi_tag.set("pub-id-type", "doi")
        doi_tag.text = article.doi
    if article.title:
        title_group_tag = SubElement(front_stub_tag, "title-group")
        article_title_tag = SubElement(title_group_tag, "article-title")
        article_title_tag.text = article.title
    # add related-object link to Editor's evaluation
    related_object_num = 1
    for related_material in article.related_articles:
        if related_material.ext_link_type and related_material.xlink_href:
            related_object_tag = SubElement(front_stub_tag, "related-object")
            # id is unique per article, e.g. "sa1ro1", "sa1ro2", ...
            related_object_tag.set("id", "%sro%s" % (article.id, related_object_num))
            related_object_tag.set("object-id-type", "id")
            related_object_tag.set(
                "object-id", utils.object_id_from_uri(related_material.xlink_href)
            )
            related_object_tag.set("link-type", related_material.ext_link_type)
            related_object_tag.set("xlink:href", related_material.xlink_href)
            related_object_num += 1
def set_body(parent, article):
    """Append a <body> tag to *parent* filled with the article's content
    blocks, and return it."""
    body = SubElement(parent, "body")
    set_content_blocks(body, article.content_blocks)
    return body
def set_content_blocks(parent, content_blocks, level=1):
    """Recursively append *content_blocks* as child tags of *parent*.

    Raises Exception once nesting exceeds MAX_LEVEL. Only the block types
    listed below are serialised; for other block types block_tag stays
    None and their nested blocks are not processed.
    """
    if level > MAX_LEVEL:
        raise Exception("Maximum level of nested content blocks reached")
    for block in content_blocks:
        block_tag = None
        if block.block_type in [
            "boxed-text",
            "disp-formula",
            "disp-quote",
            "fig",
            "list",
            "media",
            "p",
            "table-wrap",
        ]:
            # retain standard tag attributes as well as any specific ones from the block object
            if block.content:
                utils.append_to_parent_tag(
                    parent,
                    block.block_type,
                    block.content,
                    utils.XML_NAMESPACE_MAP,
                    attributes=block.attr_names(),
                    attributes_text=block.attr_string(),
                )
                # the tag just appended is the last child of parent
                block_tag = parent[-1]
            else:
                # add empty tags too
                block_tag = SubElement(parent, block.block_type)
                block_tag.text = block.content
                for key, value in block.attr.items():
                    block_tag.set(key, value)
        if block_tag is not None and block.content_blocks:
            # recursion into nested content blocks, one level deeper
            set_content_blocks(block_tag, block.content_blocks, level + 1)
def labels(root):
    """Collect id/type/label-text entries for labelled fig, media and
    table-wrap assets found anywhere under *root*."""
    type_by_tag = (("fig", "fig"), ("media", "video"), ("table-wrap", "table"))
    found = []
    for tag_name, asset_type in type_by_tag:
        for asset_tag in root.findall(".//" + tag_name):
            label_tags = asset_tag.findall(".//label")
            # only assets that have both an id and at least one label count
            if not (asset_tag.get("id") and label_tags):
                continue
            found.append(
                OrderedDict(
                    [
                        ("id", asset_tag.get("id")),
                        ("type", asset_type),
                        ("text", label_tags[0].text),
                    ]
                )
            )
    return found
def asset_xref_tags(root):
    """
    Wrap mentions of asset labels in paragraphs with an <xref> tag.
    ElementTree cannot replace a tag in place, so each changed <p> is
    removed from its parent and a re-parsed replacement inserted at the
    same index (see p_tag_parent_asset_xref).
    """
    asset_labels = labels(root)
    for label in asset_labels:
        text = label.get("text")
        if text:
            # strip a trailing full stop so partial panel references match
            label["text"] = text.rstrip(".")
    # process every tag that contains at least one <p> child
    for p_tag_parent in root.findall(".//p/.."):
        p_tag_parent_asset_xref(p_tag_parent, asset_labels)
def p_tag_parent_asset_xref(p_tag_parent, asset_labels):
    """For each <p> child of *p_tag_parent*, wrap asset label mentions in
    <xref> tags by string rewriting, then replace the old <p> with a
    re-parsed copy at the same child index."""
    # loop through the p tags in this parent tag, keeping track of the p tag index
    for tag_index, child_tag in enumerate(p_tag_parent.iterfind("*")):
        if not child_tag.tag == "p":
            continue
        tag_string = build.element_to_string(child_tag)
        modified_tag_string = xml_string_asset_xref(tag_string, asset_labels)
        if tag_string != modified_tag_string:
            # add namespaces before parsing again, otherwise prefixed
            # attributes like xlink:href would fail to parse
            p_tag_string = "<p %s>" % utils.reparsing_namespaces(
                utils.XML_NAMESPACE_MAP
            )
            modified_tag_string = re.sub(
                r"^<p>", p_tag_string, str(modified_tag_string)
            )
            new_p_tag = ElementTree.fromstring(modified_tag_string)
            # remove old tag
            p_tag_parent.remove(child_tag)
            # insert the new tag at the position the old one occupied
            p_tag_parent.insert(tag_index, new_p_tag)
def profile_asset_labels(labels):
    """Mark each label as unique unless another label in *labels* starts
    with it (e.g. "Figure 1" is not unique next to "Figure 10")."""
    profiled = []
    for label in labels:
        has_longer = any(
            other.startswith(label) and other != label for other in labels
        )
        profiled.append(OrderedDict([("label", label), ("unique", not has_longer)]))
    return profiled
def sort_labels(labels_data):
    """Order asset label entries with the unique ones first, preserving the
    original relative order within each group (stable sort)."""
    return sorted(labels_data, key=lambda item: not item.get("unique"))
def label_match_pattern(xref_open_tag, label_text):
    """Regular expression matching mentions of *label_text*, optionally
    followed by panel suffixes such as "A" or "A-F", that are not already
    preceded by *xref_open_tag*.

    Fix: the character class is now [-a-zA-Z]; the previous [-a-zA-z]
    used the ASCII range A-z, which accidentally also matched the
    punctuation characters between "Z" and "a" ("[", "\\", "]", "^",
    "_" and the backtick).
    """
    return r"(?<!%s)(%s[-a-zA-Z]*)" % (xref_open_tag, label_text)
def label_matches(xml_string, xref_open_tag, label_text):
    """List label mentions in *xml_string* that are not already preceded by
    the xref open tag."""
    pattern = label_match_pattern(xref_open_tag, label_text)
    return re.findall(pattern, xml_string)
def xml_string_asset_xref(xml_string, asset_labels):
    """
    Wrap occurences of each asset label in the XML string with an <xref> tag
    The label in the text can also include a specific panel name, e.g.
    a label of "Author response image 1", when adding <xref> tags to the text it can result in
    all of these example possibilites
    <xref ref-type="fig" rid="sa2fig1">Author response image 1</xref>
    <xref ref-type="fig" rid="sa2fig1">Author response image 1B</xref>
    <xref ref-type="fig" rid="sa2fig1">Author response image 1A-F</xref>
    <xref ref-type="fig" rid="sa2fig1">Author response image 1A</xref> and B
    """
    for asset_label in asset_labels:
        if asset_label.get("text") and asset_label.get("text") in str(xml_string):
            attr = {"rid": asset_label.get("id"), "ref-type": asset_label.get("type")}
            xref_open_tag = utils.open_tag("xref", attr)
            xref_close_tag = utils.close_tag("xref")
            # look for label in the text but not preceeded by the xref open tag we want to add
            label_match_groups = label_matches(
                xml_string, xref_open_tag, asset_label.get("text")
            )
            # wrap unique (longer) mentions first so that a shorter prefix
            # label does not split a longer one, e.g. "Figure 10" before "Figure 1"
            labels = sort_labels(profile_asset_labels(label_match_groups))
            for label in labels:
                # negative lookbehind avoids double-wrapping already tagged text
                safe_match_pattern = r"(?<!%s)%s" % (xref_open_tag, label.get("label"))
                replacement_pattern = r"%s%s%s" % (
                    xref_open_tag,
                    label.get("label"),
                    xref_close_tag,
                )
                xml_string = re.sub(safe_match_pattern, replacement_pattern, xml_string)
    return xml_string
def output_xml(root, pretty=False, indent=""):
    """Serialise the root XML Element to utf-8 encoded bytes, optionally
    pretty-printed with *indent*."""
    encoding = "utf-8"
    xml_bytes = ElementTree.tostring(root, encoding)
    xml_bytes = utils.xml_string_fix_namespaces(xml_bytes, root.tag)
    reparsed = minidom.parseString(xml_bytes)
    # deliberately an identity check, matching the original contract
    if pretty is True:
        return reparsed.toprettyxml(indent, encoding=encoding)
    return reparsed.toxml(encoding=encoding)
def output_xml_escaped(root, pretty=False, indent=""):
    """Serialise *root* like output_xml(), then replace character entities."""
    serialised = output_xml(root, pretty, indent)
    return utils.replace_character_entities(serialised)
| [
"letterparser.utils.open_tag",
"letterparser.utils.reparsing_namespaces",
"os.listdir",
"letterparser.utils.xml_string_fix_namespaces",
"letterparser.build.build_articles",
"xml.dom.minidom.parseString",
"letterparser.utils.object_id_from_uri",
"xml.etree.ElementTree.fromstring",
"collections.Ordere... | [((668, 709), 're.match', 're.match', (['""".*\\\\.[Zz][Ii][Pp]$"""', 'file_name'], {}), "('.*\\\\.[Zz][Ii][Pp]$', file_name)\n", (676, 709), False, 'import re\n'), ((1309, 1347), 'letterparser.zip_lib.unzip_zip', 'zip_lib.unzip_zip', (['file_name', 'temp_dir'], {}), '(file_name, temp_dir)\n', (1326, 1347), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((2029, 2099), 'letterparser.parse.best_jats', 'parse.best_jats', (['file_name', 'root_tag'], {'config': 'config', 'temp_dir': 'temp_dir'}), '(file_name, root_tag, config=config, temp_dir=temp_dir)\n', (2044, 2099), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((2125, 2195), 'letterparser.build.build_articles', 'build.build_articles', (['jats_content'], {'file_name': 'file_name', 'config': 'config'}), '(jats_content, file_name=file_name, config=config)\n', (2145, 2195), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((2352, 2369), 'xml.etree.ElementTree.Element', 'Element', (['root_tag'], {}), '(root_tag)\n', (2359, 2369), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((3714, 3727), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3725, 3727), False, 'from collections import OrderedDict\n'), ((4856, 4888), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""front-stub"""'], {}), "(parent, 'front-stub')\n", (4866, 4888), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((6062, 6088), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', '"""body"""'], {}), "(parent, 'body')\n", (6072, 6088), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((7618, 7692), 'collections.OrderedDict', 'OrderedDict', (["[('fig', 'fig'), ('media', 'video'), ('table-wrap', 'table')]"], {}), "([('fig', 'fig'), ('media', 'video'), ('table-wrap', 'table')])\n", (7629, 7692), False, 'from collections import OrderedDict\n'), ((12907, 12943), 
'xml.etree.ElementTree.tostring', 'ElementTree.tostring', (['root', 'encoding'], {}), '(root, encoding)\n', (12927, 12943), False, 'from xml.etree import ElementTree\n'), ((12963, 13018), 'letterparser.utils.xml_string_fix_namespaces', 'utils.xml_string_fix_namespaces', (['rough_string', 'root.tag'], {}), '(rough_string, root.tag)\n', (12994, 13018), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((13034, 13067), 'xml.dom.minidom.parseString', 'minidom.parseString', (['rough_string'], {}), '(rough_string)\n', (13053, 13067), False, 'from xml.dom import minidom\n'), ((2636, 2667), 'xml.etree.ElementTree.SubElement', 'SubElement', (['root', '"""sub-article"""'], {}), "(root, 'sub-article')\n", (2646, 2667), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((3672, 3692), 'os.listdir', 'os.listdir', (['temp_dir'], {}), '(temp_dir)\n', (3682, 3692), False, 'import os\n'), ((4927, 4967), 'xml.etree.ElementTree.SubElement', 'SubElement', (['front_stub_tag', '"""article-id"""'], {}), "(front_stub_tag, 'article-id')\n", (4937, 4967), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((5093, 5134), 'xml.etree.ElementTree.SubElement', 'SubElement', (['front_stub_tag', '"""title-group"""'], {}), "(front_stub_tag, 'title-group')\n", (5103, 5134), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((5163, 5207), 'xml.etree.ElementTree.SubElement', 'SubElement', (['title_group_tag', '"""article-title"""'], {}), "(title_group_tag, 'article-title')\n", (5173, 5207), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((9143, 9177), 'letterparser.build.element_to_string', 'build.element_to_string', (['child_tag'], {}), '(child_tag)\n', (9166, 9177), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((5497, 5541), 'xml.etree.ElementTree.SubElement', 'SubElement', (['front_stub_tag', '"""related-object"""'], {}), "(front_stub_tag, 'related-object')\n", (5507, 5541), False, 'from 
xml.etree.ElementTree import Element, SubElement\n'), ((9617, 9660), 'xml.etree.ElementTree.fromstring', 'ElementTree.fromstring', (['modified_tag_string'], {}), '(modified_tag_string)\n', (9639, 9660), False, 'from xml.etree import ElementTree\n'), ((11974, 12002), 'letterparser.utils.open_tag', 'utils.open_tag', (['"""xref"""', 'attr'], {}), "('xref', attr)\n", (11988, 12002), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((12032, 12055), 'letterparser.utils.close_tag', 'utils.close_tag', (['"""xref"""'], {}), "('xref')\n", (12047, 12055), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((5752, 5805), 'letterparser.utils.object_id_from_uri', 'utils.object_id_from_uri', (['related_material.xlink_href'], {}), '(related_material.xlink_href)\n', (5776, 5805), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((7174, 7210), 'xml.etree.ElementTree.SubElement', 'SubElement', (['parent', 'block.block_type'], {}), '(parent, block.block_type)\n', (7184, 7210), False, 'from xml.etree.ElementTree import Element, SubElement\n'), ((7941, 7954), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7952, 7954), False, 'from collections import OrderedDict\n'), ((9391, 9442), 'letterparser.utils.reparsing_namespaces', 'utils.reparsing_namespaces', (['utils.XML_NAMESPACE_MAP'], {}), '(utils.XML_NAMESPACE_MAP)\n', (9417, 9442), False, 'from letterparser import build, parse, utils, zip_lib\n'), ((12687, 12746), 're.sub', 're.sub', (['safe_match_pattern', 'replacement_pattern', 'xml_string'], {}), '(safe_match_pattern, replacement_pattern, xml_string)\n', (12693, 12746), False, 'import re\n'), ((3786, 3821), 'letterparser.utils.get_file_name_file', 'utils.get_file_name_file', (['file_name'], {}), '(file_name)\n', (3810, 3821), False, 'from letterparser import build, parse, utils, zip_lib\n')] |
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import tensorflow as tf
import os
# Names of the TF1 frozen segmentation models to convert, with their
# graph input and output tensor names (parallel lists).
model_names_=['deeplabv3_mnv2_cityscapes_train', 'edgetpu-deeplab', 'edgetpu-deeplab-slim']
input_arrays_=[['ImageTensor'], ['ImageTensor'], ['ImageTensor']]
output_arrays_ = [['SemanticPredictions'], ['SemanticPredictions'], ['SemanticPredictions']]

for model_name, input_arrays, output_arrays in zip(model_names_, input_arrays_, output_arrays_):
    # paths for the frozen graph input and the tflite output
    local_dir = f"./downloads/tf1/seg/{model_name}"
    graph_def_file = f'{local_dir}/frozen_inference_graph.pb'
    output_file = f'{local_dir}/tflite/model.tflite'
    # optionally pin input shapes, e.g. {input_arrays[0]: [1, 512, 1024, 3]}
    input_shapes = None
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    # Converting a GraphDef from file.
    converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, input_arrays, output_arrays, input_shapes=input_shapes)
    tflite_model = converter.convert()
    # fix: close the output file handle instead of leaking it via open().write()
    with open(output_file, "wb") as model_file:
        model_file.write(tflite_model)
| [
"tensorflow.compat.v1.lite.TFLiteConverter.from_frozen_graph",
"os.makedirs",
"os.path.split"
] | [((2213, 2251), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (2224, 2251), False, 'import os\n'), ((2306, 2433), 'tensorflow.compat.v1.lite.TFLiteConverter.from_frozen_graph', 'tf.compat.v1.lite.TFLiteConverter.from_frozen_graph', (['graph_def_file', 'input_arrays', 'output_arrays'], {'input_shapes': 'input_shapes'}), '(graph_def_file,\n input_arrays, output_arrays, input_shapes=input_shapes)\n', (2357, 2433), True, 'import tensorflow as tf\n'), ((2179, 2205), 'os.path.split', 'os.path.split', (['output_file'], {}), '(output_file)\n', (2192, 2205), False, 'import os\n')] |
import pickle
import numpy as np
import matplotlib.pyplot as plt
# Inspect the two pickled evaluation records produced by the quadratic runs.
with open('./quadratic/eval_record.pickle', 'rb') as eval_file:
    eval_record = pickle.load(eval_file)
print('Mat_record', len(eval_record['Mat_record']))
# print('bias', eval_record['inter_gradient_record'])
# print('constant', eval_record['intra_record'])

with open('./quadratic/evaluate_record.pickle', 'rb') as evaluate_file:
    evaluate_record = pickle.load(evaluate_file)
x = np.array(evaluate_record['x_record'])
print('x_record', x.shape)
# print('bias', evaluate_record['inter_gradient_record'])
# print('constant', evaluate_record['intra_record'])

# plotting scratch, kept for reference:
# ax = plt.axes(yscale='log'); ax.plot(range(10000), eval_record, 'b'); plt.show('loss')
"numpy.array",
"pickle.load"
] | [((382, 409), 'numpy.array', 'np.array', (["data1['x_record']"], {}), "(data1['x_record'])\n", (390, 409), True, 'import numpy as np\n'), ((135, 152), 'pickle.load', 'pickle.load', (['loss'], {}), '(loss)\n', (146, 152), False, 'import pickle\n'), ((359, 377), 'pickle.load', 'pickle.load', (['loss1'], {}), '(loss1)\n', (370, 377), False, 'import pickle\n')] |
import copy
# Sample raw IRC-style messages and a fake client address, used by main()
# and ad-hoc testing of the master class below.
initialMsg = ':JACK! {0.0.0.0, 5000} PRIVMSG #: /JOIN #\n' # {IP,port}
msg = "PRIVMSG #cats: Hello World! I'm back!\n"
qmsg = "PRIVMSG #cats: /part #cats"
client = "('127.0.0.1', 41704)"
# template for a parsed message; see master.parse for the field meanings
message = {'nick': '', 'client': '', 'chan': '', 'cmd': '', 'msg': ''}
test = ":BEN! {('127.0.0.1', 43452)} PRIVMSG #: /JOIN #"
class master():
    """In-memory IRC-style channel registry.

    ``self.room`` maps a channel name to a dict of {client address: nick}.
    Raw PRIVMSG strings are parsed by :meth:`parse` and any commands
    embedded in them (/join, /part, /list) are dispatched by :meth:`eval`.

    Fixes over the previous revision:
    * the two ``list`` definitions shadowed each other, so ``self.list()``
      with no argument raised TypeError -- merged into one method with an
      optional channel argument;
    * the recursive lookups compared strings with ``is`` (identity) and
      discarded the recursive return value -- they now use ``==`` and
      propagate the result;
    * ``parse`` raised IndexError on commands without an argument.
    """

    def __init__(self):
        self.room = {'#': {}}  # default channel '#' always exists
        self.var = ''  # scratch slot filled by the recursive lookups

    def eval(self, data, client):
        """Parse raw *data* from *client* and run any embedded command.

        Returns a listing for /list commands, otherwise None.
        """
        print('Evaluating data')
        msg = self.parse(data, client)  # chop raw data up into hashable pieces
        if msg['cmd']:
            print(f'Processing Command: {msg["cmd"]}')
            cmd = msg['cmd'].lower()
            if cmd == 'join':
                self.add_client(msg['client'], msg['chan'], msg['nick'])
            elif cmd == 'part':
                self.rm_client(msg['client'], msg['chan'], msg['nick'])
            elif cmd == 'list':
                # empty argument lists all channels, otherwise the members
                return self.list(msg['msg'] or None)
        print(f'\nmsg: {msg}')
        print(f'room: {self.room}')

    # Credit to an answer on Stackoverflow
    def recursive_find_nick(self, to_match, d):
        """Return the nick keyed by client address *to_match* anywhere in
        the (possibly nested) dict *d*; also cached in ``self.var``."""
        for k, v in d.items():
            if isinstance(v, dict):
                found = self.recursive_find_nick(to_match, v)
                if found is not None:
                    return found
            elif to_match == k:
                print('Client exists {0} : {1}'.format(k, v))
                self.var = v
                return v
        return None

    def recursive_find_client(self, to_match, d):
        """Return the client address whose nick equals *to_match* anywhere
        in the (possibly nested) dict *d*; also cached in ``self.var``."""
        for k, v in d.items():
            if isinstance(v, dict):
                found = self.recursive_find_client(to_match, v)
                if found is not None:
                    return found
            elif to_match == v:
                print('Nick exists {0} : {1}'.format(k, v))
                self.var = k
                return k
        return None

    def parse(self, data, client):
        """Split a raw PRIVMSG line into nick/client/chan/cmd/msg fields.

        A leading ':' prefix means the message was forwarded by another
        server and already carries the nick and client address.
        """
        message = {'nick': '', 'client': '', 'chan': '', 'cmd': '', 'msg': ''}
        if data[0] == ':':
            print('\nChecking prefix...')
            string = data.split('!', 1)
            message['nick'] = copy.deepcopy(string[0].lstrip(':'))
            string = string[1].split('PRIVMSG', 1)
            message['client'] = copy.deepcopy(string[0])
            string = string[1].split(':', 1)
            message['chan'] = copy.deepcopy(string[0].lstrip(' '))
        else:
            print('Parsing message...')
            string = data.split('PRIVMSG', 1)
            string = string[1].split(':', 1)
            message['chan'] = copy.deepcopy(string[0].lstrip(' '))
            # register the sender if we have never seen this client before
            self.var = None
            self.find_nick(client)
            if self.var is None:
                self.add_client(client, message['chan'], message['nick'])
                self.var = None
                self.find_nick(client)
            message['client'] = copy.deepcopy(client)
            message['nick'] = copy.deepcopy(self.var)
        # extract a "/cmd [arg]" body if present
        if string[1].find('/', 1, 2) == 1:
            tokens = string[1].lstrip(' /').split(' ')
            message['cmd'] = copy.deepcopy(tokens[0].lstrip('/').rstrip('\n'))
            if len(tokens) > 1:
                message['msg'] = copy.deepcopy(tokens[1].rstrip('\n'))
        else:
            message['msg'] = copy.deepcopy(string[1].rstrip('\n'))
        return message

    def find_nick(self, client):
        """Nick registered for *client*, or None."""
        return self.recursive_find_nick(client, self.room)

    def find_client(self, nick):
        """Client address registered under *nick*, or None."""
        return self.recursive_find_client(nick, self.room)

    def find_chan(self, chan):
        """True if channel *chan* exists."""
        if chan in self.room:
            print(f'{chan} exists')
            return True
        print(f'{chan} does not exist')
        return False

    def create_chan(self, chan, client):
        """Create an empty channel named *chan*."""
        self.room[chan] = {}

    def add_client(self, client, chan, nick):
        """Add *client* to *chan*, creating the channel when missing and
        reusing the client's already-known nick if one is registered."""
        if not self.find_chan(chan):
            print('Creating new channel')
            self.create_chan(chan, client)
        known_nick = self.find_nick(client)
        if known_nick is None:
            self.room[chan][client] = nick or 'Guest'
        else:
            self.room[chan][client] = known_nick
        print(f'{nick} joined {chan}')

    def rm_client(self, client, chan, nick):
        """Remove *client* from *chan*, deleting the channel once empty."""
        print(f'{client} [{nick}] parting from {chan}')
        if self.find_chan(chan):
            del self.room[chan][client]
            if self.room[chan] == {}:
                print(f'{chan} is an empty room. Deleting...')
                del self.room[chan]
        else:
            print(f'Unable to find {chan}')

    def list(self, chan=None):
        """Without *chan*, return all channel names; with *chan*, return
        the channel's member nicks, or None when the channel is unknown."""
        if chan is None:
            return [key for key in self.room.keys()]
        if self.find_chan(chan):
            return [self.room[chan][key] for key in self.room[chan]]
        print('Error: Invalid channel')
        return None
def main():
    """Manual smoke test: feed one prefixed JOIN message through master."""
    channels = master()
    channels.eval(test, client)

if __name__ == '__main__':
    main()
| [
"copy.deepcopy"
] | [((2767, 2791), 'copy.deepcopy', 'copy.deepcopy', (['string[0]'], {}), '(string[0])\n', (2780, 2791), False, 'import copy\n'), ((3603, 3624), 'copy.deepcopy', 'copy.deepcopy', (['client'], {}), '(client)\n', (3616, 3624), False, 'import copy\n'), ((3655, 3678), 'copy.deepcopy', 'copy.deepcopy', (['self.var'], {}), '(self.var)\n', (3668, 3678), False, 'import copy\n')] |
#
# (C) Copyright IBM Corp. 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures as cf
import inspect
import json
import logging
import os
import re
import threading
import time
from uuid import uuid4
from pathlib import Path
import socket
from ibm_cloud_sdk_core import ApiException
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_vpc import VpcV1
from ray.autoscaler._private.cli_logger import cli_logger
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import (TAG_RAY_CLUSTER_NAME, NODE_KIND_HEAD,
NODE_KIND_WORKER, TAG_RAY_NODE_KIND, TAG_RAY_NODE_NAME)
from ray.autoscaler._private.util import hash_runtime_conf
logger = logging.getLogger(__name__)

# Number of random uuid characters appended to generated instance names.
INSTANCE_NAME_UUID_LEN = 8
# Maximum allowed length for a generated instance name.
INSTANCE_NAME_MAX_LEN = 64
# Seconds before a created-but-untagged node is considered stale.
PENDING_TIMEOUT = 120

# TODO: move to constants
# Default VPC instance profile and volume tier used when none is configured.
PROFILE_NAME_DEFAULT = "cx2-2x4"
VOLUME_TIER_NAME_DEFAULT = "general-purpose"
# Marker for IPs/resources created by this provider that it may delete.
RAY_RECYCLABLE = "ray-recyclable"
# Retry count used by the retry_on_except decorator below.
RETRIES = 10
def _create_vpc_client(endpoint, authenticator):
    """
    Creates an IBM VPC python-sdk instance
    """
    client = VpcV1("2021-01-19", authenticator=authenticator)
    client.set_service_url(endpoint + "/v1")
    return client
class Gen2NodeProvider(NodeProvider):
"""Node Provider for IBM Gen2
This provider assumes ray-cluster.yaml contains IBM Cloud credentials and
all necessary gen2 details including existing VPC id, VS image, security
group...etc.
Easiest way to generate config file is to use `lithopscloud` config tool.
Install it using `pip install lithopscloud`, run it, choose `Ray Gen2` and
follow interactive wizard.
Currently, instance tagging is implemented using internal cache file
To communicate with head node from outside cluster private network, using
provider `use_hybrid_ips` flag cluster head node may be provisioned
with floating (external) ip and the rest of worker nodes will be allocated
only private ips.
"""
    # Decorator to wrap a function to reinit clients and retry on except.
    def retry_on_except(func):
        """Retry *func* up to RETRIES times, recreating the VPC client
        (under the provider lock) after every failure; re-raises the last
        exception once the retries are exhausted.

        NOTE(review): assumes args[0] is the provider instance (``self``),
        i.e. the decorator is only applied to instance methods.
        """
        def decorated_func(*args, **kwargs):
            name = func.__name__
            ex = None
            for retry in range(RETRIES):
                try:
                    result = func(*args, **kwargs)
                    return result
                except Exception as e:
                    # remember the last failure so it can be re-raised
                    ex = e
                    msg = f"Err in {name}, {e}, retries left {RETRIES-retry}"
                    cli_logger.error(msg)
                    logger.exception(msg)
                    logger.info("reiniting clients and waiting few seconds")
                    # the wrapped callable is a bound method, so args[0]
                    # is the Gen2NodeProvider instance
                    _self = args[0]
                    with _self.lock:
                        _self.ibm_vpc_client = _create_vpc_client(
                            _self.endpoint, IAMAuthenticator(_self.iam_api_key))
                    time.sleep(1)
            # we got run out of retries, now raising
            raise ex
        return decorated_func
"""
Tracing decorator. Needed for debugging. Will be removed before merging.
"""
def log_in_out(func):
def decorated_func(*args, **kwargs):
name = func.__name__
logger.info(
f"Enter {name} from {inspect.stack()[0][3]} "
f"{inspect.stack()[1][3]} {inspect.stack()[2][3]} with args: "
f"{args} and kwargs {kwargs}")
try:
result = func(*args, **kwargs)
logger.info(
f"Leave {name} from {inspect.stack()[1][3]} with result "
f"{result}, entered with args: {args}")
except Exception:
cli_logger.error(f"Error in {name}")
raise
return result
return decorated_func
"""
load cluster tags from cache file
for instance in cache
if instance not exist
remove from cache
"""
def _load_tags(self):
self.nodes_tags = {}
ray_cache = Path(Path.home(), Path('.ray'))
ray_cache.mkdir(exist_ok=True)
self.tags_file = Path(ray_cache, Path('tags.json'))
if self.tags_file.is_file():
all_tags = json.loads(self.tags_file.read_text())
tags = all_tags.get(self.cluster_name, {})
for instance_id, instance_tags in tags.items():
try:
# this one is needed to filter out instances
# dissapeared since master was up
self.ibm_vpc_client.get_instance(instance_id)
self.nodes_tags[instance_id] = instance_tags
except Exception as e:
cli_logger.warning(instance_id)
if e.message == "Instance not found":
logger.error(
f"cached instance {instance_id} not found, \
will be removed from cache")
self.set_node_tags(None, None)
else:
# check if the current node is a head node
name = socket.gethostname()
logger.info(f'Check if {name} is HEAD')
if self._get_node_type(name) == NODE_KIND_HEAD:
logger.info(f'{name} is HEAD')
node = self.ibm_vpc_client.list_instances(
name=name).get_result()['instances']
if node:
logger.info(f'{name} is node {node} in vpc')
ray_bootstrap_config = Path(Path.home(), Path('ray_bootstrap_config.yaml'))
config = json.loads(ray_bootstrap_config.read_text())
(runtime_hash, file_mounts_contents_hash) = hash_runtime_conf(config["file_mounts"], None, config)
head_tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
"ray-node-name": name,
"ray-node-status": "up-to-date",
"ray-cluster-name": self.cluster_name,
"ray-user-node-type": config['head_node_type'],
"ray-runtime-config": runtime_hash,
"ray-file-mounts-contents": file_mounts_contents_hash
}
logger.info(f'Setting HEAD node tags {head_tags}')
self.set_node_tags(node[0]['id'], head_tags)
    def __init__(self, provider_config, cluster_name):
        """Initialize the IBM VPC node provider.

        Sets every logger to DEBUG, creates the authenticated VPC client from
        ``provider_config`` (``endpoint``, ``iam_api_key``), loads the cached
        node tags and initializes the in-memory node caches.
        """
        NodeProvider.__init__(self, provider_config, cluster_name)
        # ==============
        # workaround to set all loggers to debug level
        # ==============
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        for handler in logger.handlers:
            handler.setLevel(logging.DEBUG)
        loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
        for logger in loggers:
            logger.setLevel(logging.DEBUG)
        # ==============
        self.lock = threading.RLock()
        self.endpoint = self.provider_config["endpoint"]
        self.iam_api_key = self.provider_config["iam_api_key"]
        self.ibm_vpc_client = _create_vpc_client(
            self.endpoint, IAMAuthenticator(self.iam_api_key))
        self._load_tags()
        # Cache of node objects from the last nodes() call
        self.cached_nodes = {}
        # cache of the nodes created, but not yet tagged
        self.pending_nodes = {}
        # ids of nodes scheduled for deletion, excluded from listings
        self.deleted_nodes = []
        self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
                                                        True)
def _get_node_type(self, name):
if f"{self.cluster_name}-{NODE_KIND_WORKER}" in name:
return NODE_KIND_WORKER
elif f"{self.cluster_name}-{NODE_KIND_HEAD}" in name:
return NODE_KIND_HEAD
"""
in case filter is as simple as get all nodes or get worker nodes or get head nodes
return nodes based on naming
"""
    def _get_nodes_by_tags(self, filters):
        """Return instance dicts matching the given tag filters.

        Fast path: when ``filters`` is empty or only constrains the node
        kind, all instances are listed (with pagination) and matched by the
        cluster naming scheme; the tag cache is updated on the fly.
        Slow path: otherwise, each cached node's tags are checked against
        every filter entry and matching instances are fetched individually.
        """
        nodes = []
        if not filters or list(filters.keys()) == [TAG_RAY_NODE_KIND]:
            result = self.ibm_vpc_client.list_instances().get_result()
            instances = result['instances']
            # follow the pagination links until all instances are collected
            while result.get('next'):
                start = result['next']['href'].split('start=')[1]
                result = self.ibm_vpc_client.list_instances(
                    start=start).get_result()
                instances.extend(result['instances'])
            for instance in instances:
                kind = self._get_node_type(instance['name'])
                if kind and instance['id'] not in self.deleted_nodes:
                    if not filters or kind == filters[TAG_RAY_NODE_KIND]:
                        nodes.append(instance)
                        with self.lock:
                            node_cache = self.nodes_tags.setdefault(
                                instance['id'], {})
                            node_cache.update({
                                TAG_RAY_CLUSTER_NAME: self.cluster_name,
                                TAG_RAY_NODE_KIND: kind})
        else:
            # copy under the lock so iteration is safe against mutation
            with self.lock:
                tags = self.nodes_tags.copy()
            for node_id, node_tags in tags.items():
                # filter by tags
                if not all(item in node_tags.items() for item in filters.items()):
                    logger.info(f"specified filter {filters} doesn't match node tags {node_tags}")
                    continue
                try:
                    nodes.append(self.ibm_vpc_client.get_instance(node_id).result)
                except Exception as e:
                    cli_logger.warning(node_id)
                    if e.message == "Instance not found":
                        logger.error(f"failed to find vsi {node_id}, skipping")
                        continue
                    logger.error(f"failed to find instance {node_id}, raising")
                    raise e
        return nodes
"""
Returns ids of non terminated nodes
"""
@log_in_out
def non_terminated_nodes(self, tag_filters):
nodes = []
found_nodes = self._get_nodes_by_tags(tag_filters)
for node in found_nodes:
# check if node scheduled for delete
with self.lock:
if node['id'] in self.deleted_nodes:
logger.info(f"{node['id']} scheduled for delete")
continue
# validate instance in correct state
valid_statuses = ["pending", "starting", "running"]
if node["status"] not in valid_statuses:
logger.info(f"{node['id']} status {node['status']}"
f" not in {valid_statuses}, skipping")
continue
# validate instance not hanging in pending state
with self.lock:
if node['id'] in self.pending_nodes:
if node["status"] != "running":
pending_time = self.pending_nodes[node['id']] - time.time()
logger.info(
f"{node['id']} is pending for {pending_time}"
)
if pending_time > PENDING_TIMEOUT:
logger.error(
f"pending timeout {PENDING_TIMEOUT} reached, "
f"deleting instance {node['id']}")
self._delete_node(node['id'])
else:
self.pending_nodes.pop(node['id'], None)
if self._get_node_type(node["name"]) == NODE_KIND_HEAD:
nic_id = node["network_interfaces"][0]["id"]
# find head node external ip
res = self.ibm_vpc_client.\
list_instance_network_interface_floating_ips(
node['id'], nic_id).get_result()
floating_ips = res["floating_ips"]
if len(floating_ips) == 0:
# not adding a head node missing floating ip
continue
else:
# currently head node always has floating ip
# in case floating ip present we want to add it
node["floating_ips"] = floating_ips
nodes.append(node)
for node in nodes:
self.cached_nodes[node["id"]] = node
return [node["id"] for node in nodes]
@log_in_out
def is_running(self, node_id):
with self.lock:
node = self._get_cached_node(node_id)
return node["status"] == "running"
@log_in_out
def is_terminated(self, node_id):
with self.lock:
try:
node = self._get_cached_node(node_id)
return node["status"] not in ["running", "starting", "pending"]
except Exception as e:
return True
@log_in_out
def node_tags(self, node_id):
with self.lock:
return self.nodes_tags.get(node_id, {})
    # return external ip for head and private ips for workers
    def _get_hybrid_ip(self, node_id):
        """Hybrid addressing: head gets its floating ip, workers private ips.

        For the head node, the cached floating ip is tried first, then the
        node data is refreshed from the API and retried once.
        NOTE(review): a head node with no floating ip even after the refresh
        falls through to an implicit None — confirm callers handle that.
        """
        node = self._get_cached_node(node_id)
        node_type = self._get_node_type(node["name"])
        if node_type == NODE_KIND_HEAD:
            fip = node.get("floating_ips")
            if fip:
                return fip[0]["address"]
            # cache may be stale; refresh and retry once
            node = self._get_node(node_id)
            fip = node.get("floating_ips")
            if fip:
                return fip[0]["address"]
        else:
            return self.internal_ip(node_id)
    @log_in_out
    def external_ip(self, node_id):
        """Return the node's floating (public) ip address.

        In hybrid-ip mode defers to ``_get_hybrid_ip``. Otherwise the cached
        node data is tried first and refreshed from the API on a miss.
        NOTE(review): returns an implicit None when the node has no floating
        ip even after the refresh — confirm callers handle that.
        """
        with self.lock:
            if self.provider_config.get("use_hybrid_ips"):
                return self._get_hybrid_ip(node_id)
            node = self._get_cached_node(node_id)
            fip = node.get("floating_ips")
            if fip:
                return fip[0]["address"]
            # cache may be stale; refresh and retry once
            node = self._get_node(node_id)
            fip = node.get("floating_ips")
            if fip:
                return fip[0]["address"]
    @log_in_out
    def internal_ip(self, node_id):
        """Return the primary private IPv4 address of the node's first NIC.

        Falls back to a fresh API lookup when the cached entry is missing
        the address or the cached structure cannot be read.
        """
        node = self._get_cached_node(node_id)
        try:
            primary_ipv4_address = node["network_interfaces"][0].get(
                "primary_ipv4_address")
            if primary_ipv4_address is None:
                node = self._get_node(node_id)
        except Exception:
            # malformed/partial cached entry -> refresh from the API
            node = self._get_node(node_id)
        logger.info(f"in internal_ip, returning ip for node {node}")
        return node["network_interfaces"][0].get("primary_ipv4_address")
    @log_in_out
    def set_node_tags(self, node_id, tags):
        """Merge ``tags`` into the node's cached tags and persist the cache.

        Calling with ``(None, None)`` skips the in-memory update and only
        dumps the current cache to ``~/.ray/tags.json`` (used to persist
        removals). The whole cluster's tags are stored under
        ``self.cluster_name`` in the shared file.
        """
        with self.lock:
            # update inmemory cache
            if node_id and tags:
                node_cache = self.nodes_tags.setdefault(node_id, {})
                node_cache.update(tags)
            # dump inmemory cache to file
            ray_cache = Path(Path.home(), Path('.ray'))
            self.tags_file = Path(ray_cache, Path('tags.json'))
            all_tags = {}
            if self.tags_file.is_file():
                all_tags = json.loads(self.tags_file.read_text())
            all_tags[self.cluster_name] = self.nodes_tags
            self.tags_file.write_text(json.dumps(all_tags))
def _get_instance_data(self, name):
"""
Returns the instance information
"""
instances_data = self.ibm_vpc_client.list_instances(
name=name).get_result()
if len(instances_data["instances"]) > 0:
return instances_data["instances"][0]
return None
def _create_instance(self, name, base_config):
"""
Creates a new VM instance
"""
logger.info("Creating new VM instance {}".format(name))
security_group_identity_model = {
"id": base_config["security_group_id"]
}
subnet_identity_model = {"id": base_config["subnet_id"]}
primary_network_interface = {
"name": "eth0",
"subnet": subnet_identity_model,
"security_groups": [security_group_identity_model]
}
boot_volume_profile = {
"capacity": base_config.get("boot_volume_capacity", 100),
"name": "{}-boot".format(name),
"profile": {
"name": base_config.get("volume_tier_name",
VOLUME_TIER_NAME_DEFAULT)
}
}
boot_volume_attachment = {
"delete_volume_on_instance_delete": True,
"volume": boot_volume_profile
}
key_identity_model = {"id": base_config["key_id"]}
profile_name = base_config.get("instance_profile_name",
PROFILE_NAME_DEFAULT)
instance_prototype = {}
instance_prototype["name"] = name
instance_prototype["keys"] = [key_identity_model]
instance_prototype["profile"] = {"name": profile_name}
instance_prototype["resource_group"] = {
"id": base_config["resource_group_id"]
}
instance_prototype["vpc"] = {"id": base_config["vpc_id"]}
instance_prototype["image"] = {"id": base_config["image_id"]}
instance_prototype["zone"] = {
"name": self.provider_config["zone_name"]
}
instance_prototype["boot_volume_attachment"] = boot_volume_attachment
instance_prototype[
"primary_network_interface"] = primary_network_interface
try:
with self.lock:
resp = self.ibm_vpc_client.create_instance(instance_prototype)
except ApiException as e:
if e.code == 400 and "already exists" in e.message:
return self._get_instance_data(name)
elif e.code == 400 and "over quota" in e.message:
cli_logger.error(
"Create VM instance {} failed due to quota limit"
.format(name))
else:
cli_logger.error(
"Create VM instance {} failed with status code {}".format(
name, str(e.code)))
raise e
logger.info("VM instance {} created successfully ".format(name))
return resp.result
    def _create_floating_ip(self, base_config):
        """Create a recyclable floating IP, or reuse a configured one.

        When ``base_config['head_ip']`` is set and an existing floating ip
        with that address is found, its data is returned as-is. Otherwise a
        new floating ip is created with the RAY_RECYCLABLE name prefix
        (which marks it for deletion together with the node).
        """
        if base_config.get("head_ip"):
            for ip in self.ibm_vpc_client.list_floating_ips().get_result()[
                    "floating_ips"]:
                if ip["address"] == base_config["head_ip"]:
                    return ip
        floating_ip_name = "{}-{}".format(RAY_RECYCLABLE, uuid4().hex[:4])
        logger.info("Creating floating IP {}".format(floating_ip_name))
        floating_ip_prototype = {}
        floating_ip_prototype["name"] = floating_ip_name
        floating_ip_prototype["zone"] = {
            "name": self.provider_config["zone_name"]
        }
        floating_ip_prototype["resource_group"] = {
            "id": base_config["resource_group_id"]
        }
        response = self.ibm_vpc_client.create_floating_ip(
            floating_ip_prototype)
        floating_ip_data = response.result
        return floating_ip_data
    def _attach_floating_ip(self, instance, fip_data):
        """Attach the floating ip in ``fip_data`` to the instance's eth0.

        Skips the attach call when the guard below considers the ip already
        attached.
        """
        fip = fip_data["address"]
        fip_id = fip_data["id"]
        logger.info("Attaching floating IP {} to VM instance {}".format(
            fip, instance["id"]))
        # check if floating ip is not attached yet
        # NOTE(review): this compares the primary NIC's own id against the
        # floating-ip id — presumably it should inspect the NIC's attached
        # floating ips instead; confirm against the VPC API.
        inst_p_nic = instance["primary_network_interface"]
        if inst_p_nic["primary_ipv4_address"] and inst_p_nic["id"] == fip_id:
            # floating ip already attached. do nothing
            logger.info("Floating IP {} already attached to eth0".format(fip))
        else:
            # attach floating ip
            self.ibm_vpc_client.add_instance_network_interface_floating_ip(
                instance["id"], instance["network_interfaces"][0]["id"],
                fip_id)
def _stopped_nodes(self, tags):
filter = {
TAG_RAY_CLUSTER_NAME: self.cluster_name,
TAG_RAY_NODE_KIND:tags[TAG_RAY_NODE_KIND]
}
nodes = []
for node_id in self.nodes_tags:
try:
# do we need this filtering or simply can trust that all in the tags cach is related to the current cluster?
node_tags = self.nodes_tags[node_id]
if all(item in node_tags.items() for item in filter.items()):
node = self.ibm_vpc_client.get_instance(node_id).result
state = node["status"]
if state in ["stopped", "stopping"]:
nodes.append(node)
except Exception as e:
cli_logger.warning(node_id)
if e.message == "Instance not found":
continue
raise e
return nodes
    def _create_node(self, base_config, tags):
        """Create one tagged instance; head nodes also get a floating ip.

        Returns ``{instance_id: instance_data}`` for the created node.
        The name is the Ray node-name tag plus a uuid suffix, validated
        against the VPC naming constraints by the assert below.
        """
        name_tag = tags[TAG_RAY_NODE_NAME]
        assert (len(name_tag) <=
                (INSTANCE_NAME_MAX_LEN - INSTANCE_NAME_UUID_LEN - 1)
                ) and re.match("^[a-z0-9-:-]*$", name_tag), (name_tag,
                                                             len(name_tag))
        # append instance name with uuid
        name = "{name_tag}-{uuid}".format(
            name_tag=name_tag, uuid=uuid4().hex[:INSTANCE_NAME_UUID_LEN])
        # create instance in vpc
        instance = self._create_instance(name, base_config)
        # currently create and tag is not an atomic operation
        with self.lock:
            # record creation time; used for the pending-timeout check
            self.pending_nodes[instance["id"]] = time.time()
        tags[TAG_RAY_CLUSTER_NAME] = self.cluster_name
        tags[TAG_RAY_NODE_NAME] = name
        self.set_node_tags(instance['id'], tags)
        # currently always creating public ip for head node
        if self._get_node_type(name) == NODE_KIND_HEAD:
            fip_data = self._create_floating_ip(base_config)
            self._attach_floating_ip(instance, fip_data)
        return {instance["id"]: instance}
    @log_in_out
    def create_node(self, base_config, tags, count) -> dict:
        """Create ``count`` nodes, reusing stopped instances when allowed.

        When ``cache_stopped_nodes`` is enabled, compatible stopped nodes
        are restarted and re-tagged first, and only the remainder is created
        concurrently via ``_create_node``.

        Returns a dict mapping node id to instance data for every node that
        was reused or created. (The return annotation was corrected from
        ``None`` to ``dict`` to match the actual return value.)
        """
        stopped_nodes_dict = {}
        futures = []
        # Try to reuse previously stopped nodes with compatible configs
        if self.cache_stopped_nodes:
            stopped_nodes = self._stopped_nodes(tags)
            stopped_nodes_ids = [n["id"] for n in stopped_nodes]
            stopped_nodes_dict = {
                n["id"]: n for n in stopped_nodes
            }
            if stopped_nodes:
                cli_logger.print(
                    f"Reusing nodes {stopped_nodes_ids}. "
                    "To disable reuse, set `cache_stopped_nodes: False` "
                    "under `provider` in the cluster configuration.")
            for node in stopped_nodes:
                logger.info(f"Starting instance {node['id']}")
                self.ibm_vpc_client.create_instance_action(
                    node["id"], "start")
            time.sleep(1)
            for node_id in stopped_nodes_ids:
                self.set_node_tags(node_id, tags)
                with self.lock:
                    # restarted nodes are no longer scheduled for deletion
                    if node_id in self.deleted_nodes:
                        self.deleted_nodes.remove(node_id)
            count -= len(stopped_nodes_ids)
        created_nodes_dict = {}
        # create multiple instances concurrently
        if count:
            with cf.ThreadPoolExecutor(count) as ex:
                for i in range(count):
                    futures.append(
                        ex.submit(self._create_node, base_config, tags))
            for future in cf.as_completed(futures):
                created_node = future.result()
                created_nodes_dict.update(created_node)
        all_created_nodes = stopped_nodes_dict
        all_created_nodes.update(created_nodes_dict)
        return all_created_nodes
def _delete_node(self, node_id):
logger.info(f"in _delete_node with id {node_id}")
try:
floating_ips = []
try:
node = self._get_node(node_id)
floating_ips = node.get("floating_ips", [])
except Exception:
pass
self.ibm_vpc_client.delete_instance(node_id)
with self.lock:
# drop node tags
self.nodes_tags.pop(node_id, None)
self.pending_nodes.pop(node['id'], None)
self.deleted_nodes.append(node_id)
self.cached_nodes.pop(node_id, None)
# calling set_node_tags with None will trigger only dumps
self.set_node_tags(None, None)
for ip in floating_ips:
if ip["name"].startswith(RAY_RECYCLABLE):
self.ibm_vpc_client.delete_floating_ip(ip["id"])
except ApiException as e:
if e.code == 404:
pass
else:
raise e
@log_in_out
def terminate_nodes(self, node_ids):
if not node_ids:
return
futures = []
with cf.ThreadPoolExecutor(len(node_ids)) as ex:
for node_id in node_ids:
logger.info(
"NodeProvider: {}: Terminating node".format(node_id))
futures.append(ex.submit(self.terminate_node, node_id))
for future in cf.as_completed(futures):
future.result()
    @log_in_out
    def terminate_node(self, node_id):
        """
        Deletes the VM instance and the associated volume

        When ``cache_stopped_nodes`` is enabled the instance is only
        stopped (so it can be reused later); otherwise it is fully deleted
        via ``_delete_node``. A 404 from the API is swallowed.
        """
        logger.info("Deleting VM instance {}".format(node_id))
        try:
            if self.cache_stopped_nodes:
                cli_logger.print(
                    f"Stopping instance {node_id}. To terminate instead, "
                    "set `cache_stopped_nodes: False` "
                    "under `provider` in the cluster configuration")
                self.ibm_vpc_client.create_instance_action(node_id, "stop")
            else:
                cli_logger.print(f"Terminating instance {node_id}")
                self._delete_node(node_id)
        except ApiException as e:
            if e.code == 404:
                pass
            else:
                raise e
    def _get_node(self, node_id):
        """Refresh and get info for this node, updating the cache.

        First refreshes the whole cache via ``non_terminated_nodes({})``;
        if the node is still missing (e.g. filtered out there), falls back
        to a direct API lookup and caches the result.
        Raises on API lookup failure.
        """
        self.non_terminated_nodes({})  # Side effect: updates cache
        if node_id in self.cached_nodes:
            return self.cached_nodes[node_id]
        try:
            node = self.ibm_vpc_client.get_instance(node_id).get_result()
            with self.lock:
                self.cached_nodes[node_id] = node
            return node
        except Exception as e:
            logger.error(f"failed to get instance with id {node_id}")
            raise e
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
@staticmethod
def bootstrap_config(cluster_config):
return cluster_config
| [
"logging.getLogger",
"pathlib.Path.home",
"time.sleep",
"ray.autoscaler.node_provider.NodeProvider.__init__",
"pathlib.Path",
"json.dumps",
"threading.RLock",
"concurrent.futures.as_completed",
"socket.gethostname",
"inspect.stack",
"ray.autoscaler._private.cli_logger.cli_logger.print",
"re.ma... | [((1229, 1256), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1246, 1256), False, 'import logging\n'), ((1616, 1664), 'ibm_vpc.VpcV1', 'VpcV1', (['"""2021-01-19"""'], {'authenticator': 'authenticator'}), "('2021-01-19', authenticator=authenticator)\n", (1621, 1664), False, 'from ibm_vpc import VpcV1\n'), ((7058, 7116), 'ray.autoscaler.node_provider.NodeProvider.__init__', 'NodeProvider.__init__', (['self', 'provider_config', 'cluster_name'], {}), '(self, provider_config, cluster_name)\n', (7079, 7116), False, 'from ray.autoscaler.node_provider import NodeProvider\n'), ((7250, 7269), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7267, 7269), False, 'import logging\n'), ((7611, 7628), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (7626, 7628), False, 'import threading\n'), ((26255, 26279), 'concurrent.futures.as_completed', 'cf.as_completed', (['futures'], {}), '(futures)\n', (26270, 26279), True, 'import concurrent.futures as cf\n'), ((4587, 4598), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (4596, 4598), False, 'from pathlib import Path\n'), ((4600, 4612), 'pathlib.Path', 'Path', (['""".ray"""'], {}), "('.ray')\n", (4604, 4612), False, 'from pathlib import Path\n'), ((4695, 4712), 'pathlib.Path', 'Path', (['"""tags.json"""'], {}), "('tags.json')\n", (4699, 4712), False, 'from pathlib import Path\n'), ((5669, 5689), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (5687, 5689), False, 'import socket\n'), ((7413, 7436), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (7430, 7436), False, 'import logging\n'), ((7832, 7866), 'ibm_cloud_sdk_core.authenticators.IAMAuthenticator', 'IAMAuthenticator', (['self.iam_api_key'], {}), '(self.iam_api_key)\n', (7848, 7866), False, 'from ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n'), ((21985, 22021), 're.match', 're.match', (['"""^[a-z0-9-:-]*$"""', 'name_tag'], {}), "('^[a-z0-9-:-]*$', name_tag)\n", 
(21993, 22021), False, 'import re\n'), ((22499, 22510), 'time.time', 'time.time', ([], {}), '()\n', (22508, 22510), False, 'import time\n'), ((23885, 23898), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (23895, 23898), False, 'import time\n'), ((24515, 24539), 'concurrent.futures.as_completed', 'cf.as_completed', (['futures'], {}), '(futures)\n', (24530, 24539), True, 'import concurrent.futures as cf\n'), ((15731, 15742), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (15740, 15742), False, 'from pathlib import Path\n'), ((15744, 15756), 'pathlib.Path', 'Path', (['""".ray"""'], {}), "('.ray')\n", (15748, 15756), False, 'from pathlib import Path\n'), ((15803, 15820), 'pathlib.Path', 'Path', (['"""tags.json"""'], {}), "('tags.json')\n", (15807, 15820), False, 'from pathlib import Path\n'), ((16052, 16072), 'json.dumps', 'json.dumps', (['all_tags'], {}), '(all_tags)\n', (16062, 16072), False, 'import json\n'), ((23447, 23610), 'ray.autoscaler._private.cli_logger.cli_logger.print', 'cli_logger.print', (['f"""Reusing nodes {stopped_nodes_ids}. To disable reuse, set `cache_stopped_nodes: False` under `provider` in the cluster configuration."""'], {}), "(\n f'Reusing nodes {stopped_nodes_ids}. To disable reuse, set `cache_stopped_nodes: False` under `provider` in the cluster configuration.'\n )\n", (23463, 23610), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((24304, 24332), 'concurrent.futures.ThreadPoolExecutor', 'cf.ThreadPoolExecutor', (['count'], {}), '(count)\n', (24325, 24332), True, 'import concurrent.futures as cf\n'), ((26581, 26741), 'ray.autoscaler._private.cli_logger.cli_logger.print', 'cli_logger.print', (['f"""Stopping instance {node_id}. To terminate instead, set `cache_stopped_nodes: False` under `provider` in the cluster configuration"""'], {}), "(\n f'Stopping instance {node_id}. 
To terminate instead, set `cache_stopped_nodes: False` under `provider` in the cluster configuration'\n )\n", (26597, 26741), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((26910, 26961), 'ray.autoscaler._private.cli_logger.cli_logger.print', 'cli_logger.print', (['f"""Terminating instance {node_id}"""'], {}), "(f'Terminating instance {node_id}')\n", (26926, 26961), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((4245, 4281), 'ray.autoscaler._private.cli_logger.cli_logger.error', 'cli_logger.error', (['f"""Error in {name}"""'], {}), "(f'Error in {name}')\n", (4261, 4281), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((6308, 6362), 'ray.autoscaler._private.util.hash_runtime_conf', 'hash_runtime_conf', (["config['file_mounts']", 'None', 'config'], {}), "(config['file_mounts'], None, config)\n", (6325, 6362), False, 'from ray.autoscaler._private.util import hash_runtime_conf\n'), ((19484, 19491), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (19489, 19491), False, 'from uuid import uuid4\n'), ((21614, 21641), 'ray.autoscaler._private.cli_logger.cli_logger.warning', 'cli_logger.warning', (['node_id'], {}), '(node_id)\n', (21632, 21641), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((3032, 3053), 'ray.autoscaler._private.cli_logger.cli_logger.error', 'cli_logger.error', (['msg'], {}), '(msg)\n', (3048, 3053), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((3437, 3450), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3447, 3450), False, 'import time\n'), ((5259, 5290), 'ray.autoscaler._private.cli_logger.cli_logger.warning', 'cli_logger.warning', (['instance_id'], {}), '(instance_id)\n', (5277, 5290), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((6122, 6133), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (6131, 6133), False, 'from pathlib import Path\n'), ((6135, 6168), 'pathlib.Path', 'Path', 
(['"""ray_bootstrap_config.yaml"""'], {}), "('ray_bootstrap_config.yaml')\n", (6139, 6168), False, 'from pathlib import Path\n'), ((22231, 22238), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22236, 22238), False, 'from uuid import uuid4\n'), ((10365, 10392), 'ray.autoscaler._private.cli_logger.cli_logger.warning', 'cli_logger.warning', (['node_id'], {}), '(node_id)\n', (10383, 10392), False, 'from ray.autoscaler._private.cli_logger import cli_logger\n'), ((11766, 11777), 'time.time', 'time.time', ([], {}), '()\n', (11775, 11777), False, 'import time\n'), ((3359, 3394), 'ibm_cloud_sdk_core.authenticators.IAMAuthenticator', 'IAMAuthenticator', (['_self.iam_api_key'], {}), '(_self.iam_api_key)\n', (3375, 3394), False, 'from ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n'), ((3817, 3832), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (3830, 3832), False, 'import inspect\n'), ((3861, 3876), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (3874, 3876), False, 'import inspect\n'), ((3885, 3900), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (3898, 3900), False, 'import inspect\n'), ((4102, 4117), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (4115, 4117), False, 'import inspect\n')] |
from django.forms import TextInput
from django.forms.widgets import MultiWidget, RadioSelect
from django.template.loader import render_to_string
class MultiTextWidget(MultiWidget):
    """A MultiWidget composed of ``widgets_length`` plain text inputs."""

    def __init__(self, widgets_length, **kwargs):
        kwargs["widgets"] = [TextInput() for _ in range(widgets_length)]
        super(MultiTextWidget, self).__init__(**kwargs)

    def decompress(self, value):
        """Split the stored value into per-input values; None becomes []."""
        if value is None:
            return []
        return value

    def format_output(self, rendered_widgets):
        """Render all sub-inputs through the multiple-input template."""
        template_context = {"inputs": rendered_widgets}
        return render_to_string(
            "formly/run/_multiple_input.html",
            context=template_context,
        )
class LikertSelect(RadioSelect):
    """
    Marker subclass: differentiates Likert-scale radio selects
    from "normal" radio selects for presentation purposes.
    It adds no behavior of its own.
    """
    pass
class RatingSelect(RadioSelect):
    # Marker subclass for rating radio selects, analogous to LikertSelect;
    # adds no behavior of its own.
    pass
| [
"django.template.loader.render_to_string",
"django.forms.TextInput"
] | [((543, 636), 'django.template.loader.render_to_string', 'render_to_string', (['"""formly/run/_multiple_input.html"""'], {'context': "{'inputs': rendered_widgets}"}), "('formly/run/_multiple_input.html', context={'inputs':\n rendered_widgets})\n", (559, 636), False, 'from django.template.loader import render_to_string\n'), ((252, 263), 'django.forms.TextInput', 'TextInput', ([], {}), '()\n', (261, 263), False, 'from django.forms import TextInput\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple logger to look at eddn messages passing.
Terminate with a simple Ctlr+C
"""
import os
import sys
import time
import zlib
import argparse
import zmq
try:
import rapidjson as json
except ImportError:
import json
# Public EDDN relay endpoint (ZMQ PUB socket we subscribe to).
EDDN_ADDR = "tcp://eddn.edcd.io:9500"
# Receive timeout passed to zmq.RCVTIMEO, which is in milliseconds -> 10 min.
TIMEOUT = 600000
# Maps EDDN $schemaRef values to log-file base names; the ``None`` entry is
# the fallback bucket for unrecognised schemas.
SCHEMA_MAP = {
    "https://eddn.edcd.io/schemas/commodity/3": "commodity",
    "https://eddn.edcd.io/schemas/journal/1": "journal",
    "https://eddn.edcd.io/schemas/outfitting/2": "outfitting",
    "https://eddn.edcd.io/schemas/shipyard/2": "shipyard",
    None: "unknown",
}
def get_msgs(sub, args):
    """ Continuously receive messages and log them. """
    # Each EDDN message is zlib-compressed JSON. Per message we append a
    # pretty-printed copy (plus a ',' separator) to <schema>.json and a
    # compact single-line copy to <schema>.jsonl inside args.log_d.
    while True:
        msg = sub.recv()
        if not msg:
            raise zmq.ZMQError("Sub problem.")
        msg = json.loads(zlib.decompress(msg).decode())
        msg_str = json.dumps(msg, indent=2, sort_keys=True)
        if args.print:
            print(msg_str)
        try:
            fname = SCHEMA_MAP[msg["$schemaRef"]]
        except KeyError:
            # unrecognised schema -> "unknown" bucket
            fname = SCHEMA_MAP[None]
        with open(os.path.join(args.log_d, fname) + '.json', 'a') as fout:
            fout.write(msg_str + ',')
        with open(os.path.join(args.log_d, fname + '.jsonl'), 'a') as fout:
            fout.write(json.dumps(msg, indent=None, sort_keys=True) + ",\n")
def connect_loop(sub, args):
    """
    Continuously connect and get messages until user cancels.

    On a ZMQ error the socket is disconnected, we wait briefly and then
    reconnect. All messages are logged to file and optionally printed.
    """
    while True:
        try:
            sub.connect(EDDN_ADDR)
            get_msgs(sub, args)
        except zmq.ZMQError as exc:
            print("ZMQ Socket error. Reconnecting soon.\n", exc)
            # BUG FIX: 'discconect' was a typo that raised AttributeError
            # and crashed the reconnect path; the zmq socket method is
            # ``disconnect``.
            sub.disconnect(EDDN_ADDR)
            time.sleep(5)
def parser():
    """Build the command-line argument parser for the logger."""
    arg_parser = argparse.ArgumentParser(description="EDDN Logger")
    arg_parser.add_argument('log_d', help='The folder to log files to.')
    arg_parser.add_argument('--no-print', dest='print', default=True,
                            action='store_false',
                            help='Do not print to stdout')
    return arg_parser
def main():
    """
    Entry point: subscribe to EDDN, seed each schema log file with an
    opening bracket, stream messages until Ctrl+C, then close the JSON
    arrays and report where the messages were written.
    """
    args = parser().parse_args()
    sub = zmq.Context().socket(zmq.SUB)
    sub.setsockopt(zmq.SUBSCRIBE, b'')
    sub.setsockopt(zmq.RCVTIMEO, TIMEOUT)
    try:
        for key in SCHEMA_MAP:
            with open(os.path.join(args.log_d, SCHEMA_MAP[key]) + '.json', 'w') as fout:
                fout.write('[\n')
            with open(os.path.join(args.log_d, SCHEMA_MAP[key]) + '.jsonl', 'w') as fout:
                fout.write('[\n')
        connect_loop(sub, args)
    except KeyboardInterrupt:
        for key in SCHEMA_MAP:
            with open(os.path.join(args.log_d, SCHEMA_MAP[key]) + '.json', 'a') as fout:
                fout.write(']')
            with open(os.path.join(args.log_d, SCHEMA_MAP[key]) + '.jsonl', 'a') as fout:
                fout.write(']')
        # BUG FIX: the two file descriptions were swapped relative to what
        # get_msgs actually writes (.json gets indent=2 pretty printing,
        # .jsonl gets compact one-per-line); also report the directory from
        # the parsed args instead of raw sys.argv.
        msg = """Terminating ZMQ connection.
    {fname} contains all messages sorted into files by schema.
    Files ending in .json contains all messages pretty printed.
    Files ending in .jsonl contains all messages compact one per line."""
        print(msg.format(fname=args.log_d))
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser",
"zmq.ZMQError",
"json.dumps",
"os.path.join",
"time.sleep",
"zmq.Context",
"zlib.decompress"
] | [((1810, 1860), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""EDDN Logger"""'}), "(description='EDDN Logger')\n", (1833, 1860), False, 'import argparse\n'), ((881, 922), 'json.dumps', 'json.dumps', (['msg'], {'indent': '(2)', 'sort_keys': '(True)'}), '(msg, indent=2, sort_keys=True)\n', (891, 922), False, 'import json\n'), ((777, 805), 'zmq.ZMQError', 'zmq.ZMQError', (['"""Sub problem."""'], {}), "('Sub problem.')\n", (789, 805), False, 'import zmq\n'), ((2149, 2162), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (2160, 2162), False, 'import zmq\n'), ((1231, 1273), 'os.path.join', 'os.path.join', (['args.log_d', "(fname + '.jsonl')"], {}), "(args.log_d, fname + '.jsonl')\n", (1243, 1273), False, 'import os\n'), ((1767, 1780), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1777, 1780), False, 'import time\n'), ((832, 852), 'zlib.decompress', 'zlib.decompress', (['msg'], {}), '(msg)\n', (847, 852), False, 'import zlib\n'), ((1118, 1149), 'os.path.join', 'os.path.join', (['args.log_d', 'fname'], {}), '(args.log_d, fname)\n', (1130, 1149), False, 'import os\n'), ((1312, 1356), 'json.dumps', 'json.dumps', (['msg'], {'indent': 'None', 'sort_keys': '(True)'}), '(msg, indent=None, sort_keys=True)\n', (1322, 1356), False, 'import json\n'), ((2323, 2364), 'os.path.join', 'os.path.join', (['args.log_d', 'SCHEMA_MAP[key]'], {}), '(args.log_d, SCHEMA_MAP[key])\n', (2335, 2364), False, 'import os\n'), ((2446, 2487), 'os.path.join', 'os.path.join', (['args.log_d', 'SCHEMA_MAP[key]'], {}), '(args.log_d, SCHEMA_MAP[key])\n', (2458, 2487), False, 'import os\n'), ((2664, 2705), 'os.path.join', 'os.path.join', (['args.log_d', 'SCHEMA_MAP[key]'], {}), '(args.log_d, SCHEMA_MAP[key])\n', (2676, 2705), False, 'import os\n'), ((2785, 2826), 'os.path.join', 'os.path.join', (['args.log_d', 'SCHEMA_MAP[key]'], {}), '(args.log_d, SCHEMA_MAP[key])\n', (2797, 2826), False, 'import os\n')] |
# Generated by Django 3.2 on 2022-03-09 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.2): relaxes two User fields to nullable and
    # drops every unique_together constraint from the model.

    dependencies = [
        ('base', '0002_auto_20220309_1545'),
    ]

    operations = [
        # date_of_birth becomes nullable
        migrations.AlterField(
            model_name='user',
            name='date_of_birth',
            field=models.DateField(null=True, verbose_name='Date of birth'),
        ),
        # identity_number becomes nullable
        migrations.AlterField(
            model_name='user',
            name='identity_number',
            field=models.CharField(max_length=32, null=True, verbose_name='Identity number'),
        ),
        # clear all unique_together constraints on User
        migrations.AlterUniqueTogether(
            name='user',
            unique_together=set(),
        ),
    ]
| [
"django.db.models.DateField",
"django.db.models.CharField"
] | [((336, 393), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)', 'verbose_name': '"""Date of birth"""'}), "(null=True, verbose_name='Date of birth')\n", (352, 393), False, 'from django.db import migrations, models\n'), ((522, 596), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'null': '(True)', 'verbose_name': '"""Identity number"""'}), "(max_length=32, null=True, verbose_name='Identity number')\n", (538, 596), False, 'from django.db import migrations, models\n')] |
#!/bin/python3
# pdfpng - convert pdf to png
# Copyright (C) 2022 ArcNyxx
# see LICENCE file for licensing information
import os
import sys

import fitz as pdf

# Render every page of the given PDF as <stem><page>.png.
if len(sys.argv) != 2:
    print("usage: pdfpng [file]")
    # BUG FIX: exit with a nonzero status on usage error (bare sys.exit()
    # returned 0, signalling success to the shell)
    sys.exit(1)

# os.path.splitext strips only the extension; the original split('.')[0]
# broke on paths containing dots elsewhere (e.g. ./dir/file.pdf)
stem = os.path.splitext(sys.argv[1])[0]
doc = pdf.open(sys.argv[1])
try:
    for num, page in enumerate(doc):
        page.get_pixmap().save(f"{stem}{num}.png")
finally:
    # close the document even if rendering a page fails
    doc.close()
| [
"fitz.open",
"sys.exit"
] | [((230, 251), 'fitz.open', 'pdf.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (238, 251), True, 'import fitz as pdf\n'), ((212, 222), 'sys.exit', 'sys.exit', ([], {}), '()\n', (220, 222), False, 'import sys\n')] |
import socket
def is_connectable(host, port):
    """Return True if a TCP connection to (host, port) succeeds within 1s.

    The probe socket is always closed; connection failures and timeouts
    yield False.
    """
    try:
        # socket objects are context managers (Python 3.2+); this replaces
        # the manual try/finally close dance
        with socket.create_connection((host, port), timeout=1):
            return True
    except socket.error:
        return False
| [
"socket.create_connection"
] | [((94, 135), 'socket.create_connection', 'socket.create_connection', (['(host, port)', '(1)'], {}), '((host, port), 1)\n', (118, 135), False, 'import socket\n')] |
import pandas as pd
import matplotlib.pyplot as plt
from src.utils.function_libraries import *
from src.utils.data_utils import *
from src.utils.identification.PI_Identifier import PI_Identifier
from src.utils.solution_processing import *
from differentiation.spectral_derivative import compute_spectral_derivative
from differentiation.finite_diff import compute_finite_diff
from filtering.SpectralFilter import SpectralFilter
from filtering.KernelFilter import KernelFilter
from tools import halve, mirror, add_noise, downsample
from src.utils.theta_processing.single_pend import *
from sklearn.model_selection import train_test_split
import matplotlib as mpl
import os
import pickle
from containers.DynaFrame import DynaFrame, create_df
from definitions import ROOT_DIR
import sympy as sp
from sympy.utilities.codegen import codegen
# Project-local matplotlib style sheet.
style_path = os.path.join(ROOT_DIR, 'src', 'utils', 'visualization', 'BystrickyK.mplstyle')
print(style_path)
# Combine the seaborn preset with the project style and force the
# interactive Qt backend.
# NOTE(review): style.use is given a *set*, so the application order of the
# two styles is unspecified — confirm this is intentional.
plt.style.use({'seaborn', style_path})
mpl.use('Qt5Agg')
# Simulated single-pendulum-on-cart dataset and cache locations.
datafile = 'singlePend.csv'
data_path = os.path.join(ROOT_DIR,'data','singlePend','simulated',datafile)
cache_path = os.path.join(ROOT_DIR,'src', 'singlePendulumCart', 'cache')
# Get training dataset
def load_data(data_path):
    """Read the simulation CSV and return ``(DynaFrame, sample period)``.

    Columns 's', 'phi1', 'Ds', 'Dphi' are renamed to states x_1..x_4 and
    'Ds', 'Dphi', 'DDs', 'DDphi' to their derivatives dx_1..dx_4; the time
    column 't' and input 'u' are carried over unchanged.
    """
    raw = pd.read_csv(data_path)
    states = raw.loc[:, ['s', 'phi1', 'Ds', 'Dphi']]
    states.columns = ['x_' + str(k) for k in (1, 2, 3, 4)]
    derivatives = raw.loc[:, ['DsR', 'Dphi', 'DDs', 'DDphi']] if False else raw.loc[:, ['Ds', 'Dphi', 'DDs', 'DDphi']]
    derivatives.columns = ['dx_' + str(k) for k in (1, 2, 3, 4)]
    assembled = pd.concat(
        [raw.loc[:, 't'], states, derivatives, raw.loc[:, 'u']], axis=1)
    frame = DynaFrame(assembled)
    # Sample period must be read before the index is reset.
    dt = frame.get_dt()
    frame = frame.reset_index(drop=True)
    return DynaFrame(frame), dt
# Load the simulation and split chronologically (shuffle=False) into
# 80% training / 20% held-out validation data.
sim_data, dt = load_data(data_path)
sim_data, sim_data_test = train_test_split(sim_data, test_size=0.2,
                                           shuffle=False, random_state=42)
#%%
# dx = compute_spectral_derivative(x, dt, mirroring=True)
# dx = create_df(dx, 'dx')
# filter = KernelFilter(kernel_size=51)
# dx = filter.filter(dx)
# compare_signals(DXt, dx, ['Clean', 'Filtered'], ylabels=['$\dot{x}_1 \; [m\; s^{-2}]$',
# '$\dot{x}_2 \; [rad\; s^{-2}]$'])
#
# ddx = compute_spectral_derivative(dx, dt)
# ddx = create_df(ddx, 'ddx')
# compare_signals(DDXt, ddx, ['Clean', 'Filtered'], ylabels=['$\ddot{x}_1 \; [m\; s^{-2}]$',
# '$\ddot{x}_2 \; [rad\; s^{-2}]$'])
#%% Downsample training data
# Thin the training set by keeping every 10th sample, then rewrap both
# splits as DynaFrames so the accessor methods below are available.
sim_data = downsample(sim_data, 10).reset_index(drop=True)
sim_data = DynaFrame(sim_data)
sim_data_test = DynaFrame(sim_data_test)
# compare_signals(DX.iloc[:,[2,3]], downsample(DDXt.iloc[:,[0,1]], step),
# legend_str=['Filt','Clean'], ylabels=['a', 'b'])
#%%
def data_dict(sim_data):
    """Bundle the state, state-derivative and input views of *sim_data*
    into a plain dict keyed 'X', 'DX' and 'u'."""
    return {
        'X': sim_data.get_state_vars(),
        'DX': sim_data.get_state_derivative_vars(),
        'u': sim_data.get_input_vars(),
    }
# Build candidate-function libraries (polynomials of orders 1-4 over the
# basis functions) for both the training and validation splits.
data = data_dict(sim_data)
data_val = data_dict(sim_data_test)
theta_basis = create_basis(data)
theta_basis_val = create_basis(data_val)
theta_train = poly_library(theta_basis, (1,2,3,4))
theta_val = poly_library(theta_basis_val, (1,2,3,4))
#%%
theta_train = drop_bad_terms(theta_train)
theta_val = drop_bad_terms(theta_val)
# Pin the bias column to 1. NOTE(review): row 0 is nudged to 1.00001,
# presumably so the column is not exactly constant for the downstream
# numerics -- confirm intent before changing.
theta_train.iloc[:,0] = 1
theta_train.iloc[0,0] = 1.00001
theta_val.iloc[:,0] = 1
theta_val.iloc[0,0] = 1.00001
# %% Compute the solution or retrieve it from cache
rewrite = True # Should the cache be rewritten
rewrite = False
eqns_to_identify = ['dx_3', 'dx_4'] # State derivatives whose equation we want to identify
cache_str = 'SPFinalDense'
eqns_models = {}
# Identify each target equation; results are pickled so reruns skip the
# expensive search unless `rewrite` is set above.
for eqn in eqns_to_identify:
    # find cols with other state derivatives than the one currently being identified
    idx = np.array([('d' in col and eqn not in col) for col in theta_train.columns])
    print(f'ii {np.sum(idx)}')
    # Construct a library for identifying the desired equation
    theta_hat_train = theta_train.loc[:, ~idx]
    eqns_models[eqn] = {}
    eqns_models[eqn]['theta_train'] = theta_hat_train
    # corr = theta_hat_train.corr()
    # plot_corr(corr, theta_hat_train.columns, labels=False, ticks=True)
    # Cache file is keyed by run tag + target equation name.
    cachename = cache_str + '_' + eqn
    cachename = os.path.join(cache_path, cachename)
    if os.path.exists(cachename) and not rewrite:
        print("Retrieving solution from cache.")
        with open(cachename, 'rb') as f:
            eqns_models[eqn] = pickle.load(f)
    else:
        print("No solution in cache, calculating solution from scratch.")
        EqnIdentifier = PI_Identifier(theta_hat_train)
        EqnIdentifier.set_thresh_range(lims=(0.000001, 0.01), n=5)
        EqnIdentifier.set_target(eqn)
        EqnIdentifier.create_models(n_models=theta_hat_train.shape[1], iters=8, shuffle=False)
        eqns_models[eqn]['models'] = EqnIdentifier.models
        with open(cachename, 'wb') as f:
            pickle.dump(eqns_models[eqn], f)
# %%
# Assemble validation inputs (states + input u) and the measured state
# derivatives each identified equation will be checked against.
sim_data_xu = pd.concat([sim_data_test.get_state_vars(),
                         sim_data_test.get_input_vars()],
                        axis=1).reset_index(drop=True)
sim_data_dx = sim_data_test.get_state_derivative_vars().reset_index(drop=True)
dynamic_model = {}
# For each target derivative: de-duplicate candidates, score them, filter,
# and pick the best remaining model by AIC.
for target_models_str, eqn_model in eqns_models.items():
    theta_train = eqn_model['theta_train']
    col_names = theta_train.columns
    # Restrict the validation library to the columns used in training.
    theta_sub_val = theta_val.loc[:, col_names]
    models = eqn_model['models']
    dynamic_model[target_models_str] = {}
    # %% Remove duplicate models
    models = model_unique(models)
    models = model_activations(models)
    models = model_val_rmse(models, theta_sub_val)
    # plot_implicit_sols(models, col_names, show_labels=False, axislabels=False)
    # Calculate AIC for each model
    models = model_aic(models, theta_sub_val)
    #%%
    # model_metrics = models.loc[:, ['n_terms', 'train_metric', 'validation_metric', 'aic']]
    # model_metrics = model_metrics.sort_values('n_terms')
    # fig, axs = plt.subplots(ncols=2, tight_layout=True, sharex=True)
    # axs[0].plot(model_metrics['n_terms'], model_metrics['train_metric'], 'o',
    #             color='tab:blue', alpha=0.7)
    # axs[0].set_yscale('log')
    # axs[0].set_xlabel("$Number\ of\ terms$")
    # axs[0].set_ylabel("$Training\ RMSE$")
    #
    # axs[1].plot(model_metrics['n_terms'], model_metrics['validation_metric'],
    #             'o', color='tab:red', alpha=0.7)
    # axs[1].set_yscale('log')
    # axs[1].set_xlabel("$Number\ of\ terms$")
    # axs[1].set_ylabel("$Validation\ RMSE$")
    # %% Look for consistent models by finding clusters in the term activation space
    models = model_consistent(models, min_cluster_size=2)
    # Discard non-sparse models
    models = model_sparse(models, threshold=10)
    # plot_implicit_sols(models, col_names, show_labels=False, axislabels=True)
    models = model_equation_strings(models, col_names)
    # NOTE(review): `vars` shadows the builtin of the same name.
    vars = ['x_1', 'x_2', 'x_3', 'x_4', 'u']
    lhsvar = target_models_str
    # Create symbolic implicit equations column
    models = model_symbolic_implicit_eqns(models, lhsvar)
    #%%
    # Drop bad models
    aic_thresh = models['aic'].max() * 0.1
    models = models[ models['aic'] < aic_thresh ] # Keep models under the threshold
    models = model_symbolic_eqn(models, lhsvar)
    models = model_lambdify_eqn(models, vars)
    models = models.reset_index(drop=True)
    # %%
    plot_implicit_sols(models, col_names, show_labels=True)
    plt.show()
    # %% Decompose one of the models
    # choice = int(input("Choose model index:"))
    choice = models['aic'].argmin()
    best_model = models.loc[choice]
    # %%
    # Record the chosen equation and evaluate it along the validation data.
    dynamic_model[target_models_str]['symeqn'] = best_model['eqn_sym']
    dynamic_model[target_models_str]['str'] = best_model['eqn_sym_implicit']
    dynamic_model[target_models_str]['models'] = models
    dynamic_model[target_models_str]['choice'] = best_model
    derivative_trajectory_model = np.apply_along_axis(best_model['eqn_lambda'], axis=1, arr=sim_data_xu)
    derivative_trajectory_real = sim_data_dx.loc[:, target_models_str]
    dynamic_model[target_models_str]['model_val_traj'] = derivative_trajectory_model
    dynamic_model[target_models_str]['real_val_traj'] = derivative_trajectory_real
#%%
# Stack the per-equation trajectories into (samples x equations) arrays and
# plot modelled vs. measured derivatives on the validation data.
derivative_trajectory_real = []
derivative_trajectory_model = []
for eqn in eqns_to_identify:
    dx_traj_model = dynamic_model[eqn]['model_val_traj']
    dx_traj_real = dynamic_model[eqn]['real_val_traj']
    derivative_trajectory_model.append(dx_traj_model)
    derivative_trajectory_real.append(dx_traj_real)
derivative_trajectory_model = np.array(derivative_trajectory_model).T
derivative_trajectory_real = np.array(derivative_trajectory_real).T
# fig = plt.figure(tight_layout=True, figsize=(9,8))
compare_signals(derivative_trajectory_real, derivative_trajectory_model,
                ['Real', 'Model'], ['$\\dot{x_3}$', '$\\dot{x_4}$'])
#%%
def round_expr(expr, num_digits):
    """Return *expr* with every numeric atom rounded to *num_digits* decimals."""
    replacements = {number: round(number, num_digits)
                    for number in expr.atoms(sp.Number)}
    return expr.xreplace(replacements)
# Round, factor and export the identified equations: LaTeX for the report
# and Octave source via sympy's codegen.
symeqns = [dynamic_model[eqn]['symeqn'] for eqn in eqns_to_identify]
symeqns = [round_expr(sp.simplify(sp.factor(eqn)), 5) for eqn in symeqns]
latex_output = ' \\\\ \n '.join([sp.latex(eqn) for eqn in symeqns])
latex_output_file = 'model_latex.txt'
with open(latex_output_file, 'w') as file:
    file.write(latex_output)
# NOTE(review): os.chdir changes the process-wide working directory so the
# generated files land in ./models; the side effect persists afterwards.
os.chdir('models')
codegen(('identified_model_clean', symeqns),
        language='octave', to_files=True)
#%%
sim_data = DynaFrame(sim_data)
plot_signals(sim_data.get_state_vars(),
             # ['$\\dot{x_3}$', '$\\dot{x_4}$']
             ['$x_1\ [m]$',
              '$x_2\ [rad]$',
              '$x_3=\\dot{x}_1\ [\\frac{m}{s}]$',
              '$x_4=\\dot{x}_2\ [\\frac{rad}{s}]$'])
#%% Save good guess columns
# Collect, per identified equation, which library columns any retained
# model activated; these serve as warm-start guesses for later runs.
good_guesses = []
for eqn, results in dynamic_model.items():
    print(eqn)
    models = results['models']
    active_cols = models['active'].values
    active_cols = np.vstack(active_cols)
    active_cols = active_cols.any(axis=0)
    good_guesses.append(active_cols)
good_guesses = np.array(good_guesses)
# good_guesses = good_guesses.any(axis=0)
# good_guesses = np.argwhere(good_guesses).T[0]
good_guesses = [np.argwhere(g).T[0] for g in good_guesses]
cache_path = os.path.join(ROOT_DIR,'src', 'singlePendulumCart', 'cache')
guess_cache_name = 'guessColumnsReal'
guess_cache_path = os.path.join(cache_path, guess_cache_name)
with open(guess_cache_path, 'wb') as f:
    pickle.dump(good_guesses, f)
| [
"os.path.exists",
"pickle.dump",
"sympy.utilities.codegen.codegen",
"pandas.read_csv",
"matplotlib.use",
"sklearn.model_selection.train_test_split",
"src.utils.identification.PI_Identifier.PI_Identifier",
"os.path.join",
"matplotlib.pyplot.style.use",
"sympy.latex",
"pickle.load",
"os.chdir",
... | [((849, 927), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""src"""', '"""utils"""', '"""visualization"""', '"""BystrickyK.mplstyle"""'], {}), "(ROOT_DIR, 'src', 'utils', 'visualization', 'BystrickyK.mplstyle')\n", (861, 927), False, 'import os\n'), ((946, 984), 'matplotlib.pyplot.style.use', 'plt.style.use', (["{'seaborn', style_path}"], {}), "({'seaborn', style_path})\n", (959, 984), True, 'import matplotlib.pyplot as plt\n'), ((986, 1003), 'matplotlib.use', 'mpl.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (993, 1003), True, 'import matplotlib as mpl\n'), ((1045, 1112), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data"""', '"""singlePend"""', '"""simulated"""', 'datafile'], {}), "(ROOT_DIR, 'data', 'singlePend', 'simulated', datafile)\n", (1057, 1112), False, 'import os\n'), ((1122, 1182), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""src"""', '"""singlePendulumCart"""', '"""cache"""'], {}), "(ROOT_DIR, 'src', 'singlePendulumCart', 'cache')\n", (1134, 1182), False, 'import os\n'), ((1887, 1960), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sim_data'], {'test_size': '(0.2)', 'shuffle': '(False)', 'random_state': '(42)'}), '(sim_data, test_size=0.2, shuffle=False, random_state=42)\n', (1903, 1960), False, 'from sklearn.model_selection import train_test_split\n'), ((2722, 2741), 'containers.DynaFrame.DynaFrame', 'DynaFrame', (['sim_data'], {}), '(sim_data)\n', (2731, 2741), False, 'from containers.DynaFrame import DynaFrame, create_df\n'), ((2758, 2782), 'containers.DynaFrame.DynaFrame', 'DynaFrame', (['sim_data_test'], {}), '(sim_data_test)\n', (2767, 2782), False, 'from containers.DynaFrame import DynaFrame, create_df\n'), ((9511, 9529), 'os.chdir', 'os.chdir', (['"""models"""'], {}), "('models')\n", (9519, 9529), False, 'import os\n'), ((9530, 9608), 'sympy.utilities.codegen.codegen', 'codegen', (["('identified_model_clean', symeqns)"], {'language': '"""octave"""', 'to_files': '(True)'}), "(('identified_model_clean', 
symeqns), language='octave', to_files=True)\n", (9537, 9608), False, 'from sympy.utilities.codegen import codegen\n'), ((9633, 9652), 'containers.DynaFrame.DynaFrame', 'DynaFrame', (['sim_data'], {}), '(sim_data)\n', (9642, 9652), False, 'from containers.DynaFrame import DynaFrame, create_df\n'), ((10397, 10457), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""src"""', '"""singlePendulumCart"""', '"""cache"""'], {}), "(ROOT_DIR, 'src', 'singlePendulumCart', 'cache')\n", (10409, 10457), False, 'import os\n'), ((10514, 10556), 'os.path.join', 'os.path.join', (['cache_path', 'guess_cache_name'], {}), '(cache_path, guess_cache_name)\n', (10526, 10556), False, 'import os\n'), ((1247, 1269), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (1258, 1269), True, 'import pandas as pd\n'), ((1611, 1679), 'pandas.concat', 'pd.concat', (['[sim_data_t, sim_data_x, sim_data_dx, sim_data_u]'], {'axis': '(1)'}), '([sim_data_t, sim_data_x, sim_data_dx, sim_data_u], axis=1)\n', (1620, 1679), True, 'import pandas as pd\n'), ((1695, 1714), 'containers.DynaFrame.DynaFrame', 'DynaFrame', (['sim_data'], {}), '(sim_data)\n', (1704, 1714), False, 'from containers.DynaFrame import DynaFrame, create_df\n'), ((4398, 4433), 'os.path.join', 'os.path.join', (['cache_path', 'cachename'], {}), '(cache_path, cachename)\n', (4410, 4433), False, 'import os\n'), ((7619, 7629), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7627, 7629), True, 'import matplotlib.pyplot as plt\n'), ((10601, 10629), 'pickle.dump', 'pickle.dump', (['good_guesses', 'f'], {}), '(good_guesses, f)\n', (10612, 10629), False, 'import pickle\n'), ((1800, 1819), 'containers.DynaFrame.DynaFrame', 'DynaFrame', (['sim_data'], {}), '(sim_data)\n', (1809, 1819), False, 'from containers.DynaFrame import DynaFrame, create_df\n'), ((2663, 2687), 'tools.downsample', 'downsample', (['sim_data', '(10)'], {}), '(sim_data, 10)\n', (2673, 2687), False, 'from tools import halve, mirror, add_noise, 
downsample\n'), ((4442, 4467), 'os.path.exists', 'os.path.exists', (['cachename'], {}), '(cachename)\n', (4456, 4467), False, 'import os\n'), ((4729, 4759), 'src.utils.identification.PI_Identifier.PI_Identifier', 'PI_Identifier', (['theta_hat_train'], {}), '(theta_hat_train)\n', (4742, 4759), False, 'from src.utils.identification.PI_Identifier import PI_Identifier\n'), ((9364, 9377), 'sympy.latex', 'sp.latex', (['eqn'], {}), '(eqn)\n', (9372, 9377), True, 'import sympy as sp\n'), ((4606, 4620), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4617, 4620), False, 'import pickle\n'), ((5071, 5103), 'pickle.dump', 'pickle.dump', (['eqns_models[eqn]', 'f'], {}), '(eqns_models[eqn], f)\n', (5082, 5103), False, 'import pickle\n'), ((9289, 9303), 'sympy.factor', 'sp.factor', (['eqn'], {}), '(eqn)\n', (9298, 9303), True, 'import sympy as sp\n')] |
import logging
import os
import json
from errors import throw_input_data_is_corrupted, throw_table_does_not_exist
# File-name suffix that marks a table's on-disk data file.
DATA_FILE_SUFFIX = '.db'

def generate_data_file_name(table):
    """Return the on-disk file name backing *table* (``<table>.db``)."""
    return ''.join((table, DATA_FILE_SUFFIX))
def get_table_name_from_data_file(data_file):
    """Invert generate_data_file_name by stripping the '.db' suffix.

    Note: ``replace`` removes *every* occurrence of the suffix, matching the
    original behaviour.
    """
    table_name = data_file.replace(DATA_FILE_SUFFIX, '')
    return table_name
def split_data(data):
    """Split a comma-separated pair into a 2-tuple.

    Delegates to throw_input_data_is_corrupted when *data* does not contain
    exactly one comma.
    """
    parts = data.split(",")
    if len(parts) != 2:
        throw_input_data_is_corrupted()
    return parts[0], parts[1]
def check_if_table_exists(table):
    """Report whether the data file backing *table* exists on disk."""
    return os.path.exists(generate_data_file_name(table))
def read_table_into_json(table):
    """Load the table's data file and return its contents parsed as JSON.

    Returns ``{}`` for an empty file; delegates to throw_table_does_not_exist
    when the table's backing file is missing.
    """
    if not check_if_table_exists(table):
        throw_table_does_not_exist(table)
    path = generate_data_file_name(table)
    with open(path, 'r') as handle:
        raw = handle.read()
        if raw == "":
            return {}
        return json.loads(raw)
def write_json_into_table(table, json_data):
    """Serialize *json_data* and overwrite the data file backing *table*.

    Delegates to throw_table_does_not_exist when the backing file is missing.
    """
    if not check_if_table_exists(table):
        throw_table_does_not_exist(table)
    data_file = generate_data_file_name(table)
    with open(data_file, 'w') as f:
        f.write(json.dumps(json_data)) | [
"os.path.exists",
"json.loads",
"errors.throw_table_does_not_exist",
"json.dumps",
"errors.throw_input_data_is_corrupted"
] | [((559, 589), 'os.path.exists', 'os.path.exists', (['data_file_name'], {}), '(data_file_name)\n', (573, 589), False, 'import os\n'), ((397, 428), 'errors.throw_input_data_is_corrupted', 'throw_input_data_is_corrupted', ([], {}), '()\n', (426, 428), False, 'from errors import throw_input_data_is_corrupted, throw_table_does_not_exist\n'), ((711, 744), 'errors.throw_table_does_not_exist', 'throw_table_does_not_exist', (['table'], {}), '(table)\n', (737, 744), False, 'from errors import throw_input_data_is_corrupted, throw_table_does_not_exist\n'), ((873, 894), 'json.loads', 'json.loads', (['file_data'], {}), '(file_data)\n', (883, 894), False, 'import json\n'), ((1021, 1054), 'errors.throw_table_does_not_exist', 'throw_table_does_not_exist', (['table'], {}), '(table)\n', (1047, 1054), False, 'from errors import throw_input_data_is_corrupted, throw_table_does_not_exist\n'), ((1159, 1180), 'json.dumps', 'json.dumps', (['json_data'], {}), '(json_data)\n', (1169, 1180), False, 'import json\n')] |
import tempfile
import pandas as pd
from pg2pd import Pg2Pd
def test_make_df_1(pg_conn):
    """End-to-end check of the Postgres-binary -> DataFrame pipeline
    for an integer column and a varchar column (including a NULL)."""
    cursor = pg_conn.cursor()
    # Dump the table in Postgres binary COPY format into a temp file.
    binary_path = tempfile.mkstemp()[1]
    query = 'COPY test1 TO STDOUT BINARY;'
    with open(binary_path, 'wb') as f:
        cursor.copy_expert(sql=query, file=f)
    pg_conn.commit()
    frame = Pg2Pd(binary_path, ['integer', 'varchar'], ['id', 'text']).make_df()
    assert frame['id'].tolist() == [42, 25, 60]
    assert frame['text'].tolist()[:2] == ['Some cool data', 'Even more cool data']
    # Note that NaN != NaN, so we can do this assertion instead
    assert pd.isna(frame['text'].tolist()[2])
def test_make_df_2(pg_conn):
    """End-to-end check of the Postgres-binary -> DataFrame pipeline
    for boolean columns."""
    cursor = pg_conn.cursor()
    # Dump the table in Postgres binary COPY format into a temp file.
    binary_path = tempfile.mkstemp()[1]
    query = 'COPY test2 TO STDOUT BINARY;'
    with open(binary_path, 'wb') as f:
        cursor.copy_expert(sql=query, file=f)
    pg_conn.commit()
    frame = Pg2Pd(binary_path, ['boolean', 'boolean'], ['t', 'f']).make_df()
    assert frame['t'].tolist() == [True]
    assert frame['f'].tolist() == [False]
| [
"tempfile.mkstemp",
"pg2pd.Pg2Pd"
] | [((469, 520), 'pg2pd.Pg2Pd', 'Pg2Pd', (['path', "['integer', 'varchar']", "['id', 'text']"], {}), "(path, ['integer', 'varchar'], ['id', 'text'])\n", (474, 520), False, 'from pg2pd import Pg2Pd\n'), ((291, 309), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (307, 309), False, 'import tempfile\n'), ((996, 1014), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1012, 1014), False, 'import tempfile\n'), ((1178, 1225), 'pg2pd.Pg2Pd', 'Pg2Pd', (['path', "['boolean', 'boolean']", "['t', 'f']"], {}), "(path, ['boolean', 'boolean'], ['t', 'f'])\n", (1183, 1225), False, 'from pg2pd import Pg2Pd\n')] |
import asyncio
from NewsClassificator.news import News
def read(file_name, code='utf-8'):
    """Lazily read *file_name*, yielding one ``News`` object per line and a
    final empty-string sentinel after the last line.

    Args:
        file_name: Path of the file to read.
        code: Text encoding used to open the file (default ``'utf-8'``).

    Yields:
        ``News`` instances, one per line, then ``""`` to signal exhaustion.
    """
    # FIX: the original decorated this generator with @asyncio.coroutine,
    # which was deprecated in Python 3.8 and removed in 3.11. The function
    # yields values and is consumed as a plain generator, so the decorator
    # is dropped; iteration behaviour is unchanged.
    with open(file_name, 'r', encoding=code) as file:
        for line in file:
            yield News(line)
        yield ""
| [
"NewsClassificator.news.News"
] | [((415, 425), 'NewsClassificator.news.News', 'News', (['line'], {}), '(line)\n', (419, 425), False, 'from NewsClassificator.news import News\n')] |
from django.db import models
from django.conf import settings
from django.contrib.postgres.search import SearchVectorField
from django.contrib.auth.models import AbstractUser
from django.utils import timezone
# BOOK-RELATED MODELS
class Books(models.Model):
    """Catalog row for a book title, mapped onto a pre-existing table."""
    title = models.CharField(max_length=5125)
    year = models.IntegerField()
    authors = models.CharField(max_length=5125, null=True)
    book_type = models.CharField(max_length=13)
    isbn = models.CharField(max_length=13, null=True)
    pages = models.IntegerField()
    editions = models.IntegerField()
    alt_titles = models.CharField(max_length=5125, null=True)
    series_str_1 = models.CharField(max_length=5125, null=True)
    series_str_2 = models.CharField(max_length=5125, null=True)
    original_lang = models.CharField(max_length=40)
    original_title = models.CharField(max_length=5125, null=True)
    original_year = models.IntegerField()
    isfdb_rating = models.FloatField()
    award_winner = models.BooleanField()
    juvenile = models.BooleanField()
    stand_alone = models.BooleanField()
    inconsistent = models.BooleanField()
    virtual = models.BooleanField()
    cover_image = models.CharField(max_length=5125, null=True)
    wikipedia = models.CharField(max_length=20000, null=True)
    synopsis = models.CharField(max_length=20000, null=True)
    note = models.CharField(max_length=5125, null=True)
    general_search = SearchVectorField(null=True)

    # FIX: the original `options = {'managed': False}` was an inert dict --
    # Django only reads model options from an inner Meta class, so
    # `managed = False` was never applied.
    class Meta:
        managed = False

    def __str__(self):
        return self.title
class Isbns(models.Model):
    """ISBN-to-title mapping row (pre-existing table)."""
    isbn = models.CharField(max_length=13)
    title_id = models.IntegerField()

    # FIX: `options = {'managed': False}` was a plain dict Django never
    # reads; model options belong in an inner Meta class.
    class Meta:
        managed = False

    def __str__(self):
        return self.isbn
class Translations(models.Model):
    """Translated edition of a title (pre-existing table)."""
    lowest_title_id = models.IntegerField()
    title = models.CharField(max_length=5125)
    year = models.IntegerField()
    note = models.CharField(max_length=20000)

    # FIX: `options = {'managed': False}` was a plain dict Django never
    # reads; model options belong in an inner Meta class.
    class Meta:
        managed = False

    def __str__(self):
        return self.title
class Contents(models.Model):
    """Containment link: a book title contains another title (pre-existing table)."""
    book_title_id = models.IntegerField()
    content_title_id = models.IntegerField()

    # FIX: `options = {'managed': False}` was a plain dict Django never
    # reads; model options belong in an inner Meta class.
    class Meta:
        managed = False

    def __str__(self):
        return str(self.book_title_id)
class More_Images(models.Model):
    """Additional image reference for a title (pre-existing table)."""
    title_id = models.IntegerField()
    image = models.CharField(max_length=5125)

    # FIX: `options = {'managed': False}` was a plain dict Django never
    # reads; model options belong in an inner Meta class.
    class Meta:
        managed = False

    def __str__(self):
        return self.image
class Words(models.Model):
    """Single-column word list keyed by the word itself (pre-existing table)."""
    word = models.CharField(primary_key=True, max_length=5125)

    # FIX: `options = {'managed': False}` was a plain dict Django never
    # reads; model options belong in an inner Meta class.
    class Meta:
        managed = False

    def __str__(self):
        return self.word
# USER-RELATED MODELS
class User(AbstractUser):
    # Extends Django's built-in user with profile fields.
    location = models.CharField(max_length=250, null=True)
    age = models.IntegerField(null=True)
    # NOTE(review): appears to mark synthetic/placeholder accounts
    # (cf. Book_Club.virtual_member) -- confirm against the creating views.
    virtual = models.BooleanField(null=False, default=False)
    class Meta:
        indexes = [
            models.Index(fields=['username']),
            models.Index(fields=['first_name']),
        ]
    def __str__(self):
        return str(self.id) + ": " + self.first_name + " " + self.last_name
class Book_Club(models.Model):
    # A reading group with a many-to-many membership.
    name = models.CharField(max_length=256, null=True)
    members = models.ManyToManyField(
        User,
        related_name="book_clubs",
        verbose_name="Members of the club"
    )
    virtual = models.BooleanField(null=False, default=False)
    # NOTE(review): presumably a user account standing in for the club
    # itself (see User.virtual) -- confirm before relying on this.
    virtual_member = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='virtual_member_of',
        null=True
    )
    class Meta:
        indexes = [
            models.Index(fields=['name']),
            models.Index(fields=['virtual']),
        ]
    def __str__(self):
        return self.name
class Meeting(models.Model):
    # A dated meeting of one book club to discuss one book.
    book_club = models.ForeignKey(
        Book_Club, on_delete=models.CASCADE, null=False)
    # db_constraint=False: no database-level FK is created toward Books.
    book = models.ForeignKey(
        Books, on_delete=models.DO_NOTHING, null=True, db_constraint=False,)
    date = models.DateField(null=True)
    class Meta:
        indexes = [
            models.Index(fields=['book_club']),
            models.Index(fields=['book']),
            models.Index(fields=['date']),
        ]
    def __str__(self):
        # NOTE(review): raises AttributeError when book is NULL (book is
        # nullable) -- confirm callers guard against that.
        return self.book.title + '(' + str(self.date) + ')'
class Rating(models.Model):
    """One user's rating of one book (unique per pair), plus a predicted
    rating and the rating as originally imported on its native scale
    (`original_rating` bounded by `original_min`/`original_max`)."""
    book = models.ForeignKey(
        Books, null=False, db_constraint=False, on_delete=models.DO_NOTHING)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=False, on_delete=models.CASCADE)
    rating = models.FloatField(null=True)
    predicted_rating = models.FloatField(null=True)
    original_rating = models.FloatField(null=True)
    original_min = models.FloatField(null=True)
    original_max = models.FloatField(null=True)
    saved = models.BooleanField(null=False, default=False)
    blocked = models.BooleanField(null=False, default=False)
    last_updated = models.DateTimeField(default=timezone.now)

    class Meta:
        indexes = [
            models.Index(fields=['book']),
            models.Index(fields=['user']),
            models.Index(fields=['rating']),
            # Functional indexes on FLOOR(...) support whole-star lookups.
            models.Index(models.Func('rating', function='FLOOR'),
                         name='floor_rating_idx'),
            models.Index(fields=['predicted_rating']),
            models.Index(models.Func('predicted_rating', function='FLOOR'),
                         name='floor_predicted_rating_idx'),
            models.Index(fields=['saved']),
            models.Index(fields=['blocked']),
            models.Index(fields=['last_updated'])
        ]
        constraints = [
            models.UniqueConstraint(
                fields=['book', 'user'], name='OneRatingPerBookAndUser'
            ),
            models.CheckConstraint(check=models.Q(rating__gte=1),
                                   name="RatingAtLeast1"
            ),
            models.CheckConstraint(check=models.Q(rating__lte=10),
                                   name="RatingAtMost10"
            ),
            models.CheckConstraint(check=models.Q(original_rating__gte=models.F('original_min')),
                                   name="OriginalRatingAtLeastMin"
            ),
            models.CheckConstraint(check=models.Q(original_rating__lte=models.F('original_max')),
                                   name="OriginalRatingAtMostMax"
            ),
        ]

    def __str__(self):
        # FIX: compare to None with `is not` rather than `!=` (PEP 8).
        if self.rating is not None:
            return self.user.first_name + " rates " + \
                str(self.rating) + " to " + self.book.title
        else:
            return self.user.first_name + " hasn't rated " + self.book.title
class DataProblem(models.Model):
    # NOTE(review): appears to be a user-filed report about a book's data --
    # confirm against the submitting view.
    book = models.ForeignKey(
        Books, null=False, on_delete=models.DO_NOTHING, db_constraint=False,)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=False, on_delete=models.CASCADE)
    # Free-text description of the reported problem.
    problem = models.CharField(max_length=32768)
    class Meta:
        indexes = [
            models.Index(fields=['book']),
            models.Index(fields=['user']),
        ]
def __str__(self):
return self.book.title | [
"django.db.models.Index",
"django.db.models.OneToOneField",
"django.db.models.FloatField",
"django.db.models.DateField",
"django.db.models.UniqueConstraint",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.Func",
"django.db.models.ManyToManyField",
"django.db.mode... | [((278, 311), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)'}), '(max_length=5125)\n', (294, 311), False, 'from django.db import models\n'), ((323, 344), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (342, 344), False, 'from django.db import models\n'), ((359, 403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (375, 403), False, 'from django.db import models\n'), ((420, 451), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(13)'}), '(max_length=13)\n', (436, 451), False, 'from django.db import models\n'), ((463, 505), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(13)', 'null': '(True)'}), '(max_length=13, null=True)\n', (479, 505), False, 'from django.db import models\n'), ((518, 539), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (537, 539), False, 'from django.db import models\n'), ((555, 576), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (574, 576), False, 'from django.db import models\n'), ((594, 638), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (610, 638), False, 'from django.db import models\n'), ((658, 702), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (674, 702), False, 'from django.db import models\n'), ((722, 766), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (738, 766), False, 'from django.db import models\n'), ((787, 818), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (803, 818), False, 'from django.db import models\n'), ((840, 884), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (856, 884), False, 'from django.db import models\n'), ((905, 926), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (924, 926), False, 'from django.db import models\n'), ((946, 965), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (963, 965), False, 'from django.db import models\n'), ((985, 1006), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1004, 1006), False, 'from django.db import models\n'), ((1022, 1043), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1041, 1043), False, 'from django.db import models\n'), ((1062, 1083), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1081, 1083), False, 'from django.db import models\n'), ((1103, 1124), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1122, 1124), False, 'from django.db import models\n'), ((1139, 1160), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1158, 1160), False, 'from django.db import models\n'), ((1179, 1223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (1195, 1223), False, 'from django.db import models\n'), ((1240, 1285), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20000)', 'null': '(True)'}), '(max_length=20000, null=True)\n', (1256, 1285), False, 'from django.db import models\n'), ((1301, 1346), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20000)', 'null': '(True)'}), '(max_length=20000, null=True)\n', (1317, 1346), False, 'from django.db import models\n'), ((1358, 1402), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)', 'null': '(True)'}), '(max_length=5125, null=True)\n', (1374, 1402), False, 'from django.db import models\n'), ((1424, 1452), 
'django.contrib.postgres.search.SearchVectorField', 'SearchVectorField', ([], {'null': '(True)'}), '(null=True)\n', (1441, 1452), False, 'from django.contrib.postgres.search import SearchVectorField\n'), ((1593, 1624), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(13)'}), '(max_length=13)\n', (1609, 1624), False, 'from django.db import models\n'), ((1640, 1661), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1659, 1661), False, 'from django.db import models\n'), ((1819, 1840), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1838, 1840), False, 'from django.db import models\n'), ((1853, 1886), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)'}), '(max_length=5125)\n', (1869, 1886), False, 'from django.db import models\n'), ((1898, 1919), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1917, 1919), False, 'from django.db import models\n'), ((1931, 1965), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20000)'}), '(max_length=20000)\n', (1947, 1965), False, 'from django.db import models\n'), ((2118, 2139), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2137, 2139), False, 'from django.db import models\n'), ((2163, 2184), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2182, 2184), False, 'from django.db import models\n'), ((2348, 2369), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2367, 2369), False, 'from django.db import models\n'), ((2382, 2415), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5125)'}), '(max_length=5125)\n', (2398, 2415), False, 'from django.db import models\n'), ((2556, 2607), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(5125)'}), '(primary_key=True, max_length=5125)\n', (2572, 2607), False, 'from django.db import models\n'), 
((2780, 2823), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'null': '(True)'}), '(max_length=250, null=True)\n', (2796, 2823), False, 'from django.db import models\n'), ((2834, 2864), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (2853, 2864), False, 'from django.db import models\n'), ((2879, 2925), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'null': '(False)', 'default': '(False)'}), '(null=False, default=False)\n', (2898, 2925), False, 'from django.db import models\n'), ((3218, 3261), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'null': '(True)'}), '(max_length=256, null=True)\n', (3234, 3261), False, 'from django.db import models\n'), ((3276, 3372), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'related_name': '"""book_clubs"""', 'verbose_name': '"""Members of the club"""'}), "(User, related_name='book_clubs', verbose_name=\n 'Members of the club')\n", (3298, 3372), False, 'from django.db import models\n'), ((3413, 3459), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'null': '(False)', 'default': '(False)'}), '(null=False, default=False)\n', (3432, 3459), False, 'from django.db import models\n'), ((3481, 3602), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""virtual_member_of"""', 'null': '(True)'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='virtual_member_of', null=True)\n", (3501, 3602), False, 'from django.db import models\n'), ((3868, 3934), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Book_Club'], {'on_delete': 'models.CASCADE', 'null': '(False)'}), '(Book_Club, on_delete=models.CASCADE, null=False)\n', (3885, 3934), False, 'from django.db import models\n'), ((3955, 4044), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Books'], {'on_delete': 
'models.DO_NOTHING', 'null': '(True)', 'db_constraint': '(False)'}), '(Books, on_delete=models.DO_NOTHING, null=True,\n db_constraint=False)\n', (3972, 4044), False, 'from django.db import models\n'), ((4062, 4089), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)'}), '(null=True)\n', (4078, 4089), False, 'from django.db import models\n'), ((4395, 4486), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Books'], {'null': '(False)', 'db_constraint': '(False)', 'on_delete': 'models.DO_NOTHING'}), '(Books, null=False, db_constraint=False, on_delete=models.\n DO_NOTHING)\n', (4412, 4486), False, 'from django.db import models\n'), ((4503, 4589), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(False)', 'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, null=False, on_delete=models.\n CASCADE)\n', (4520, 4589), False, 'from django.db import models\n'), ((4607, 4635), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (4624, 4635), False, 'from django.db import models\n'), ((4659, 4687), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (4676, 4687), False, 'from django.db import models\n'), ((4710, 4738), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (4727, 4738), False, 'from django.db import models\n'), ((4758, 4786), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (4775, 4786), False, 'from django.db import models\n'), ((4806, 4834), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)'}), '(null=True)\n', (4823, 4834), False, 'from django.db import models\n'), ((4847, 4893), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'null': '(False)', 'default': '(False)'}), '(null=False, default=False)\n', (4866, 4893), False, 'from django.db import models\n'), ((4908, 4954), 
'django.db.models.BooleanField', 'models.BooleanField', ([], {'null': '(False)', 'default': '(False)'}), '(null=False, default=False)\n', (4927, 4954), False, 'from django.db import models\n'), ((4974, 5016), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (4994, 5016), False, 'from django.db import models\n'), ((6660, 6750), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Books'], {'null': '(False)', 'on_delete': 'models.DO_NOTHING', 'db_constraint': '(False)'}), '(Books, null=False, on_delete=models.DO_NOTHING,\n db_constraint=False)\n', (6677, 6750), False, 'from django.db import models\n'), ((6768, 6854), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(False)', 'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, null=False, on_delete=models.\n CASCADE)\n', (6785, 6854), False, 'from django.db import models\n'), ((6873, 6907), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32768)'}), '(max_length=32768)\n', (6889, 6907), False, 'from django.db import models\n'), ((2979, 3012), 'django.db.models.Index', 'models.Index', ([], {'fields': "['username']"}), "(fields=['username'])\n", (2991, 3012), False, 'from django.db import models\n'), ((3026, 3061), 'django.db.models.Index', 'models.Index', ([], {'fields': "['first_name']"}), "(fields=['first_name'])\n", (3038, 3061), False, 'from django.db import models\n'), ((3686, 3715), 'django.db.models.Index', 'models.Index', ([], {'fields': "['name']"}), "(fields=['name'])\n", (3698, 3715), False, 'from django.db import models\n'), ((3729, 3761), 'django.db.models.Index', 'models.Index', ([], {'fields': "['virtual']"}), "(fields=['virtual'])\n", (3741, 3761), False, 'from django.db import models\n'), ((4139, 4173), 'django.db.models.Index', 'models.Index', ([], {'fields': "['book_club']"}), "(fields=['book_club'])\n", (4151, 4173), False, 'from django.db import 
models\n'), ((4187, 4216), 'django.db.models.Index', 'models.Index', ([], {'fields': "['book']"}), "(fields=['book'])\n", (4199, 4216), False, 'from django.db import models\n'), ((4230, 4259), 'django.db.models.Index', 'models.Index', ([], {'fields': "['date']"}), "(fields=['date'])\n", (4242, 4259), False, 'from django.db import models\n'), ((5066, 5095), 'django.db.models.Index', 'models.Index', ([], {'fields': "['book']"}), "(fields=['book'])\n", (5078, 5095), False, 'from django.db import models\n'), ((5109, 5138), 'django.db.models.Index', 'models.Index', ([], {'fields': "['user']"}), "(fields=['user'])\n", (5121, 5138), False, 'from django.db import models\n'), ((5152, 5183), 'django.db.models.Index', 'models.Index', ([], {'fields': "['rating']"}), "(fields=['rating'])\n", (5164, 5183), False, 'from django.db import models\n'), ((5306, 5347), 'django.db.models.Index', 'models.Index', ([], {'fields': "['predicted_rating']"}), "(fields=['predicted_rating'])\n", (5318, 5347), False, 'from django.db import models\n'), ((5490, 5520), 'django.db.models.Index', 'models.Index', ([], {'fields': "['saved']"}), "(fields=['saved'])\n", (5502, 5520), False, 'from django.db import models\n'), ((5534, 5566), 'django.db.models.Index', 'models.Index', ([], {'fields': "['blocked']"}), "(fields=['blocked'])\n", (5546, 5566), False, 'from django.db import models\n'), ((5580, 5617), 'django.db.models.Index', 'models.Index', ([], {'fields': "['last_updated']"}), "(fields=['last_updated'])\n", (5592, 5617), False, 'from django.db import models\n'), ((5664, 5749), 'django.db.models.UniqueConstraint', 'models.UniqueConstraint', ([], {'fields': "['book', 'user']", 'name': '"""OneRatingPerBookAndUser"""'}), "(fields=['book', 'user'], name='OneRatingPerBookAndUser'\n )\n", (5687, 5749), False, 'from django.db import models\n'), ((6957, 6986), 'django.db.models.Index', 'models.Index', ([], {'fields': "['book']"}), "(fields=['book'])\n", (6969, 6986), False, 'from django.db import 
models\n'), ((7000, 7029), 'django.db.models.Index', 'models.Index', ([], {'fields': "['user']"}), "(fields=['user'])\n", (7012, 7029), False, 'from django.db import models\n'), ((5210, 5249), 'django.db.models.Func', 'models.Func', (['"""rating"""'], {'function': '"""FLOOR"""'}), "('rating', function='FLOOR')\n", (5221, 5249), False, 'from django.db import models\n'), ((5374, 5423), 'django.db.models.Func', 'models.Func', (['"""predicted_rating"""'], {'function': '"""FLOOR"""'}), "('predicted_rating', function='FLOOR')\n", (5385, 5423), False, 'from django.db import models\n'), ((5817, 5840), 'django.db.models.Q', 'models.Q', ([], {'rating__gte': '(1)'}), '(rating__gte=1)\n', (5825, 5840), False, 'from django.db import models\n'), ((5937, 5961), 'django.db.models.Q', 'models.Q', ([], {'rating__lte': '(10)'}), '(rating__lte=10)\n', (5945, 5961), False, 'from django.db import models\n'), ((6088, 6112), 'django.db.models.F', 'models.F', (['"""original_min"""'], {}), "('original_min')\n", (6096, 6112), False, 'from django.db import models\n'), ((6250, 6274), 'django.db.models.F', 'models.F', (['"""original_max"""'], {}), "('original_max')\n", (6258, 6274), False, 'from django.db import models\n')] |
""" Tests for the client credentials grant flow. """
from txoauth2.clients import PublicClient
from txoauth2.token import TokenResource
from txoauth2.errors import UnauthorizedClientError, MissingParameterError, \
MultipleParameterError, InvalidScopeError
from tests import getTestPasswordClient
from tests.unit.testTokenResource import Abstract
class TestClientCredentialsGrant(Abstract.TokenResourceTest):
"""
Test the functionality of the Client Credentials Grant.
See https://tools.ietf.org/html/rfc6749#section-4.4
"""
def testUnauthorizedClient(self):
"""
Test the rejection of a request with a client who is
not authorized to use the Client Credentials grant.
"""
client = getTestPasswordClient('unauthorizedClientCredentialsGrantClient',
authorizedGrantTypes=[])
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
'scope': ' '.join(self._VALID_SCOPE),
}, authentication=client)
self._CLIENT_STORAGE.addClient(client)
result = self._TOKEN_RESOURCE.render_POST(request)
self.assertFailedTokenRequest(
request, result, UnauthorizedClientError('client_credentials'),
msg='Expected the resource token to reject a client_credentials request '
'with a client that is not authorized to use that grant type.')
def testPublicClient(self):
""" Test the rejection of a request with a public client. """
client = PublicClient('unauthorizedClientCredentialsGrantClient',
['https://return.nonexistent'], ['client_credentials'])
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
'scope': ' '.join(self._VALID_SCOPE),
'client_id': client.id
})
self._CLIENT_STORAGE.addClient(client)
result = self._TOKEN_RESOURCE.render_POST(request)
self.assertFailedTokenRequest(
request, result, UnauthorizedClientError('client_credentials'),
msg='Expected the resource token to reject a '
'client_credentials request with a public client.')
def testAuthorizedClientWithoutScope(self):
"""
Test that of a request without a scope is accepted
if the token resource has a default scope.
"""
defaultScope = ['default', 'scope']
accessToken = 'clientCredentialsAccessTokenWithoutScope'
tokenResource = TokenResource(
self._TOKEN_FACTORY, self._PERSISTENT_STORAGE, self._REFRESH_TOKEN_STORAGE,
self._AUTH_TOKEN_STORAGE, self._CLIENT_STORAGE, defaultScope=defaultScope,
passwordManager=self._PASSWORD_MANAGER)
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
}, authentication=self._VALID_CLIENT)
self._TOKEN_FACTORY.expectTokenRequest(
accessToken, tokenResource.authTokenLifeTime, self._VALID_CLIENT, defaultScope)
result = tokenResource.render_POST(request)
self._TOKEN_FACTORY.assertAllTokensRequested()
self.assertValidTokenResponse(request, result, accessToken, tokenResource.authTokenLifeTime,
expectedScope=defaultScope)
def testAuthorizedClientWithoutScopeNoDefault(self):
"""
Test the rejection of a request without a scope
when the token resource has no default scope.
"""
request = self.generateValidTokenRequest(arguments={'grant_type': 'client_credentials'},
authentication=self._VALID_CLIENT)
result = self._TOKEN_RESOURCE.render_POST(request)
self.assertFailedTokenRequest(
request, result, MissingParameterError('scope'),
msg='Expected the resource token to reject a client_credentials request '
'without a scope when no default scope is given.')
def testAuthorizedClientWithScope(self):
""" Test that a valid request is accepted. """
accessToken = 'clientCredentialsAccessToken'
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
'scope': ' '.join(self._VALID_SCOPE),
}, authentication=self._VALID_CLIENT)
self._TOKEN_FACTORY.expectTokenRequest(accessToken, self._TOKEN_RESOURCE.authTokenLifeTime,
self._VALID_CLIENT, self._VALID_SCOPE)
result = self._TOKEN_RESOURCE.render_POST(request)
self._TOKEN_FACTORY.assertAllTokensRequested()
self.assertValidTokenResponse(
request, result, accessToken, self._TOKEN_RESOURCE.authTokenLifeTime,
expectedScope=self._VALID_SCOPE)
def testAuthorizedClientWithMalformedScope(self):
""" Test the rejection of a request with a malformed scope parameters. """
malformedScope = b'malformedScope\xFF\xFF'
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
'scope': malformedScope,
}, authentication=self._VALID_CLIENT)
result = self._TOKEN_RESOURCE.render_POST(request)
self.assertFailedTokenRequest(
request, result, InvalidScopeError(malformedScope),
msg='Expected the resource token to reject a '
'client_credentials request with a malformed scope parameters.')
def testAuthorizedClientWithMultipleScope(self):
""" Test the rejection of a request with multiple scope parameters. """
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
'scope': self._VALID_SCOPE,
}, authentication=self._VALID_CLIENT)
result = self._TOKEN_RESOURCE.render_POST(request)
self.assertFailedTokenRequest(
request, result, MultipleParameterError('scope'),
msg='Expected the resource token to reject a '
'client_credentials request with multiple scope parameters.')
def testAuthorizedClientInvalidScope(self):
""" Test the rejection of a request with an invalid scope parameters. """
request = self.generateValidTokenRequest(arguments={
'grant_type': 'client_credentials',
'scope': ' '.join(self._VALID_SCOPE),
}, authentication=self._VALID_CLIENT)
self._TOKEN_FACTORY.expectTokenRequest(
'token', self._TOKEN_RESOURCE.authTokenLifeTime, self._VALID_CLIENT,
self._VALID_SCOPE, validScope=False)
result = self._TOKEN_RESOURCE.render_POST(request)
self._TOKEN_FACTORY.assertAllTokensRequested()
self.assertFailedTokenRequest(
request, result, InvalidScopeError(self._VALID_SCOPE),
msg='Expected the resource token to reject a '
'client_credentials request with invalid scope parameters.')
| [
"txoauth2.errors.MultipleParameterError",
"txoauth2.errors.InvalidScopeError",
"txoauth2.token.TokenResource",
"tests.getTestPasswordClient",
"txoauth2.errors.UnauthorizedClientError",
"txoauth2.clients.PublicClient",
"txoauth2.errors.MissingParameterError"
] | [((748, 842), 'tests.getTestPasswordClient', 'getTestPasswordClient', (['"""unauthorizedClientCredentialsGrantClient"""'], {'authorizedGrantTypes': '[]'}), "('unauthorizedClientCredentialsGrantClient',\n authorizedGrantTypes=[])\n", (769, 842), False, 'from tests import getTestPasswordClient\n'), ((1578, 1695), 'txoauth2.clients.PublicClient', 'PublicClient', (['"""unauthorizedClientCredentialsGrantClient"""', "['https://return.nonexistent']", "['client_credentials']"], {}), "('unauthorizedClientCredentialsGrantClient', [\n 'https://return.nonexistent'], ['client_credentials'])\n", (1590, 1695), False, 'from txoauth2.clients import PublicClient\n'), ((2590, 2803), 'txoauth2.token.TokenResource', 'TokenResource', (['self._TOKEN_FACTORY', 'self._PERSISTENT_STORAGE', 'self._REFRESH_TOKEN_STORAGE', 'self._AUTH_TOKEN_STORAGE', 'self._CLIENT_STORAGE'], {'defaultScope': 'defaultScope', 'passwordManager': 'self._PASSWORD_MANAGER'}), '(self._TOKEN_FACTORY, self._PERSISTENT_STORAGE, self.\n _REFRESH_TOKEN_STORAGE, self._AUTH_TOKEN_STORAGE, self._CLIENT_STORAGE,\n defaultScope=defaultScope, passwordManager=self._PASSWORD_MANAGER)\n', (2603, 2803), False, 'from txoauth2.token import TokenResource\n'), ((1245, 1290), 'txoauth2.errors.UnauthorizedClientError', 'UnauthorizedClientError', (['"""client_credentials"""'], {}), "('client_credentials')\n", (1268, 1290), False, 'from txoauth2.errors import UnauthorizedClientError, MissingParameterError, MultipleParameterError, InvalidScopeError\n'), ((2100, 2145), 'txoauth2.errors.UnauthorizedClientError', 'UnauthorizedClientError', (['"""client_credentials"""'], {}), "('client_credentials')\n", (2123, 2145), False, 'from txoauth2.errors import UnauthorizedClientError, MissingParameterError, MultipleParameterError, InvalidScopeError\n'), ((3901, 3931), 'txoauth2.errors.MissingParameterError', 'MissingParameterError', (['"""scope"""'], {}), "('scope')\n", (3922, 3931), False, 'from txoauth2.errors import UnauthorizedClientError, 
MissingParameterError, MultipleParameterError, InvalidScopeError\n'), ((5419, 5452), 'txoauth2.errors.InvalidScopeError', 'InvalidScopeError', (['malformedScope'], {}), '(malformedScope)\n', (5436, 5452), False, 'from txoauth2.errors import UnauthorizedClientError, MissingParameterError, MultipleParameterError, InvalidScopeError\n'), ((6050, 6081), 'txoauth2.errors.MultipleParameterError', 'MultipleParameterError', (['"""scope"""'], {}), "('scope')\n", (6072, 6081), False, 'from txoauth2.errors import UnauthorizedClientError, MissingParameterError, MultipleParameterError, InvalidScopeError\n'), ((6916, 6952), 'txoauth2.errors.InvalidScopeError', 'InvalidScopeError', (['self._VALID_SCOPE'], {}), '(self._VALID_SCOPE)\n', (6933, 6952), False, 'from txoauth2.errors import UnauthorizedClientError, MissingParameterError, MultipleParameterError, InvalidScopeError\n')] |
from __future__ import annotations
import requests
from requests.auth import HTTPBasicAuth
from dataclasses import dataclass, asdict
from mashumaro import DataClassJSONMixin
@dataclass
class Response(DataClassJSONMixin):
statusCode: int = 200
body: str = ''
@classmethod
def of(cls, status_code: int, msg: Message) -> Response:
return Response(status_code, msg.to_json())
def respond(self) -> dict:
return asdict(self)
@dataclass
class Message(DataClassJSONMixin):
message: str
def say_hello(msg: Message) -> dict:
resp = requests.post(
'https://httpbin.org/post',
json=msg.to_dict(),
auth=HTTPBasicAuth('username', 'password'),
verify=False,
timeout=2)
try:
return resp.json()['json']
except Exception as e :
return { 'msg': f'No body in response {e} -> {resp.text}' }
def handler(event: dict, context) -> dict:
try:
payload: dict = say_hello(Message("Hello World"))
payload.update({'message': f"Received from httpbin: {payload['message']}"})
msg: Message = Message.from_dict(payload)
return Response.of(200, msg).respond()
except Exception as e:
return Response.of(500, Message(str(e))).respond()
| [
"requests.auth.HTTPBasicAuth",
"dataclasses.asdict"
] | [((446, 458), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (452, 458), False, 'from dataclasses import dataclass, asdict\n'), ((666, 703), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""username"""', '"""password"""'], {}), "('username', 'password')\n", (679, 703), False, 'from requests.auth import HTTPBasicAuth\n')] |
import typer
from pathlib import Path
import yaml
from scTenifold import scTenifoldNet, scTenifoldKnk
app = typer.Typer()
@app.command(name="config")
def get_config_file(
config_type: int = typer.Option(1, "--type", "-t",
help="Type, 1: scTenifoldNet, 2: scTenifoldKnk",
min=1, max=2),
file_path: str = typer.Option(
".config.yml",
"--path",
"-p",
help="Path to generate empty config file")):
config = scTenifoldNet.get_empty_config() if config_type == 1 else scTenifoldKnk.get_empty_config()
with open(Path(file_path), 'w') as outfile:
yaml.dump(config, outfile, default_flow_style=False)
@app.command(name="net")
def build_net(config_file_path: str = typer.Option(...,
"--config",
"-c",
help="Loaded config file's path"),
output_dir_path: str = typer.Option("./saved_net",
"--output",
"-o",
help="Output folder containing all analysis results"),
):
with open(Path(config_file_path), "r") as f:
data = yaml.safe_load(f)
sc = scTenifoldNet.load_config(config=data)
sc.build()
sc.save(output_dir_path)
@app.command(name="knk")
def build_net(config_file_path: str = typer.Option(...,
"--config",
"-c",
help="Loaded config file's path"),
output_dir_path: str = typer.Option("./saved_knk",
"--output",
"-o",
help="Output folder containing all analysis results"),
):
with open(Path(config_file_path), "r") as f:
data = yaml.safe_load(f)
sc = scTenifoldKnk.load_config(config=data)
sc.build()
sc.save(output_dir_path)
if __name__ == '__main__':
app() | [
"scTenifold.scTenifoldKnk.load_config",
"yaml.dump",
"typer.Option",
"pathlib.Path",
"scTenifold.scTenifoldKnk.get_empty_config",
"typer.Typer",
"yaml.safe_load",
"scTenifold.scTenifoldNet.get_empty_config",
"scTenifold.scTenifoldNet.load_config"
] | [((109, 122), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (120, 122), False, 'import typer\n'), ((201, 300), 'typer.Option', 'typer.Option', (['(1)', '"""--type"""', '"""-t"""'], {'help': '"""Type, 1: scTenifoldNet, 2: scTenifoldKnk"""', 'min': '(1)', 'max': '(2)'}), "(1, '--type', '-t', help=\n 'Type, 1: scTenifoldNet, 2: scTenifoldKnk', min=1, max=2)\n", (213, 300), False, 'import typer\n'), ((402, 493), 'typer.Option', 'typer.Option', (['""".config.yml"""', '"""--path"""', '"""-p"""'], {'help': '"""Path to generate empty config file"""'}), "('.config.yml', '--path', '-p', help=\n 'Path to generate empty config file')\n", (414, 493), False, 'import typer\n'), ((866, 935), 'typer.Option', 'typer.Option', (['...', '"""--config"""', '"""-c"""'], {'help': '"""Loaded config file\'s path"""'}), '(..., \'--config\', \'-c\', help="Loaded config file\'s path")\n', (878, 935), False, 'import typer\n'), ((1127, 1231), 'typer.Option', 'typer.Option', (['"""./saved_net"""', '"""--output"""', '"""-o"""'], {'help': '"""Output folder containing all analysis results"""'}), "('./saved_net', '--output', '-o', help=\n 'Output folder containing all analysis results')\n", (1139, 1231), False, 'import typer\n'), ((1486, 1524), 'scTenifold.scTenifoldNet.load_config', 'scTenifoldNet.load_config', ([], {'config': 'data'}), '(config=data)\n', (1511, 1524), False, 'from scTenifold import scTenifoldNet, scTenifoldKnk\n'), ((1634, 1703), 'typer.Option', 'typer.Option', (['...', '"""--config"""', '"""-c"""'], {'help': '"""Loaded config file\'s path"""'}), '(..., \'--config\', \'-c\', help="Loaded config file\'s path")\n', (1646, 1703), False, 'import typer\n'), ((1895, 1999), 'typer.Option', 'typer.Option', (['"""./saved_knk"""', '"""--output"""', '"""-o"""'], {'help': '"""Output folder containing all analysis results"""'}), "('./saved_knk', '--output', '-o', help=\n 'Output folder containing all analysis results')\n", (1907, 1999), False, 'import typer\n'), ((2254, 2292), 
'scTenifold.scTenifoldKnk.load_config', 'scTenifoldKnk.load_config', ([], {'config': 'data'}), '(config=data)\n', (2279, 2292), False, 'from scTenifold import scTenifoldNet, scTenifoldKnk\n'), ((601, 633), 'scTenifold.scTenifoldNet.get_empty_config', 'scTenifoldNet.get_empty_config', ([], {}), '()\n', (631, 633), False, 'from scTenifold import scTenifoldNet, scTenifoldKnk\n'), ((659, 691), 'scTenifold.scTenifoldKnk.get_empty_config', 'scTenifoldKnk.get_empty_config', ([], {}), '()\n', (689, 691), False, 'from scTenifold import scTenifoldNet, scTenifoldKnk\n'), ((748, 800), 'yaml.dump', 'yaml.dump', (['config', 'outfile'], {'default_flow_style': '(False)'}), '(config, outfile, default_flow_style=False)\n', (757, 800), False, 'import yaml\n'), ((1459, 1476), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1473, 1476), False, 'import yaml\n'), ((2227, 2244), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2241, 2244), False, 'import yaml\n'), ((706, 721), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (710, 721), False, 'from pathlib import Path\n'), ((1409, 1431), 'pathlib.Path', 'Path', (['config_file_path'], {}), '(config_file_path)\n', (1413, 1431), False, 'from pathlib import Path\n'), ((2177, 2199), 'pathlib.Path', 'Path', (['config_file_path'], {}), '(config_file_path)\n', (2181, 2199), False, 'from pathlib import Path\n')] |
from singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods import UmiBarcodeDemuxMethod
class chrom10x_c16_u12(UmiBarcodeDemuxMethod):
def __init__(self, barcodeFileParser, **kwargs):
self.barcodeFileAlias = '10x_3M-february-2018'
UmiBarcodeDemuxMethod.__init__(
self,
umiRead=0,
umiStart=16,
umiLength=12,
barcodeRead=0,
barcodeStart=0,
barcodeLength=16,
random_primer_read=None,
random_primer_length=None,
barcodeFileAlias=self.barcodeFileAlias,
barcodeFileParser=barcodeFileParser,
**kwargs)
self.shortName = 'CHROMC16U12'
self.longName = 'Chromium 10x, CB: 16bp, UMI: 12bp'
self.autoDetectable = False
self.description = 'R1 starts with a 16bp cell barcode followed by a 12bp UMI.'
| [
"singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods.UmiBarcodeDemuxMethod.__init__"
] | [((263, 538), 'singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods.UmiBarcodeDemuxMethod.__init__', 'UmiBarcodeDemuxMethod.__init__', (['self'], {'umiRead': '(0)', 'umiStart': '(16)', 'umiLength': '(12)', 'barcodeRead': '(0)', 'barcodeStart': '(0)', 'barcodeLength': '(16)', 'random_primer_read': 'None', 'random_primer_length': 'None', 'barcodeFileAlias': 'self.barcodeFileAlias', 'barcodeFileParser': 'barcodeFileParser'}), '(self, umiRead=0, umiStart=16, umiLength=12,\n barcodeRead=0, barcodeStart=0, barcodeLength=16, random_primer_read=\n None, random_primer_length=None, barcodeFileAlias=self.barcodeFileAlias,\n barcodeFileParser=barcodeFileParser, **kwargs)\n', (293, 538), False, 'from singlecellmultiomics.modularDemultiplexer.baseDemultiplexMethods import UmiBarcodeDemuxMethod\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# read CSV data into a "dataframe" - pandas can parse dates
# this will be familiar to R users (not so much matlab users)
df = pd.read_csv('data/SHA.csv', index_col=0, parse_dates=True)
Q = df.SHA_INFLOW_CFS # a pandas series (daily)
# Q = Q.resample('AS-OCT').sum() # annual values
print(Q.autocorr(lag=1))
# plot a correlogram with confidence bounds
pd.plotting.autocorrelation_plot(Q)
plt.xlim([0,365])
plt.show()
from statsmodels.tsa import stattools
pacf,ci = stattools.pacf(Q, nlags=7, alpha=0.05)
plt.plot(pacf, linewidth=2)
plt.plot(ci, linestyle='dashed', color='0.5')
plt.show()
# we did this with pandas to simplify the resampling operations
# but we can also do it with numpy
# (using annual flow values)
Q = df.SHA_INFLOW_CFS.resample('AS-OCT').sum().values # now a numpy array
def autocorr(x,k):
return np.corrcoef(x[:len(x)-k], x[k:])[0,1]
print(autocorr(Q,k=1))
| [
"pandas.read_csv",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"pandas.plotting.autocorrelation_plot",
"statsmodels.tsa.stattools.pacf"
] | [((200, 258), 'pandas.read_csv', 'pd.read_csv', (['"""data/SHA.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('data/SHA.csv', index_col=0, parse_dates=True)\n", (211, 258), True, 'import pandas as pd\n'), ((427, 462), 'pandas.plotting.autocorrelation_plot', 'pd.plotting.autocorrelation_plot', (['Q'], {}), '(Q)\n', (459, 462), True, 'import pandas as pd\n'), ((463, 481), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 365]'], {}), '([0, 365])\n', (471, 481), True, 'import matplotlib.pyplot as plt\n'), ((481, 491), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (489, 491), True, 'import matplotlib.pyplot as plt\n'), ((541, 579), 'statsmodels.tsa.stattools.pacf', 'stattools.pacf', (['Q'], {'nlags': '(7)', 'alpha': '(0.05)'}), '(Q, nlags=7, alpha=0.05)\n', (555, 579), False, 'from statsmodels.tsa import stattools\n'), ((580, 607), 'matplotlib.pyplot.plot', 'plt.plot', (['pacf'], {'linewidth': '(2)'}), '(pacf, linewidth=2)\n', (588, 607), True, 'import matplotlib.pyplot as plt\n'), ((608, 653), 'matplotlib.pyplot.plot', 'plt.plot', (['ci'], {'linestyle': '"""dashed"""', 'color': '"""0.5"""'}), "(ci, linestyle='dashed', color='0.5')\n", (616, 653), True, 'import matplotlib.pyplot as plt\n'), ((654, 664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (662, 664), True, 'import matplotlib.pyplot as plt\n')] |
import json
import os
import unittest
from functools import wraps
import mock
from lambdas import my_pi_lambda
EVENTS = json.load(open(os.path.join(os.path.dirname(__file__), 'sample_events.json')))
def forall_events(f):
@wraps(f)
def wrapper(*args, **kwds):
for event_meta in EVENTS:
kwds['event'] = event_meta['event']
kwds['event_name'] = event_meta['name']
return f(*args, **kwds)
return wrapper
def find_event_by_name(name):
for event_meta in EVENTS:
if event_meta['name'] == name:
return event_meta['event']
raise KeyError(name)
class TestEventParsing(unittest.TestCase):
@forall_events
def test_get_intent(self, event, event_name):
intent = my_pi_lambda.get_intent(event)
self.assertTrue(intent.endswith('Intent'), 'Failed to parse intent in event {}'.format(event_name))
def test_get_intent_failure(self):
event = {
'request': {
'type': 'NotIntent'
}
}
with self.assertRaises(ValueError):
intent = my_pi_lambda.get_intent(event)
@forall_events
def test_get_slots(self, event, event_name):
slots = my_pi_lambda.get_slots(event)
self.assertIsInstance(slots, dict, 'Failed to parse slots in event {}'.format(event_name))
def test_get_slots_play_3(self):
event = find_event_by_name('PLAY_3')
slots = my_pi_lambda.get_slots(event)
self.assertEqual(slots, {'Number': '3'})
class TestPolishRadio(unittest.TestCase):
def setUp(self):
self.event = find_event_by_name('PLAY_3')
def test_play(self):
with mock.patch.object(my_pi_lambda.PiController, 'request_method') as request_mock:
res = my_pi_lambda.lambda_handler(self.event, None)
request_mock.assert_called()
self.assertEqual(res['version'], '1.0')
self.assertIn('response', res)
if __name__ == "__main__":
unittest.main()
| [
"lambdas.my_pi_lambda.get_slots",
"lambdas.my_pi_lambda.lambda_handler",
"functools.wraps",
"mock.patch.object",
"os.path.dirname",
"unittest.main",
"lambdas.my_pi_lambda.get_intent"
] | [((231, 239), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (236, 239), False, 'from functools import wraps\n'), ((1984, 1999), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1997, 1999), False, 'import unittest\n'), ((757, 787), 'lambdas.my_pi_lambda.get_intent', 'my_pi_lambda.get_intent', (['event'], {}), '(event)\n', (780, 787), False, 'from lambdas import my_pi_lambda\n'), ((1220, 1249), 'lambdas.my_pi_lambda.get_slots', 'my_pi_lambda.get_slots', (['event'], {}), '(event)\n', (1242, 1249), False, 'from lambdas import my_pi_lambda\n'), ((1448, 1477), 'lambdas.my_pi_lambda.get_slots', 'my_pi_lambda.get_slots', (['event'], {}), '(event)\n', (1470, 1477), False, 'from lambdas import my_pi_lambda\n'), ((151, 176), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n'), ((1104, 1134), 'lambdas.my_pi_lambda.get_intent', 'my_pi_lambda.get_intent', (['event'], {}), '(event)\n', (1127, 1134), False, 'from lambdas import my_pi_lambda\n'), ((1681, 1743), 'mock.patch.object', 'mock.patch.object', (['my_pi_lambda.PiController', '"""request_method"""'], {}), "(my_pi_lambda.PiController, 'request_method')\n", (1698, 1743), False, 'import mock\n'), ((1779, 1824), 'lambdas.my_pi_lambda.lambda_handler', 'my_pi_lambda.lambda_handler', (['self.event', 'None'], {}), '(self.event, None)\n', (1806, 1824), False, 'from lambdas import my_pi_lambda\n')] |
import os
import re
import time
from itertools import groupby

import googlemaps
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
# Launch a managed Chrome instance (webdriver-manager downloads a matching
# chromedriver) and open the VivaReal listing page for rental apartments in
# Florianopolis, Santa Catarina.
browser = webdriver.Chrome(ChromeDriverManager().install())
url = 'https://www.vivareal.com.br/aluguel/santa-catarina/florianopolis/apartamento_residencial/#onde=BR-Santa_Catarina-NULL-Florianopolis&tipos=apartamento_residencial'
browser.get(url)

# Google Maps API key: read it from the GOOGLE_MAPS_API_KEY environment
# variable so the secret is not hard-coded in source control. The original
# placeholder is kept as the fallback and must be replaced (or the env var
# set) before any geocoding call will succeed.
API_key = os.environ.get('GOOGLE_MAPS_API_KEY', 'INSERIR API KEY')
gmaps = googlemaps.Client(key=API_key)
# Per-listing feature accumulators. The scraping loop appends exactly one
# entry to each list for every property card, keeping all lists index-aligned
# so they can later be assembled into a single table.
(quartos, tamanho, garagem, suite, mobiliado, prices, endereco,
 bairro, suites, banheiros, areaServico, churrasqueira, varanda,
 lavanderia, playground, arCondicionado, salaoFestas, piscina,
 distancia) = ([] for _ in range(19))
for idxPage in range(2,75): # sub 15 para todas as paginas de Florianopolis
print('Iniciando Pagina ' + str(idxPage))
soup = BeautifulSoup(browser.page_source, 'html.parser')
# Tamanho
spans = soup.find_all('span', {'class' : 'property-card__detail-value js-property-card-value property-card__detail-area js-property-card-detail-area'})
spans = spans[0:35]
for span in spans:
temp = span.get_text()
tamanho.append([int(''.join(i)) for is_digit, i in groupby(temp, str.isdigit) if is_digit][0])
# Apartamentos
listagems = soup.find_all('div',{'class': 'property-card__main-content'})
listagems = listagems[0:35]
for l in listagems:
# Tamanho:
type = ('span','class')
property = 'property-card__detail-value js-property-card-value property-card__detail-area js-property-card-detail-area'
temp = l.find_all(type[0], {type[1] : property})
if temp:
temp = temp[0].contents[0]
temp = [int(''.join(i)) for is_digit, i in groupby(temp, str.isdigit) if is_digit]
if temp:
temp = temp[0]
else:
temp = 0
tamanho.append(temp)
else:
tamanho.append(0)
# Quartos
type = ('li','class')
property = 'property-card__detail-item property-card__detail-room js-property-detail-rooms'
temp = l.find_all(type[0], {type[1] : property})
if temp:
temp = temp[0].contents[1].contents[0]
temp = [int(''.join(i)) for is_digit, i in groupby(temp, str.isdigit) if is_digit]
if temp:
temp = temp[0]
else:
temp = 0
quartos.append(temp)
else:
quartos.append(0)
# Suites
type = ('li', 'class')
property = 'property-card__detail-item property-card__detail-item-extra js-property-detail-suites'
temp = l.find_all(type[0], {type[1]: property})
if temp:
temp = temp[0].contents[1].contents[0]
temp = [int(''.join(i)) for is_digit, i in groupby(temp, str.isdigit) if is_digit]
if temp:
temp = temp[0]
else:
temp = 0
suites.append(temp)
else:
suites.append(0)
# Banheiros
type = ('li', 'class')
property = 'property-card__detail-item property-card__detail-bathroom js-property-detail-bathroom'
temp = l.find_all(type[0], {type[1]: property})
if temp:
temp = temp[0].contents[1].contents[0]
temp = [int(''.join(i)) for is_digit, i in groupby(temp, str.isdigit) if is_digit]
if temp:
temp = temp[0]
else:
temp = 0
banheiros.append(temp)
else:
banheiros.append(0)
# Garagem
type = ('li', 'class')
property = 'property-card__detail-item property-card__detail-garage js-property-detail-garages'
temp = l.find_all(type[0], {type[1]: property})
if temp:
temp = temp[0].contents[1].contents[0]
temp = [int(''.join(i)) for is_digit, i in groupby(temp, str.isdigit) if is_digit]
if temp:
temp = temp[0]
else:
temp = 0
garagem.append(temp)
else:
garagem.append(0)
# Amenities
type = ('ul', 'class')
property = 'property-card__amenities'
temp = l.find_all(type[0], {type[1]: property})
temp = str(temp)
if 'Área de serviço' in temp:
areaServico.append(True)
else:
areaServico.append(False)
if 'Churrasqueira' in temp:
churrasqueira.append(True)
else:
churrasqueira.append(False)
if 'Varanda' in temp:
varanda.append(True)
else:
varanda.append(False)
if 'Mobiliado' in temp:
mobiliado.append(True)
else:
mobiliado.append(False)
if 'Lavanderia' in temp:
lavanderia.append(True)
else:
lavanderia.append(False)
if 'Playground' in temp:
playground.append(True)
else:
playground.append(False)
if 'Salão de festas' in temp:
salaoFestas.append(True)
else:
salaoFestas.append(False)
if 'Ar-condicionado' in temp:
arCondicionado.append(True)
else:
arCondicionado.append(False)
if 'Piscina' in temp:
piscina.append(True)
else:
piscina.append(False)
# Endereço
type = ('span', 'class')
property = 'poi__address'
temp = l.find_all(type[0], {type[1]: property})
if temp[0].contents:
temp = temp[0].contents[0]
endereco.append(temp)
my_dist = gmaps.distance_matrix(temp, 'Shopping Beira Mar - Florianopolis')['rows'][0]['elements'][0]
if my_dist['status'] == 'NOT_FOUND':
distancia.append('NaN')
else:
distancia.append(my_dist['distance']['text'])
str2Find = ' - '
if temp.count(str2Find) <= 1:
str2Find = ','
idx1 = temp.find(str2Find)
temp = temp[:idx1]
else:
idx0 = temp.find(str2Find)
temp = temp[idx0+3:]
str2Find = ','
idx1 = temp.find(str2Find)
temp = temp[:idx1]
bairro.append(temp)
else:
endereco.append(0)
bairro.append(0)
distancia.append('NaN')
# Preço
type = ('div', 'class')
property = 'property-card__price js-property-card-prices js-property-card__price-small'
temp = l.find_all(type[0], {type[1]: property})
if temp:
temp = temp[0].contents[0]
str2Find = 'R$'
idx0 = temp.find(str2Find)
temp = temp[idx0+3:]
myString = temp
stringParts = myString.split(".")
newString = "".join(stringParts)
temp = float(newString)
prices.append(temp)
else:
prices.append(0)
newURL = 'https://www.vivareal.com.br/aluguel/santa-catarina/florianopolis/apartamento_residencial/?pagina='+str(idxPage)
browser.get(newURL)
time.sleep(2) # seconds
browser.close()
df = pd.DataFrame(list(zip(tamanho, quartos,suites,banheiros,garagem,areaServico,churrasqueira,varanda,mobiliado,lavanderia,playground,salaoFestas,arCondicionado,piscina,endereco,bairro,prices)),
columns =['Tamanho', 'Quartos','Suites','Banheiros','Garagem','AreaServico','Churrasqueira','Varanda','Mobiliado','Lavanderia','Playground','SalaoFestas','ArCondicionado','Piscina','Endereco','Bairro','Prices'])
df.to_csv('RealFlorianopolis', index=False) | [
"itertools.groupby",
"googlemaps.Client",
"time.sleep",
"bs4.BeautifulSoup",
"webdriver_manager.chrome.ChromeDriverManager"
] | [((554, 584), 'googlemaps.Client', 'googlemaps.Client', ([], {'key': 'API_key'}), '(key=API_key)\n', (571, 584), False, 'import googlemaps\n'), ((1020, 1069), 'bs4.BeautifulSoup', 'BeautifulSoup', (['browser.page_source', '"""html.parser"""'], {}), "(browser.page_source, 'html.parser')\n", (1033, 1069), False, 'from bs4 import BeautifulSoup\n'), ((7593, 7606), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7603, 7606), False, 'import time\n'), ((294, 315), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (313, 315), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((1944, 1970), 'itertools.groupby', 'groupby', (['temp', 'str.isdigit'], {}), '(temp, str.isdigit)\n', (1951, 1970), False, 'from itertools import groupby\n'), ((2504, 2530), 'itertools.groupby', 'groupby', (['temp', 'str.isdigit'], {}), '(temp, str.isdigit)\n', (2511, 2530), False, 'from itertools import groupby\n'), ((3070, 3096), 'itertools.groupby', 'groupby', (['temp', 'str.isdigit'], {}), '(temp, str.isdigit)\n', (3077, 3096), False, 'from itertools import groupby\n'), ((3633, 3659), 'itertools.groupby', 'groupby', (['temp', 'str.isdigit'], {}), '(temp, str.isdigit)\n', (3640, 3659), False, 'from itertools import groupby\n'), ((4197, 4223), 'itertools.groupby', 'groupby', (['temp', 'str.isdigit'], {}), '(temp, str.isdigit)\n', (4204, 4223), False, 'from itertools import groupby\n'), ((1385, 1411), 'itertools.groupby', 'groupby', (['temp', 'str.isdigit'], {}), '(temp, str.isdigit)\n', (1392, 1411), False, 'from itertools import groupby\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 12:41:40 2021
@author: jtm545
"""
#%%
import sys
sys.path.insert(0, '../')
import random
from colour.plotting import plot_chromaticity_diagram_CIE1931
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.optimize import minimize
from silentsub.problem import SilentSubstitutionProblem
from silentsub.colorfunc import LMS_to_xyY, xyY_to_LMS
from silentsub.plotting import stim_plot
sns.set_context('notebook')
sns.set_style('whitegrid')
#%%
spds = pd.read_csv('../data/S2_corrected_oo_spectra.csv', index_col=['led','intensity'])
spds.index.rename(['Primary', 'Setting'], inplace=True)
spds.columns = pd.Int64Index(spds.columns.astype(int))
spds.columns.name = 'Wavelength'
#%%
# list of colors for the primaries
colors = ['blueviolet', 'royalblue', 'darkblue', 'blue', 'cyan',
'green', 'lime', 'orange', 'red', 'darkred']
#%% Test pseudo inverse
ss = SilentSubstitutionProblem(
resolutions=[4095]*10,
colors=colors,
spds=spds,
spd_binwidth=1,
isolate=['S'],
silence=['I', 'M', 'L'],
target_contrast=.5
)
bg = [.5 for val in range(10)]
contrasts = [0.5, 0., 0., 0., 0.]
mod = ss.pseudo_inverse_contrast(bg, contrasts)
mod += bg
ss.predict_multiprimary_spd(mod, 'mod').plot(legend=True);
ss.predict_multiprimary_spd(bg, 'notmod').plot(legend=True);
ss.background=bg
#%%
constraints = [{
'type': 'eq',
'fun': ss.silencing_constraint
}]
result = minimize(
fun=ss.objective_function,
x0=mod,
args=(),
method='SLSQP',
jac=None,
hess=None,
hessp=None,
bounds=ss.bounds,
constraints=constraints,
tol=1e-08,
callback=None,
options={'disp': True},
)
ss.debug_callback_plot(result.x) | [
"sys.path.insert",
"pandas.read_csv",
"seaborn.set_context",
"scipy.optimize.minimize",
"seaborn.set_style",
"silentsub.problem.SilentSubstitutionProblem"
] | [((124, 149), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (139, 149), False, 'import sys\n'), ((492, 519), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (507, 519), True, 'import seaborn as sns\n'), ((520, 546), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (533, 546), True, 'import seaborn as sns\n'), ((560, 646), 'pandas.read_csv', 'pd.read_csv', (['"""../data/S2_corrected_oo_spectra.csv"""'], {'index_col': "['led', 'intensity']"}), "('../data/S2_corrected_oo_spectra.csv', index_col=['led',\n 'intensity'])\n", (571, 646), True, 'import pandas as pd\n'), ((978, 1140), 'silentsub.problem.SilentSubstitutionProblem', 'SilentSubstitutionProblem', ([], {'resolutions': '([4095] * 10)', 'colors': 'colors', 'spds': 'spds', 'spd_binwidth': '(1)', 'isolate': "['S']", 'silence': "['I', 'M', 'L']", 'target_contrast': '(0.5)'}), "(resolutions=[4095] * 10, colors=colors, spds=spds,\n spd_binwidth=1, isolate=['S'], silence=['I', 'M', 'L'], target_contrast=0.5\n )\n", (1003, 1140), False, 'from silentsub.problem import SilentSubstitutionProblem\n'), ((1514, 1717), 'scipy.optimize.minimize', 'minimize', ([], {'fun': 'ss.objective_function', 'x0': 'mod', 'args': '()', 'method': '"""SLSQP"""', 'jac': 'None', 'hess': 'None', 'hessp': 'None', 'bounds': 'ss.bounds', 'constraints': 'constraints', 'tol': '(1e-08)', 'callback': 'None', 'options': "{'disp': True}"}), "(fun=ss.objective_function, x0=mod, args=(), method='SLSQP', jac=\n None, hess=None, hessp=None, bounds=ss.bounds, constraints=constraints,\n tol=1e-08, callback=None, options={'disp': True})\n", (1522, 1717), False, 'from scipy.optimize import minimize\n')] |
"""
Exception handling used by **MSL-Qt**.
"""
import logging
import traceback
from . import QtWidgets, Qt, application
logger = logging.getLogger(__name__)
def excepthook(exc_type, exc_obj, exc_traceback):
"""Displays unhandled exceptions in a :class:`QtWidgets.QMessageBox`.
See :func:`sys.excepthook` for more details.
To implement the :func:`excepthook` in your own application include the following:
.. code-block:: python
import sys
from msl import qt
sys.excepthook = qt.excepthook
"""
def event_handler(e):
"""Resize the QMessageBox"""
result = QtWidgets.QMessageBox.event(msg, e)
detailed_text = msg.findChild(QtWidgets.QTextEdit)
if not detailed_text or not detailed_text.isVisible():
return result
detailed_text.setMaximumSize(QtWidgets.QWIDGETSIZE_MAX, QtWidgets.QWIDGETSIZE_MAX)
detailed_text.setSizePolicy(Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding)
msg.setMaximumSize(QtWidgets.QWIDGETSIZE_MAX, QtWidgets.QWIDGETSIZE_MAX)
msg.setSizePolicy(Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding)
return result
details = ''.join(
['Traceback (most recent call last):\n'] +
traceback.format_tb(exc_traceback) +
[exc_obj.__class__.__name__ + ': ' + str(exc_obj)]
)
logger.error(details)
# ensure that a QApplication exists
app = application()
# add a prefix to the title bar
w = app.activeWindow()
prefix = 'MSL' if w is None or not w.windowTitle() else w.windowTitle()
msg = QtWidgets.QMessageBox()
# want to be able to resize the QMessageBox to allow for the
# DetailedText to be read easier
# see http://www.qtcentre.org/threads/24888-Resizing-a-QMessageBox
msg.event = event_handler
msg.setSizeGripEnabled(True)
msg.setWindowTitle(prefix + ' || Unhandled Exception')
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setText(exc_obj.__class__.__name__ + ':')
msg.setInformativeText(str(exc_obj))
msg.setDetailedText(details)
msg.raise_()
msg.exec_()
| [
"logging.getLogger",
"traceback.format_tb"
] | [((131, 158), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (148, 158), False, 'import logging\n'), ((1258, 1292), 'traceback.format_tb', 'traceback.format_tb', (['exc_traceback'], {}), '(exc_traceback)\n', (1277, 1292), False, 'import traceback\n')] |
import unittest
from calculator import multiply
class TestSomething(unittest.TestCase):
def test_multiply(self):
self.assertEqual(6, multiply(2,3))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"calculator.multiply"
] | [((181, 196), 'unittest.main', 'unittest.main', ([], {}), '()\n', (194, 196), False, 'import unittest\n'), ((137, 151), 'calculator.multiply', 'multiply', (['(2)', '(3)'], {}), '(2, 3)\n', (145, 151), False, 'from calculator import multiply\n')] |
#--SHAPES and TEXTS--#
import cv2
import numpy as np
#We are going to use the numpy library to create our matrix
#0 stands for black and 1 stands for white
img = np.zeros((512,512,3),np.uint8) # (height,width) and the channel, it gives us value range 0-255
#print(img)
#img[200:300,100:300] = 255,0,0 #whole image is img[:]
#the origin of the image is top left corner in OpenCV
cv2.line(img,(0,0),(img.shape[1],img.shape[0]),(0,255,0),3) #img.shape[1] is width and img.shape[0] is height. Now we got a diagonal line
cv2.line(img,(0,0),(300,300),(200,255,200),3) #image,start,end,color,thickness
cv2.rectangle(img,(0,0),(250,350),(0,0,255),2) #start,end,color,thickness etc. Write cv2.FILLED instead of thickness if you want to fill ur shape
cv2.circle(img,(450,50),30,(255,255,0),5) #center,radius,color,thickness
cv2.putText(img," OPENCV ", (300,100),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,150,0),3) #text,position,font,scale,color,thickness etc. Scale=bigness
cv2.imshow("Matrix", img)
cv2.waitKey(0) | [
"cv2.rectangle",
"cv2.line",
"cv2.putText",
"cv2.imshow",
"cv2.circle",
"numpy.zeros",
"cv2.waitKey"
] | [((173, 206), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (181, 206), True, 'import numpy as np\n'), ((395, 462), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(img.shape[1], img.shape[0])', '(0, 255, 0)', '(3)'], {}), '(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), 3)\n', (403, 462), False, 'import cv2\n'), ((534, 587), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(300, 300)', '(200, 255, 200)', '(3)'], {}), '(img, (0, 0), (300, 300), (200, 255, 200), 3)\n', (542, 587), False, 'import cv2\n'), ((614, 668), 'cv2.rectangle', 'cv2.rectangle', (['img', '(0, 0)', '(250, 350)', '(0, 0, 255)', '(2)'], {}), '(img, (0, 0), (250, 350), (0, 0, 255), 2)\n', (627, 668), False, 'import cv2\n'), ((761, 809), 'cv2.circle', 'cv2.circle', (['img', '(450, 50)', '(30)', '(255, 255, 0)', '(5)'], {}), '(img, (450, 50), 30, (255, 255, 0), 5)\n', (771, 809), False, 'import cv2\n'), ((835, 926), 'cv2.putText', 'cv2.putText', (['img', '""" OPENCV """', '(300, 100)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 150, 0)', '(3)'], {}), "(img, ' OPENCV ', (300, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,\n 150, 0), 3)\n", (846, 926), False, 'import cv2\n'), ((979, 1004), 'cv2.imshow', 'cv2.imshow', (['"""Matrix"""', 'img'], {}), "('Matrix', img)\n", (989, 1004), False, 'import cv2\n'), ((1008, 1022), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1019, 1022), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from meta.basic.common import RPCMixIn
class _BasicApi(RPCMixIn):
def get_lineage(self, params):
try:
query_params = {
"entity_type": params["type_name"],
"attr_value": params["qualified_name"],
}
for key in params:
if key in ("direction", "depth", "backend_type", "only_user_entity"):
query_params[key] = params[key]
if key == "extra_retrieve":
query_params[key] = json.loads(params[key])
ret = self.entity_query_lineage(**query_params)
return ret.result
except Exception as e:
raise e
def get_complex_search(self, statement, backend_type="mysql", **kwargs):
query_params = {
"token_pkey": kwargs.get("token_pkey", ""),
"token_msg": kwargs.get("token_msg", ""),
}
try:
rpc_response = self.entity_complex_search(statement, backend_type=backend_type, **query_params)
return rpc_response.result
except Exception as e:
raise e
BasicApi = _BasicApi()
| [
"json.loads"
] | [((1903, 1926), 'json.loads', 'json.loads', (['params[key]'], {}), '(params[key])\n', (1913, 1926), False, 'import json\n')] |
# coding: utf-8
"""
Fulfillment API
Use the Fulfillment API to complete the process of packaging, addressing, handling, and shipping each order on behalf of the seller, in accordance with the payment method and timing specified at checkout. # noqa: E501
OpenAPI spec version: v1.19.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EbayTaxReference(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'value': 'str'
}
attribute_map = {
'name': 'name',
'value': 'value'
}
def __init__(self, name=None, value=None): # noqa: E501
"""EbayTaxReference - a model defined in Swagger""" # noqa: E501
self._name = None
self._value = None
self.discriminator = None
if name is not None:
self.name = name
if value is not None:
self.value = value
@property
def name(self):
"""Gets the name of this EbayTaxReference. # noqa: E501
This field value is returned to indicate the VAT tax type, which will vary by country/region. This string value will be one of the following:<ul><li><code>ABN</code>: if this string is returned, the ID in the <strong>value</strong> field is an Australia tax ID</li><li><code>IOSS</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay EU or UK IOSS number</li><li><code>IRD</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay New Zealand tax ID</li><li><code>OSS</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay Germany VAT ID</li><li><code>VOEC</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay Norway tax ID</li></ul> # noqa: E501
:return: The name of this EbayTaxReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EbayTaxReference.
This field value is returned to indicate the VAT tax type, which will vary by country/region. This string value will be one of the following:<ul><li><code>ABN</code>: if this string is returned, the ID in the <strong>value</strong> field is an Australia tax ID</li><li><code>IOSS</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay EU or UK IOSS number</li><li><code>IRD</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay New Zealand tax ID</li><li><code>OSS</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay Germany VAT ID</li><li><code>VOEC</code>: if this string is returned, the ID in the <strong>value</strong> field is an eBay Norway tax ID</li></ul> # noqa: E501
:param name: The name of this EbayTaxReference. # noqa: E501
:type: str
"""
self._name = name
@property
def value(self):
"""Gets the value of this EbayTaxReference. # noqa: E501
The value returned in this field is the VAT identifier number (VATIN), which will vary by country/region. This field will be returned if VAT tax is applicable for the order. The <strong>name</strong> field indicates the VAT tax type, which will vary by country/region: <ul><li><strong>ABN</strong>: <em>eBay AU tax ID</em></li><li><strong>IOSS</strong>: <em>eBay EU IOSS number</em> / <em>eBay UK IOSS number</em></li><li><strong>IRD</strong>: <em>eBay NZ tax ID</em></li><li><strong>OSS</strong>: <em>eBay DE VAT ID</em></li><li><strong>VOEC</strong>: <em>eBay NO number</em></li></ul> # noqa: E501
:return: The value of this EbayTaxReference. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this EbayTaxReference.
The value returned in this field is the VAT identifier number (VATIN), which will vary by country/region. This field will be returned if VAT tax is applicable for the order. The <strong>name</strong> field indicates the VAT tax type, which will vary by country/region: <ul><li><strong>ABN</strong>: <em>eBay AU tax ID</em></li><li><strong>IOSS</strong>: <em>eBay EU IOSS number</em> / <em>eBay UK IOSS number</em></li><li><strong>IRD</strong>: <em>eBay NZ tax ID</em></li><li><strong>OSS</strong>: <em>eBay DE VAT ID</em></li><li><strong>VOEC</strong>: <em>eBay NO number</em></li></ul> # noqa: E501
:param value: The value of this EbayTaxReference. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EbayTaxReference, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EbayTaxReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"six.iteritems"
] | [((5200, 5233), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (5213, 5233), False, 'import six\n')] |
from unittest import TestCase
from datetime import datetime, timedelta
from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada
class TestTarefa(TestCase):
def test_tarefa_init(self):
tarefa = Tarefa('Lavar pratos')
self.assertEqual('Lavar pratos', tarefa.descricao)
self.assertFalse(tarefa.concluida)
self.assertIsNotNone(tarefa.criacao)
def test_tarefa_conclui(self):
tarefa = Tarefa('Lavar pratos')
tarefa.conclui()
self.assertTrue(tarefa.concluida)
def test_tarefa_str(self):
tarefa_1 = Tarefa('Lavar pratos')
tarefa_2 = Tarefa('Ir ao supermercado',
vencimento=datetime.now() - timedelta(days=1))
tarefa_3 = Tarefa('Estudar')
tarefa_3.conclui()
self.assertEqual('Lavar pratos', tarefa_1.__str__())
self.assertEqual('Ir ao supermercado (Vencida)', tarefa_2.__str__())
self.assertEqual('Estudar (Concluida)', tarefa_3.__str__())
def test_tarefa_str_concluida(self):
tarefa = Tarefa('Lavar pratos')
tarefa.conclui()
self.assertEqual('Lavar pratos (Concluida)', tarefa.__str__())
class TestProjeto(TestCase):
def test_projeto_init(self):
projeto = Projeto('Casa')
self.assertEqual('Casa', projeto.nome)
self.assertListEqual([], projeto.tarefas)
def test_projeto_adiciona_tarefa(self):
projeto = Projeto('Casa')
tarefa = Tarefa('lavar prato')
projeto.add(tarefa)
self.assertListEqual([tarefa], projeto.tarefas)
def test_projeto_adiciona_tarefa_passando_descricao(self):
projeto = Projeto('Casa')
projeto.add('lavar prato')
tarefa = projeto.tarefas[-1]
self.assertIsInstance(tarefa, Tarefa)
def test_projeto_iter(self):
projeto = Projeto('Casa')
projeto.add('lavar prato')
projeto.add('lavar roupa')
for tarefa in projeto:
self.assertIsInstance(tarefa, Tarefa)
def test_projeto_iadd(self):
projeto = Projeto('Casa')
lavar_prato = Tarefa('lavar prato')
projeto += lavar_prato
self.assertListEqual([lavar_prato], projeto.tarefas)
def test_projeto_pendentes(self):
projeto = Projeto('Casa')
projeto += 'lavar prato'
projeto += 'ir ao supermercado'
self.assertEqual(2, len(projeto.pendentes))
def test_projeto_procura(self):
projeto = Projeto('Casa')
projeto += 'lavar prato'
projeto += 'ir ao supermercado'
tarefa = projeto.procura('lavar prato')
self.assertEqual('lavar prato', tarefa.descricao)
def test_projeto_procura_quando_nao_for_encontrado(self):
projeto = Projeto('Casa')
projeto += 'lavar prato'
projeto += 'ir ao supermercado'
with self.assertRaises(TarefaNaoEncontrada):
projeto.procura('lavar pratos')
| [
"datetime.datetime.now",
"datetime.timedelta",
"tarefa_v2.Projeto",
"tarefa_v2.Tarefa"
] | [((210, 232), 'tarefa_v2.Tarefa', 'Tarefa', (['"""Lavar pratos"""'], {}), "('Lavar pratos')\n", (216, 232), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((434, 456), 'tarefa_v2.Tarefa', 'Tarefa', (['"""Lavar pratos"""'], {}), "('Lavar pratos')\n", (440, 456), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((577, 599), 'tarefa_v2.Tarefa', 'Tarefa', (['"""Lavar pratos"""'], {}), "('Lavar pratos')\n", (583, 599), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((741, 758), 'tarefa_v2.Tarefa', 'Tarefa', (['"""Estudar"""'], {}), "('Estudar')\n", (747, 758), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((1051, 1073), 'tarefa_v2.Tarefa', 'Tarefa', (['"""Lavar pratos"""'], {}), "('Lavar pratos')\n", (1057, 1073), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((1254, 1269), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (1261, 1269), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((1431, 1446), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (1438, 1446), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((1465, 1486), 'tarefa_v2.Tarefa', 'Tarefa', (['"""lavar prato"""'], {}), "('lavar prato')\n", (1471, 1486), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((1655, 1670), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (1662, 1670), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((1844, 1859), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (1851, 1859), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((2065, 2080), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (2072, 2080), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((2104, 2125), 'tarefa_v2.Tarefa', 'Tarefa', (['"""lavar prato"""'], {}), 
"('lavar prato')\n", (2110, 2125), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((2277, 2292), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (2284, 2292), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((2475, 2490), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (2482, 2490), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((2754, 2769), 'tarefa_v2.Projeto', 'Projeto', (['"""Casa"""'], {}), "('Casa')\n", (2761, 2769), False, 'from tarefa_v2 import Tarefa, Projeto, TarefaNaoEncontrada\n'), ((685, 699), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (697, 699), False, 'from datetime import datetime, timedelta\n'), ((702, 719), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (711, 719), False, 'from datetime import datetime, timedelta\n')] |
import copy
import unittest
from keydra.config import KeydraConfig
from keydra.exceptions import ConfigException
from unittest.mock import MagicMock
from unittest.mock import patch
ENVS = {
'dev': {
'description': 'AWS Development Environment',
'type': 'aws',
'access': 'dev',
'id': '001122',
'secrets': [
'aws_deployments',
'splunk',
'aws_deployment_just_rotate',
'cloudflare_canary',
'okta_canary',
'office365_adhoc',
]
},
'uat': {
'description': 'AWS UAT Environment',
'type': 'aws',
'access': 'uat',
'id': '334455',
'secrets': ['aws_deployments']
},
'prod': {
'description': 'AWS Prod Environment',
'type': 'aws',
'access': 'production',
'id': 667788,
'secrets': ['aws_deployments']
},
'control': {
'description': 'AWS Master Environment',
'type': 'aws',
'access': 'production',
'id': 991122,
'secrets': ['aws_deployments']
},
}
SECRETS = {
'aws_deployments':
{
'key': 'km_managed_api_user',
'provider': 'IAM',
'rotate': 'nightly',
'distribute': [
{
'key': 'KM_{ENV}_AWS_ACCESS_ID',
'provider': 'bitbucket',
'source': 'key',
'envs': [
'*'
],
'config': {
'scope': 'account'
}
},
{
'key': 'KM_{ENV}_AWS_SECRET_ACCESS_KEY',
'provider': 'bitbucket',
'source': 'secret',
'envs': [
'*'
],
'config': {
'scope': 'account'
}
},
{
'key': 'KM_MANAGED_AWS_ACCESS_ID',
'provider': 'bitbucket',
'source': 'key',
'envs': [
'dev'
],
'config': {
'scope': 'account'
}
},
{
'key': 'KM_MANAGED_AWS_SECRET_ACCESS_KEY',
'provider': 'bitbucket',
'source': 'secret',
'envs': [
'dev'
],
'config': {
'scope': 'account'
}
}
]
},
'aws_deployment_just_rotate':
{
'key': 'km_managed_just_rotate',
'provider': 'IAM',
'rotate': 'nightly'
},
'splunk':
{
'key': 'splunk',
'provider': 'salesforce',
'rotate': 'monthly'
},
'cloudflare_canary':
{
'key': 'cloudflare_canary_key',
'provider': 'cloudflare',
'rotate': 'canaries'
},
'okta_canary':
{
'key': 'okta_canary_key',
'provider': 'okta',
'rotate': 'canaries'
},
'office365_adhoc':
{
'key': 'control_secrets',
'provider': 'office365',
'rotate': 'adhoc'
}
}
SECRETS_S = {
'splunk':
{
'key': 'splunk',
'provider': 'salesforce',
'rotate': 'monthly',
'distribute': [{'provider': 'secretsmanager', 'envs': ['dev']}]
}
}
ENV_CONFIG = {
'provider': 'bitbucket',
'config': {
'account_username': 'acct_user',
'secrets': {
'repository': 'secrets_repo',
'path': 'secrets_path',
'filetype': 'secrets_filetype'
},
'environments': {
'repository': 'envs_repo',
'path': 'envs_path',
'filetype': 'envs_filetype'
}
}
}
class TestConfig(unittest.TestCase):
def setUp(self):
self.session = MagicMock()
self.client = KeydraConfig(
session=self.session,
config=ENV_CONFIG
)
def test__dodgy_config(self):
with self.assertRaises(ConfigException):
KeydraConfig(
session=MagicMock(),
config={}
)
with self.assertRaises(ConfigException):
KeydraConfig(
session=MagicMock(),
config={'provider': {}}
)
def test__validate_spec_environments(self):
envs = copy.deepcopy(ENVS)
secrets = copy.deepcopy(SECRETS)
self.client._validate_spec(envs, secrets)
envs['prod'].pop('type')
with self.assertRaises(ConfigException):
self.client._validate_spec(envs, secrets)
envs = copy.deepcopy(ENVS)
envs['prod'].pop('id')
with self.assertRaises(ConfigException):
self.client._validate_spec(envs, secrets)
with self.assertRaises(ConfigException):
secrets = copy.deepcopy(SECRETS)
secrets['aws_deployments']['distribute'][0]['envs'] = ['notanenv']
self.client._validate_spec(ENVS, secrets)
def test__validate_spec_secrets(self):
envs = copy.deepcopy(ENVS)
secrets = copy.deepcopy(SECRETS)
self.client._validate_spec(envs, secrets)
secrets['aws_deployments'].pop('provider')
with self.assertRaises(ConfigException):
self.client._validate_spec(envs, secrets)
secrets = copy.deepcopy(SECRETS)
secrets['aws_deployments'].pop('key')
with self.assertRaises(ConfigException):
self.client._validate_spec(envs, secrets)
secrets = copy.deepcopy(SECRETS)
secrets['aws_deployments']['distribute'][0].pop('provider')
with self.assertRaises(ConfigException):
self.client._validate_spec(envs, secrets)
def test__guess_current_environment(self):
with patch.object(self.client, '_fetch_current_account') as mk_fca:
mk_fca.return_value = 334455
self.assertEquals(
self.client._guess_current_environment(ENVS), 'uat'
)
mk_fca.return_value = 667788
self.assertEquals(
self.client._guess_current_environment(ENVS), 'prod'
)
mk_fca.return_value = 999999
with self.assertRaises(ConfigException):
self.client._guess_current_environment(ENVS)
def test__filter(self):
with patch.object(
self.client, '_guess_current_environment'
) as mk_gce:
mk_gce.return_value = 'prod'
filtered = self.client._filter(ENVS, SECRETS, rotate='nightly')
self.assertEqual(len(filtered), 1)
self.assertEqual(len(filtered[0]['distribute']), 2)
mk_gce.return_value = 'dev'
filtered = self.client._filter(ENVS, SECRETS, rotate='nightly')
self.assertEqual(len(filtered), 2)
self.assertEqual(filtered[0]['key'], 'km_managed_api_user')
self.assertEqual(len(filtered[0]['distribute']), 4)
mk_gce.return_value = 'dev'
filtered = self.client._filter(
ENVS, SECRETS, requested_secrets=[], rotate='nightly'
)
self.assertEqual(len(filtered), 2)
self.assertEqual(filtered[0]['key'], 'km_managed_api_user')
self.assertEqual(len(filtered[0]['distribute']), 4)
mk_gce.return_value = 'dev'
filtered = self.client._filter(
ENVS,
SECRETS,
requested_secrets=['aws_deployment_just_rotate'],
rotate='nightly'
)
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0]['key'], 'km_managed_just_rotate')
mk_gce.return_value = 'dev'
filtered = self.client._filter(ENVS, SECRETS, rotate='monthly')
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0]['key'], 'splunk')
mk_gce.return_value = 'dev'
filtered = self.client._filter(ENVS, SECRETS_S, rotate='monthly')
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0]['key'], 'splunk')
self.assertEqual(
filtered[0]['distribute'][0]['provider'], 'secretsmanager'
)
mk_gce.return_value = 'dev'
filtered = self.client._filter(
ENVS, SECRETS_S, rotate='adhoc', requested_secrets=['splunk']
)
self.assertEqual(len(filtered), 1)
self.assertEqual(filtered[0]['key'], 'splunk')
self.assertEqual(
filtered[0]['distribute'][0]['provider'], 'secretsmanager'
)
mk_gce.return_value = 'dev'
filtered = self.client._filter(ENVS, SECRETS, rotate='canaries')
self.assertEqual(len(filtered), 2)
self.assertEqual(filtered[0]['key'], 'cloudflare_canary_key')
self.assertEqual(filtered[1]['key'], 'okta_canary_key')
@patch('keydra.loader.build_client')
def test_load_secret_specs(self, mk_bc):
with patch.object(self.client, '_filter') as mk_fba:
with patch.object(self.client, '_validate_spec') as mk_vc:
self.client.load_secrets()
mk_bc.assert_called_once_with(ENV_CONFIG['provider'], None)
mk_fba.assert_called()
mk_vc.assert_called()
| [
"copy.deepcopy",
"unittest.mock.MagicMock",
"keydra.config.KeydraConfig",
"unittest.mock.patch.object",
"unittest.mock.patch"
] | [((9066, 9101), 'unittest.mock.patch', 'patch', (['"""keydra.loader.build_client"""'], {}), "('keydra.loader.build_client')\n", (9071, 9101), False, 'from unittest.mock import patch\n'), ((3889, 3900), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3898, 3900), False, 'from unittest.mock import MagicMock\n'), ((3923, 3976), 'keydra.config.KeydraConfig', 'KeydraConfig', ([], {'session': 'self.session', 'config': 'ENV_CONFIG'}), '(session=self.session, config=ENV_CONFIG)\n', (3935, 3976), False, 'from keydra.config import KeydraConfig\n'), ((4429, 4448), 'copy.deepcopy', 'copy.deepcopy', (['ENVS'], {}), '(ENVS)\n', (4442, 4448), False, 'import copy\n'), ((4467, 4489), 'copy.deepcopy', 'copy.deepcopy', (['SECRETS'], {}), '(SECRETS)\n', (4480, 4489), False, 'import copy\n'), ((4695, 4714), 'copy.deepcopy', 'copy.deepcopy', (['ENVS'], {}), '(ENVS)\n', (4708, 4714), False, 'import copy\n'), ((5139, 5158), 'copy.deepcopy', 'copy.deepcopy', (['ENVS'], {}), '(ENVS)\n', (5152, 5158), False, 'import copy\n'), ((5177, 5199), 'copy.deepcopy', 'copy.deepcopy', (['SECRETS'], {}), '(SECRETS)\n', (5190, 5199), False, 'import copy\n'), ((5426, 5448), 'copy.deepcopy', 'copy.deepcopy', (['SECRETS'], {}), '(SECRETS)\n', (5439, 5448), False, 'import copy\n'), ((5619, 5641), 'copy.deepcopy', 'copy.deepcopy', (['SECRETS'], {}), '(SECRETS)\n', (5632, 5641), False, 'import copy\n'), ((4923, 4945), 'copy.deepcopy', 'copy.deepcopy', (['SECRETS'], {}), '(SECRETS)\n', (4936, 4945), False, 'import copy\n'), ((5876, 5927), 'unittest.mock.patch.object', 'patch.object', (['self.client', '"""_fetch_current_account"""'], {}), "(self.client, '_fetch_current_account')\n", (5888, 5927), False, 'from unittest.mock import patch\n'), ((6447, 6502), 'unittest.mock.patch.object', 'patch.object', (['self.client', '"""_guess_current_environment"""'], {}), "(self.client, '_guess_current_environment')\n", (6459, 6502), False, 'from unittest.mock import patch\n'), ((9160, 9196), 
'unittest.mock.patch.object', 'patch.object', (['self.client', '"""_filter"""'], {}), "(self.client, '_filter')\n", (9172, 9196), False, 'from unittest.mock import patch\n'), ((9225, 9268), 'unittest.mock.patch.object', 'patch.object', (['self.client', '"""_validate_spec"""'], {}), "(self.client, '_validate_spec')\n", (9237, 9268), False, 'from unittest.mock import patch\n'), ((4145, 4156), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4154, 4156), False, 'from unittest.mock import MagicMock\n'), ((4298, 4309), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4307, 4309), False, 'from unittest.mock import MagicMock\n')] |
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, BatchNormalization, Lambda, Concatenate, Conv2DTranspose, Reshape, ReLU
from tensorflow.keras.applications import DenseNet121
# tf.config.experimental_run_functions_eagerly(True)
# with tf.device("/gpu:0"):
class Dronet(Model):
def __init__(self, num_outputs, include_top=True):
super(Dronet, self).__init__()
self.include_top = include_top
self.create_model(num_outputs)
@tf.function
def call(self, img):
# Input
# x = DenseNet121(include_top=self.include_top, weights=None, classes = 10) (img)
# model_d = DenseNet121(weights='imagenet', include_top=False, input_shape=(128, 128, 3))
model_d = DenseNet121(include_top=self.include_top, weights=None, classes=10)(img)
if self.include_top:
model_d = tf.keras.layers.Activation('relu')(model_d)
model_d = self.dense0(model_d)
model_d = self.dense1(model_d)
model_d = self.dense2(model_d)
return model_d
'''
model_d = tf.keras.layers.Activation('relu')(model_d)
model_d = self.dense0(model_d)
model_d = self.dense1(model_d)
gate_pose = self.dense2(model_d)
return gate_pose
'''
@tf.function
def create_model(self, num_outputs):
print('[Dronet] Starting dronet')
self.dense0 = tf.keras.layers.Dense(units=64, activation='relu')
self.dense1 = tf.keras.layers.Dense(units=32, activation='relu')
self.dense2 = tf.keras.layers.Dense(units=num_outputs, activation='linear')
'''
self.dense0 = DenseNet121(include_top=self.include_top, weights=None, classes=num_outputs)
self.dense1 = DenseNet121(include_top=self.include_top, weights=None, classes=num_outputs)
self.dense2 = DenseNet121(include_top=self.include_top, weights=None, classes=num_outputs)
'''
print('[Dronet] Done with dronet') | [
"tensorflow.keras.applications.DenseNet121",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Activation"
] | [((1471, 1521), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(64)', 'activation': '"""relu"""'}), "(units=64, activation='relu')\n", (1492, 1521), True, 'import tensorflow as tf\n'), ((1544, 1594), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(32)', 'activation': '"""relu"""'}), "(units=32, activation='relu')\n", (1565, 1594), True, 'import tensorflow as tf\n'), ((1617, 1678), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'num_outputs', 'activation': '"""linear"""'}), "(units=num_outputs, activation='linear')\n", (1638, 1678), True, 'import tensorflow as tf\n'), ((795, 862), 'tensorflow.keras.applications.DenseNet121', 'DenseNet121', ([], {'include_top': 'self.include_top', 'weights': 'None', 'classes': '(10)'}), '(include_top=self.include_top, weights=None, classes=10)\n', (806, 862), False, 'from tensorflow.keras.applications import DenseNet121\n'), ((919, 953), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (945, 953), True, 'import tensorflow as tf\n')] |
from box import Box
from src import repos
from src.processors import SelfIteratingProcessor
from src.processors import use_cases
def CallbackDelivery(config: Box = None):
use_case = use_cases.DeliverCallbackUseCase(
delivery_outbox_repo=repos.DeliveryOutbox(config.DELIVERY_OUTBOX_REPO),
topic_base_self_url=config.TOPIC_BASE_SELF_URL,
channel_url=config.CHANNEL_URL
)
return SelfIteratingProcessor(use_case=use_case)
| [
"src.processors.SelfIteratingProcessor",
"src.repos.DeliveryOutbox"
] | [((414, 455), 'src.processors.SelfIteratingProcessor', 'SelfIteratingProcessor', ([], {'use_case': 'use_case'}), '(use_case=use_case)\n', (436, 455), False, 'from src.processors import SelfIteratingProcessor\n'), ((251, 300), 'src.repos.DeliveryOutbox', 'repos.DeliveryOutbox', (['config.DELIVERY_OUTBOX_REPO'], {}), '(config.DELIVERY_OUTBOX_REPO)\n', (271, 300), False, 'from src import repos\n')] |
from django.shortcuts import get_object_or_404, redirect, render
from feincms3.plugins import external, html, richtext
from feincms3.regions import Regions
from feincms3.renderer import TemplatePluginRenderer
from .models import HTML, External, Image, Page, RichText, Snippet
renderer = TemplatePluginRenderer()
renderer.register_string_renderer(RichText, richtext.render_richtext)
renderer.register_template_renderer(Image, "renderer/image.html")
Snippet.register_with(renderer)
renderer.register_string_renderer(External, external.render_external)
renderer.register_string_renderer(HTML, html.render_html)
def page_detail(request, path=None):
page = get_object_or_404(
Page.objects.active(), path=("/%s/" % path) if path else "/"
)
page.activate_language(request)
if page.redirect_to_url or page.redirect_to_page:
return redirect(page.redirect_to_url or page.redirect_to_page)
return render(
request,
page.template.template_name,
{
"page": page,
"regions": Regions.from_item(
item=page, renderer=renderer, inherit_from=page.ancestors().reverse()
),
},
)
| [
"django.shortcuts.redirect",
"feincms3.renderer.TemplatePluginRenderer"
] | [((291, 315), 'feincms3.renderer.TemplatePluginRenderer', 'TemplatePluginRenderer', ([], {}), '()\n', (313, 315), False, 'from feincms3.renderer import TemplatePluginRenderer\n'), ((862, 917), 'django.shortcuts.redirect', 'redirect', (['(page.redirect_to_url or page.redirect_to_page)'], {}), '(page.redirect_to_url or page.redirect_to_page)\n', (870, 917), False, 'from django.shortcuts import get_object_or_404, redirect, render\n')] |
import os
from django.conf import settings
from django.contrib.auth import logout as django_logout
from django.contrib.sites.models import Site
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.generic import View, TemplateView
from luna_django_commons.app.mixins import get_login_context
from .forms import (
B2DropProviderForm,
DatafileForm,
DatafileUpdateForm,
DatasetAddFileForm,
DatasetForm,
DropboxProviderForm,
FolderForm,
ForgotPasswordForm,
GDriveProviderForm,
LoginForm,
PasswordChangeForm,
RegisterForm,
ResetPasswordForm,
S3ProviderForm,
WLWebdavProviderForm,
)
class Root(TemplateView):
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
domain = Site.objects.get_current().domain
if "west-life" in domain:
return ['static_pages/landing_westlife.html']
elif "pype" in domain:
return ['static_pages/landing_pype.html']
return ['static_pages/landing_westlife.html']
def westlife_services(request):
context = get_login_context(request)
return render(request, 'static_pages/westlife/services.html', context)
def legal(request):
context = get_login_context(request)
return render(request, 'static_pages/cgv.html', context)
def internet_explorer(request):
context = get_login_context(request)
return render(request, 'static_pages/internet_explorer.html', context)
def westlife_static_page(request, page_name='fweh.html', render_kwargs=None):
if render_kwargs is None:
render_kwargs = dict()
context = get_login_context(request)
return render(request, 'static_pages/westlife/%s' % page_name, context, **render_kwargs)
#
# Debug information
#
class BuildInfo(View):
def get(self, *args, **kwargs):
version_file_path = os.path.join(settings.BASE_DIR, 'build_info.txt')
try:
with open(version_file_path, 'r') as f:
data = f.read()
except IOError:
data = 'No build information found. Probably means we are in development mode.'
return HttpResponse(data, content_type='text/plain')
class MainPage(TemplateView):
template_name = 'main.html'
def get_context_data(self, **kwargs):
context = super(MainPage, self).get_context_data(**kwargs)
user = self.request.user
context.update({
'INTERCOM_APP_ID': settings.INTERCOM_APP_ID,
'b2dropprovider_form': B2DropProviderForm(),
'wlwebdavprovider_form': WLWebdavProviderForm(),
'change_password_form': PasswordChangeForm(user=user),
'datafile_form': DatafileForm(),
'datafile_update_form': DatafileUpdateForm(),
'dataset_add_file_form': DatasetAddFileForm(),
'dataset_form': DatasetForm(),
'dropboxprovider_form': DropboxProviderForm(),
'folder_form': FolderForm(),
'forgot_password_form': ForgotPasswordForm(),
'gdriveprovider_form': GDriveProviderForm(),
'login_form': LoginForm(),
'register_form': RegisterForm(),
'reset_password_form': ResetPasswordForm(),
's3provider_form': S3ProviderForm(),
})
return context
def whoami(request):
return HttpResponse(request.user.username)
def logout(request):
django_logout(request)
#return HttpResponse('Logged out!')
return redirect('home')
def switch_login(request):
next_url=request.GET.get('next', '/virtualfolder/')
if hasattr(settings, 'SAML_CONFIG'):
return redirect('/saml2/login/?next=%s' % next_url)
else:
return redirect('/accounts/login/?next=%s' % next_url)
| [
"django.shortcuts.render",
"django.http.HttpResponse",
"os.path.join",
"django.shortcuts.redirect",
"luna_django_commons.app.mixins.get_login_context",
"django.contrib.sites.models.Site.objects.get_current",
"django.contrib.auth.logout"
] | [((1258, 1284), 'luna_django_commons.app.mixins.get_login_context', 'get_login_context', (['request'], {}), '(request)\n', (1275, 1284), False, 'from luna_django_commons.app.mixins import get_login_context\n'), ((1296, 1359), 'django.shortcuts.render', 'render', (['request', '"""static_pages/westlife/services.html"""', 'context'], {}), "(request, 'static_pages/westlife/services.html', context)\n", (1302, 1359), False, 'from django.shortcuts import render, redirect\n'), ((1396, 1422), 'luna_django_commons.app.mixins.get_login_context', 'get_login_context', (['request'], {}), '(request)\n', (1413, 1422), False, 'from luna_django_commons.app.mixins import get_login_context\n'), ((1434, 1483), 'django.shortcuts.render', 'render', (['request', '"""static_pages/cgv.html"""', 'context'], {}), "(request, 'static_pages/cgv.html', context)\n", (1440, 1483), False, 'from django.shortcuts import render, redirect\n'), ((1532, 1558), 'luna_django_commons.app.mixins.get_login_context', 'get_login_context', (['request'], {}), '(request)\n', (1549, 1558), False, 'from luna_django_commons.app.mixins import get_login_context\n'), ((1570, 1633), 'django.shortcuts.render', 'render', (['request', '"""static_pages/internet_explorer.html"""', 'context'], {}), "(request, 'static_pages/internet_explorer.html', context)\n", (1576, 1633), False, 'from django.shortcuts import render, redirect\n'), ((1789, 1815), 'luna_django_commons.app.mixins.get_login_context', 'get_login_context', (['request'], {}), '(request)\n', (1806, 1815), False, 'from luna_django_commons.app.mixins import get_login_context\n'), ((1827, 1913), 'django.shortcuts.render', 'render', (['request', "('static_pages/westlife/%s' % page_name)", 'context'], {}), "(request, 'static_pages/westlife/%s' % page_name, context, **\n render_kwargs)\n", (1833, 1913), False, 'from django.shortcuts import render, redirect\n'), ((3502, 3537), 'django.http.HttpResponse', 'HttpResponse', (['request.user.username'], {}), 
'(request.user.username)\n', (3514, 3537), False, 'from django.http import HttpResponse\n'), ((3565, 3587), 'django.contrib.auth.logout', 'django_logout', (['request'], {}), '(request)\n', (3578, 3587), True, 'from django.contrib.auth import logout as django_logout\n'), ((3639, 3655), 'django.shortcuts.redirect', 'redirect', (['"""home"""'], {}), "('home')\n", (3647, 3655), False, 'from django.shortcuts import render, redirect\n'), ((2022, 2071), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""build_info.txt"""'], {}), "(settings.BASE_DIR, 'build_info.txt')\n", (2034, 2071), False, 'import os\n'), ((2301, 2346), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""text/plain"""'}), "(data, content_type='text/plain')\n", (2313, 2346), False, 'from django.http import HttpResponse\n'), ((3798, 3842), 'django.shortcuts.redirect', 'redirect', (["('/saml2/login/?next=%s' % next_url)"], {}), "('/saml2/login/?next=%s' % next_url)\n", (3806, 3842), False, 'from django.shortcuts import render, redirect\n'), ((3868, 3915), 'django.shortcuts.redirect', 'redirect', (["('/accounts/login/?next=%s' % next_url)"], {}), "('/accounts/login/?next=%s' % next_url)\n", (3876, 3915), False, 'from django.shortcuts import render, redirect\n'), ((945, 971), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (969, 971), False, 'from django.contrib.sites.models import Site\n')] |
#!/usr/bin/python3
import requests
import json
import searchguard.settings as settings
from searchguard.exceptions import RoleMappingException, CheckRoleMappingExistsException, ViewRoleMappingException, \
DeleteRoleMappingException, CreateRoleMappingException, ModifyRoleMappingException, CheckRoleExistsException, \
ViewAllRoleMappingException
from searchguard.roles import check_role_exists
PROPERTIES_KEYS = {"users", "backendroles", "hosts"}
def _send_api_request(role, properties):
"""Private function to process API calls for the rolemapping module"""
create_sg_rolemapping = requests.put('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
data=json.dumps(properties),
headers=settings.HEADER,
auth=settings.SEARCHGUARD_API_AUTH)
if create_sg_rolemapping.status_code in (200, 201):
# Role mapping created or updated successfully
return
# Error when creating/updating the role mapping
raise RoleMappingException('Error creating/updating the mapping for role {} - msg {}'.format(
role, create_sg_rolemapping.text))
def check_rolemapping_exists(role):
"""Returns True of False depending on whether the requested role mapping exists in Search Guard"""
rolemapping_exists_check = requests.get('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
auth=settings.SEARCHGUARD_API_AUTH)
if rolemapping_exists_check.status_code == 200:
# Role mapping exists in SearchGuard
return True
elif rolemapping_exists_check.status_code == 404:
# Role mapping does not exist in SearchGuard
return False
else:
# Could not fetch valid output
raise CheckRoleMappingExistsException('Unknown error checking whether role mapping for {} exists'.format(role))
def view_all_rolemappings():
"""Returns the properties for the requested role mappings if it exists"""
view_all_sg_rolemapping = requests.get('{}/rolesmapping/'.format(settings.SEARCHGUARD_API_URL),
auth=settings.SEARCHGUARD_API_AUTH)
if view_all_sg_rolemapping.status_code == 200:
return json.loads(view_all_sg_rolemapping.text)
else:
# Could not fetch valid output
raise ViewAllRoleMappingException('Unknown error retrieving all role mappings')
def view_rolemapping(role):
"""Returns the properties for the requested role mapping if it exists"""
view_sg_rolemapping = requests.get('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
auth=settings.SEARCHGUARD_API_AUTH)
if view_sg_rolemapping.status_code == 200:
return json.loads(view_sg_rolemapping.text)
elif view_sg_rolemapping.status_code == 404:
# Raise exception because the role mapping does not exist
raise ViewRoleMappingException('Error viewing the role mapping for {}, does not exist'.format(role))
else:
# Could not fetch valid output
raise ViewRoleMappingException('Unknown error checking whether role mapping for {} exists'.format(role))
def delete_rolemapping(role):
"""Deletes a Search Guard role mapping. Returns when successfully deleted"""
if check_rolemapping_exists(role):
# The role mapping does exist, let's delete it
delete_sg_rolemapping = requests.delete('{}/rolesmapping/{}'.format(settings.SEARCHGUARD_API_URL, role),
auth=settings.SEARCHGUARD_API_AUTH)
if delete_sg_rolemapping.status_code == 200:
# Role mapping deleted successfully
return
else:
# Raise exception because we could not delete the role mapping
raise DeleteRoleMappingException('Error deleting the role mapping for role {} '
'- msg: {}'.format(role, delete_sg_rolemapping.text))
else:
# Raise exception because the role mapping does not exist
raise DeleteRoleMappingException('Error deleting the role mapping for role {}, does not exist'.format(role))
def create_rolemapping(role, properties):
"""Creates a Search Guard role mapping. Returns when successfully created
It is required to specify at least one of: users, backendroles or hosts in the properties argument.
We do not use the PATCH endpoint for backwards compatibility with Elasticsearch before 6.4.0
:param str role: Name of the role mapping to create in Search Guard
:param dict properties: Search Guard role mapping fields (users, backendroles and/or hosts)
:raises: CreateRoleMappingException, CheckRoleExistsException
"""
if not check_role_exists(role):
raise CheckRoleExistsException('Role {} does not exist'.format(role))
if not any(key in properties for key in PROPERTIES_KEYS):
# Raise exception because we did not receive valid properties
raise CreateRoleMappingException('Error creating mapping for role {} - Include at least one of: users, '
'backendroles or hosts keys in the properties argument'.format(role))
_send_api_request(role, properties)
return
def modify_rolemapping(role, properties, action="replace"):
"""Modifies a Search Guard role mapping. Returns when successfully modified
It is required to specify at least one of: users, backendroles or hosts in the properties argument.
We do not use the PATCH endpoint for backwards compatibility with Elasticsearch before 6.4.0
:param str role: Name of the role mapping to create in Search Guard
:param dict properties: Search Guard role mapping fields (users, backendroles and/or hosts)
:param str action: Defines what to do with the properties. Defaults to replace (overwrites existing
properties). Other options are merge (combine the properties with existing ones) or split
(removes the properties from existing ones)
:raises: ModifyRoleMappingException
"""
if not check_rolemapping_exists(role):
raise ModifyRoleMappingException('Mapping for role {} does not exist'.format(role))
if not any(key in properties for key in PROPERTIES_KEYS):
# Raise exception because we did not receive valid properties
raise ValueError('Error modifying mapping for role {} - Include at least one of: users, '
'backendroles or hosts keys in the properties argument'.format(role))
# Retrieve existing properties of the role mapping:
rolemapping = view_rolemapping(role)
for property in PROPERTIES_KEYS:
if property not in rolemapping[role]:
rolemapping[role][property] = list()
if action is "merge":
# Merge the requested properties with existing properties in the role mapping.
rolemapping[role]['users'] = \
sorted(set(rolemapping[role]['users'] + properties.get('users', [])))
rolemapping[role]['backendroles'] = \
sorted(set(rolemapping[role]['backendroles'] + properties.get('backendroles', [])))
rolemapping[role]['hosts'] = \
sorted(set(rolemapping[role]['hosts'] + properties.get('hosts', [])))
_send_api_request(role, rolemapping[role])
return
if action is "split":
# Remove the requested properties from existing properties in the role mapping.
rolemapping[role]['users'] = [item for item in rolemapping[role]['users']
if item not in properties['users']]
rolemapping[role]['backendroles'] = [item for item in rolemapping[role]['backendroles']
if item not in properties['backendroles']]
rolemapping[role]['hosts'] = [item for item in rolemapping[role]['hosts']
if item not in properties['hosts']]
_send_api_request(role, rolemapping[role])
return
# No merge or split action, overwrite existing properties:
_send_api_request(role, properties)
def list_rolemappings_for_user(user, roles=None, skip_missing_roles=False):
"""Get list of rolemappings that contain the given user. It is possible to add a list of roles to check.
If no list is added, all rolemappings are evaluated. Non-existent roles can be excluded.
:param str user: Name of user
:param list roles: List of rolemappings to be checked for the given user
:param bool skip_missing_roles: Skip missing roles or throw ViewRoleMappingException
:returns list: list of rolemappings with the given user
:raises: ViewRoleMappingException
"""
if roles:
if skip_missing_roles:
user_rolemappings = list()
for role in roles:
try:
if user in view_rolemapping(role)[role]['users']:
user_rolemappings.append(role)
except ViewRoleMappingException:
pass
else:
user_rolemappings = [role for role in roles if user in view_rolemapping(role)[role]['users']]
else:
user_rolemappings = [r for r, p in view_all_rolemappings().items() if user in p['users']]
return sorted(set(user_rolemappings))
| [
"json.loads",
"json.dumps",
"searchguard.roles.check_role_exists",
"searchguard.exceptions.ViewAllRoleMappingException"
] | [((2315, 2355), 'json.loads', 'json.loads', (['view_all_sg_rolemapping.text'], {}), '(view_all_sg_rolemapping.text)\n', (2325, 2355), False, 'import json\n'), ((2419, 2492), 'searchguard.exceptions.ViewAllRoleMappingException', 'ViewAllRoleMappingException', (['"""Unknown error retrieving all role mappings"""'], {}), "('Unknown error retrieving all role mappings')\n", (2446, 2492), False, 'from searchguard.exceptions import RoleMappingException, CheckRoleMappingExistsException, ViewRoleMappingException, DeleteRoleMappingException, CreateRoleMappingException, ModifyRoleMappingException, CheckRoleExistsException, ViewAllRoleMappingException\n'), ((2842, 2878), 'json.loads', 'json.loads', (['view_sg_rolemapping.text'], {}), '(view_sg_rolemapping.text)\n', (2852, 2878), False, 'import json\n'), ((4840, 4863), 'searchguard.roles.check_role_exists', 'check_role_exists', (['role'], {}), '(role)\n', (4857, 4863), False, 'from searchguard.roles import check_role_exists\n'), ((727, 749), 'json.dumps', 'json.dumps', (['properties'], {}), '(properties)\n', (737, 749), False, 'import json\n')] |
"""
QUBO API solvers
QUBO solvers from Meta Analytics # noqa: E501
The version of the OpenAPI document: v1
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import meta_analytics
from meta_analytics.model.solver_async_response import SolverAsyncResponse
class TestSolverAsyncResponse(unittest.TestCase):
"""SolverAsyncResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSolverAsyncResponse(self):
"""Test SolverAsyncResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = SolverAsyncResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
] | [((753, 768), 'unittest.main', 'unittest.main', ([], {}), '()\n', (766, 768), False, 'import unittest\n')] |
import configparser
import os
ApplicationDir = os.path.dirname(os.path.abspath(__file__))
HomeDir = os.path.expanduser('~')
CredentialDir = os.path.join(HomeDir, '.credentials')
if not os.path.exists(CredentialDir):
os.makedirs(CredentialDir)
CredentialFilePath = os.path.join(CredentialDir, 'CalSyncHAB.json')
CalSyncHABSettings = os.path.join(ApplicationDir, 'CalSyncHAB.ini')
Settings = configparser.ConfigParser()
Settings.read(CalSyncHABSettings)
ApplicationName = Settings.get('General', 'ApplicationName')
CalendarScope = Settings.get('Calendar', 'Scope')
CalendarId = Settings.get('Calendar', 'CalendarId')
CalendarMaxEvents = Settings.get('Calendar', 'MaxEvents')
CalendarTimeZone = Settings.get('Calendar', 'TimeZone')
CalendarClientSecretFile = Settings.get('Calendar', 'ClientSecretFile')
OpenHABHostName = Settings.get('OpenHAB', 'HostName')
OpenHABPort = Settings.get('OpenHAB', 'Port')
OpenHABItemPrefix = Settings.get('OpenHAB', 'ItemPrefix')
| [
"os.path.exists",
"configparser.ConfigParser",
"os.makedirs",
"os.path.join",
"os.path.abspath",
"os.path.expanduser"
] | [((101, 124), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (119, 124), False, 'import os\n'), ((141, 178), 'os.path.join', 'os.path.join', (['HomeDir', '""".credentials"""'], {}), "(HomeDir, '.credentials')\n", (153, 178), False, 'import os\n'), ((271, 317), 'os.path.join', 'os.path.join', (['CredentialDir', '"""CalSyncHAB.json"""'], {}), "(CredentialDir, 'CalSyncHAB.json')\n", (283, 317), False, 'import os\n'), ((339, 385), 'os.path.join', 'os.path.join', (['ApplicationDir', '"""CalSyncHAB.ini"""'], {}), "(ApplicationDir, 'CalSyncHAB.ini')\n", (351, 385), False, 'import os\n'), ((398, 425), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (423, 425), False, 'import configparser\n'), ((64, 89), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import os\n'), ((187, 216), 'os.path.exists', 'os.path.exists', (['CredentialDir'], {}), '(CredentialDir)\n', (201, 216), False, 'import os\n'), ((222, 248), 'os.makedirs', 'os.makedirs', (['CredentialDir'], {}), '(CredentialDir)\n', (233, 248), False, 'import os\n')] |
from django.core.exceptions import PermissionDenied
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.views.generic import ListView
from hours.models import TimecardObject, ReportingPeriod
from employees.models import UserData
from tock.utils import PermissionMixin
from .utils import get_dates, calculate_utilization
class GroupUtilizationView(PermissionMixin, ListView):
    """Utilization report for billable staff, grouped by unit.

    Utilization is billable hours divided by total hours, reported for
    three windows: the latest reporting period ("last"), the last
    ``requested_periods`` reporting periods ("recent"), and the fiscal
    year to date ("fytd").
    """
    template_name = 'utilization/group_utilization.html'
    requested_periods = 4

    def dispatch(self, *args, **kwargs):
        """
        Resolve recent reporting periods.

        Although recent_rps is set to the last four reporting periods,
        we could accept a form response that allows the user or app to
        dynamically customize number of periods to include in the queryset.

        Also, if they're not staff, we're going to go ahead and bounce
        them to 403 so we don't make all these queries.
        """
        if not self.request.user.is_authenticated:
            return self.handle_no_permission()

        if not self.request.user.is_staff:
            raise PermissionDenied

        self.available_periods = ReportingPeriod.objects.count()
        # Never ask get_dates() for more periods than actually exist.
        if self.available_periods >= self.requested_periods:
            self.recent_rps = get_dates(self.requested_periods)
        else:
            self.recent_rps = get_dates(self.available_periods)

        return super().dispatch(*args, **kwargs)

    @staticmethod
    def _sum_hours(timecards, earliest_start, billable_ids=None):
        """Sum hours_spent of cards starting on/after ``earliest_start``.

        When ``billable_ids`` is given, only cards whose id appears in
        it (i.e. billable cards) are counted.
        """
        return sum(
            card['hours_spent']
            for card in timecards
            if card['timecard__reporting_period__start_date'] >= earliest_start
            and (billable_ids is None or card['id'] in billable_ids)
        )

    @staticmethod
    def _unit_hours(unit_id, billable=False, **period_filter):
        """Sum submitted hours for all staff of a unit.

        ``period_filter`` carries the reporting-period date lookup
        (exact match for the latest period, ``__gte`` for the recent and
        fiscal-year windows). When ``billable`` is True only hours on
        billable accounting codes are counted.
        """
        cards = TimecardObject.objects.filter(
            submitted=True,
            timecard__user__user_data__unit=unit_id,
            **period_filter
        )
        if billable:
            cards = cards.filter(project__accounting_code__billable=True)
        return sum(cards.values_list('hours_spent', flat=True))

    def get_queryset(self):
        """
        Gets submitted timecards for billable staff
        limited to the reporting periods in question.
        """
        # Start stubbing a dict for our units, using a quick list comprehension
        units = [{
            'id': choice[0],
            'name': choice[1],
            'slug': slugify(choice[1])
        } for choice in UserData.UNIT_CHOICES]

        # now we'll start building out that dict further,
        # starting with the staff for each unit
        for unit in units:
            billable_staff = UserData.objects.filter(
                is_billable=True,
                current_employee=True,
                unit=unit['id']
            ).prefetch_related('user')

            for staffer in billable_staff:
                # Smallest possible TimecardObject queryset based on the
                # earliest date returned by get_dates(). Cast to values()
                # so the per-window sums below run in pure python without
                # additional queries hitting the database.
                user_timecards = TimecardObject.objects.filter(
                    submitted=True,
                    timecard__user=staffer.user,
                    timecard__reporting_period__start_date__gte=self.recent_rps[3]
                ).select_related(
                    'timecard__reporting_period',
                    'project__accounting_code__billable'
                ).values(
                    'id',
                    'hours_spent',
                    'timecard__reporting_period',
                    'timecard__reporting_period__start_date',
                    'project__accounting_code__billable'
                )

                # Only the ids are needed to recognize billable cards.
                user_billable_timecard_ids = user_timecards.filter(
                    project__accounting_code__billable=True
                ).values_list('id', flat=True)

                # Fiscal year to date (only differs from the full
                # queryset near the start of a fiscal year).
                fytd_hours = self._sum_hours(
                    user_timecards, self.recent_rps[2])
                fytd_billable = self._sum_hours(
                    user_timecards, self.recent_rps[2],
                    user_billable_timecard_ids)
                staffer.fytd = calculate_utilization(fytd_billable, fytd_hours)
                # Bug fix: this was previously the bare annotation
                # ``staffer.fytd_all_hours_total: fytd_hours`` and never
                # actually assigned the attribute.
                staffer.fytd_all_hours_total = fytd_hours
                staffer.fytd_billable_hours = fytd_billable if fytd_billable else 0.0

                # Last `requested_periods` reporting periods.
                recent_hours = self._sum_hours(
                    user_timecards, self.recent_rps[1])
                recent_billable = self._sum_hours(
                    user_timecards, self.recent_rps[1],
                    user_billable_timecard_ids)
                staffer.recent = calculate_utilization(recent_billable, recent_hours)
                staffer.recent_all_hours_total = recent_hours
                staffer.recent_billable_hours_total = recent_billable if recent_billable else 0.0

                # Latest reporting period only.
                last_hours = self._sum_hours(
                    user_timecards, self.recent_rps[0])
                last_billable = self._sum_hours(
                    user_timecards, self.recent_rps[0],
                    user_billable_timecard_ids)
                staffer.last = calculate_utilization(last_billable, last_hours)
                staffer.last_all_hours_total = last_hours
                staffer.last_billable_hours_total = last_billable if last_billable else 0.0

                staffer.last_url = reverse(
                    'reports:ReportingPeriodUserDetailView',
                    kwargs={
                        'username': staffer.user,
                        'reporting_period': self.recent_rps[4]
                    }
                )

            unit['billable_staff'] = billable_staff

            # Unit-level aggregates for the same three windows.
            last_total_hours = self._unit_hours(
                unit['id'],
                timecard__reporting_period__start_date=self.recent_rps[4])
            last_billable_hours = self._unit_hours(
                unit['id'], billable=True,
                timecard__reporting_period__start_date=self.recent_rps[4])
            recent_total_hours = self._unit_hours(
                unit['id'],
                timecard__reporting_period__start_date__gte=self.recent_rps[1])
            recent_billable_hours = self._unit_hours(
                unit['id'], billable=True,
                timecard__reporting_period__start_date__gte=self.recent_rps[1])
            fytd_total_hours = self._unit_hours(
                unit['id'],
                timecard__reporting_period__start_date__gte=self.recent_rps[2])
            fytd_billable_hours = self._unit_hours(
                unit['id'], billable=True,
                timecard__reporting_period__start_date__gte=self.recent_rps[2])

            unit.update({
                'last': {
                    'unit_name': unit['name'],
                    'billable_hours': last_billable_hours,
                    'total_hours': last_total_hours,
                    'utilization': calculate_utilization(
                        last_billable_hours,
                        last_total_hours
                    )
                },
                'recent': {
                    'unit_name': unit['name'],
                    'billable_hours': recent_billable_hours,
                    'total_hours': recent_total_hours,
                    'utilization': calculate_utilization(
                        recent_billable_hours,
                        recent_total_hours
                    )
                },
                'fytd': {
                    'unit_name': unit['name'],
                    'billable_hours': fytd_billable_hours,
                    'total_hours': fytd_total_hours,
                    'utilization': calculate_utilization(
                        fytd_billable_hours,
                        fytd_total_hours
                    )
                }
            })

        return units

    def get_context_data(self, **kwargs):
        """Expose the report window boundaries to the template."""
        context = super(GroupUtilizationView, self).get_context_data(**kwargs)
        context.update(
            {
                'through_date': self.recent_rps[0],
                'recent_start_date': self.recent_rps[1],
            }
        )
        return context
| [
"hours.models.ReportingPeriod.objects.count",
"employees.models.UserData.objects.filter",
"django.template.defaultfilters.slugify",
"hours.models.TimecardObject.objects.filter",
"django.urls.reverse"
] | [((1168, 1199), 'hours.models.ReportingPeriod.objects.count', 'ReportingPeriod.objects.count', ([], {}), '()\n', (1197, 1199), False, 'from hours.models import TimecardObject, ReportingPeriod\n'), ((1791, 1809), 'django.template.defaultfilters.slugify', 'slugify', (['choice[1]'], {}), '(choice[1])\n', (1798, 1809), False, 'from django.template.defaultfilters import slugify\n'), ((7039, 7166), 'django.urls.reverse', 'reverse', (['"""reports:ReportingPeriodUserDetailView"""'], {'kwargs': "{'username': staffer.user, 'reporting_period': self.recent_rps[4]}"}), "('reports:ReportingPeriodUserDetailView', kwargs={'username':\n staffer.user, 'reporting_period': self.recent_rps[4]})\n", (7046, 7166), False, 'from django.urls import reverse\n'), ((2020, 2106), 'employees.models.UserData.objects.filter', 'UserData.objects.filter', ([], {'is_billable': '(True)', 'current_employee': '(True)', 'unit': "unit['id']"}), "(is_billable=True, current_employee=True, unit=unit[\n 'id'])\n", (2043, 2106), False, 'from employees.models import UserData\n'), ((7387, 7540), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'timecard__reporting_period__start_date': 'self.recent_rps[4]', 'submitted': '(True)', 'timecard__user__user_data__unit': "unit['id']"}), "(timecard__reporting_period__start_date=self.\n recent_rps[4], submitted=True, timecard__user__user_data__unit=unit['id'])\n", (7416, 7540), False, 'from hours.models import TimecardObject, ReportingPeriod\n'), ((7693, 7894), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'submitted': '(True)', 'timecard__reporting_period__start_date': 'self.recent_rps[4]', 'timecard__user__user_data__unit': "unit['id']", 'project__accounting_code__billable': '(True)'}), "(submitted=True,\n timecard__reporting_period__start_date=self.recent_rps[4],\n timecard__user__user_data__unit=unit['id'],\n project__accounting_code__billable=True)\n", (7722, 7894), False, 'from 
hours.models import TimecardObject, ReportingPeriod\n'), ((8106, 8267), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'submitted': '(True)', 'timecard__reporting_period__start_date__gte': 'self.recent_rps[1]', 'timecard__user__user_data__unit': "unit['id']"}), "(submitted=True,\n timecard__reporting_period__start_date__gte=self.recent_rps[1],\n timecard__user__user_data__unit=unit['id'])\n", (8135, 8267), False, 'from hours.models import TimecardObject, ReportingPeriod\n'), ((8435, 8641), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'submitted': '(True)', 'timecard__reporting_period__start_date__gte': 'self.recent_rps[1]', 'timecard__user__user_data__unit': "unit['id']", 'project__accounting_code__billable': '(True)'}), "(submitted=True,\n timecard__reporting_period__start_date__gte=self.recent_rps[1],\n timecard__user__user_data__unit=unit['id'],\n project__accounting_code__billable=True)\n", (8464, 8641), False, 'from hours.models import TimecardObject, ReportingPeriod\n'), ((8895, 9056), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'submitted': '(True)', 'timecard__reporting_period__start_date__gte': 'self.recent_rps[2]', 'timecard__user__user_data__unit': "unit['id']"}), "(submitted=True,\n timecard__reporting_period__start_date__gte=self.recent_rps[2],\n timecard__user__user_data__unit=unit['id'])\n", (8924, 9056), False, 'from hours.models import TimecardObject, ReportingPeriod\n'), ((9218, 9424), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'submitted': '(True)', 'timecard__reporting_period__start_date__gte': 'self.recent_rps[2]', 'timecard__user__user_data__unit': "unit['id']", 'project__accounting_code__billable': '(True)'}), "(submitted=True,\n timecard__reporting_period__start_date__gte=self.recent_rps[2],\n timecard__user__user_data__unit=unit['id'],\n project__accounting_code__billable=True)\n", 
(9247, 9424), False, 'from hours.models import TimecardObject, ReportingPeriod\n'), ((2750, 2892), 'hours.models.TimecardObject.objects.filter', 'TimecardObject.objects.filter', ([], {'submitted': '(True)', 'timecard__user': 'staffer.user', 'timecard__reporting_period__start_date__gte': 'self.recent_rps[3]'}), '(submitted=True, timecard__user=staffer.user,\n timecard__reporting_period__start_date__gte=self.recent_rps[3])\n', (2779, 2892), False, 'from hours.models import TimecardObject, ReportingPeriod\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 15:10:58 2021
@author: nguy0936
"""
# Scratch script that re-implements reading a .pti recording by hand,
# alongside the library call, presumably to compare results.
from pyenvnoise.utils import ptiread
# NOTE(review): the path uses unescaped backslashes; a raw string
# r'...' would be safer -- verify it resolves correctly on Windows.
data = ptiread('R:\CMPH-Windfarm Field Study\Hornsdale\set2\Recording-1.1.pti')
import numpy as np
file_name = 'R:\CMPH-Windfarm Field Study\Hornsdale\set2\Recording-1.1.pti'
# Open as text with errors ignored so the ASCII header can be scanned
# even though the file also contains binary data.
fid = open(file_name, "r", encoding='utf-8', errors='ignore')
headerlinecnt = 1
numref = 1
## Get all information
# get header information setup
# first 15 lines are setup info
tline = fid.readline()
# determine start header line
# NOTE(review): end_setup is only assigned inside this loop; if the
# very first line were already '[SETUP START]\n' it would be undefined.
while tline != '[SETUP START]\n':
    numref += 1
    headerlinecnt += 1
    end_setup = numref + 13
    tline = fid.readline()
# Parse the fixed-offset setup fields relative to the section start.
while headerlinecnt<end_setup:
    tline = fid.readline()
    headerlinecnt = headerlinecnt + 1
    if headerlinecnt == (numref+2):
        RECInfoSectionSize = int(tline.partition('=')[2])
    if headerlinecnt == (numref+3):
        RECInfoSectionPos = int(tline.partition('=')[2])
    if headerlinecnt==(numref + 4):
        SampleFrequency = int(float(tline.partition('=')[2]))
    if headerlinecnt==(numref+5):
        numchannels = int(tline.partition('=')[2])
    if headerlinecnt==(numref+11):
        Sample = int(tline.partition('=')[2])
    if headerlinecnt==(numref+12):
        Date = tline.partition('=')[2]
    if headerlinecnt==(numref+13):
        Time = tline.partition('=')[2]
## Get channel info
# the most important info is the correction factor (counts -> Pa)
CorrectionFactor = []
for nchann in range(numchannels):
    # Each channel block is assumed to span 10 header lines.
    for i in range(10):
        tline = fid.readline()
        if tline.partition('=')[0] == 'CorrectionFactor':
            CorrectionFactor.append(float(tline.partition('=')[2]))
        if tline.partition('=')[0] == 'SampleFrequency':
            SampleFrequency = int(tline.partition('=')[2])
## Read binary data
# pointer to main data
# 20 bytes may be a subheader which may not be important
fid.seek( RECInfoSectionPos + RECInfoSectionSize + 20, 0)
# the size of each segment, around 250 ms
# for Fs = 8192 Hz, it is 2048*4 bytes data + 4*4 bytes info (channel id)
dsize = np.fromfile(fid, dtype=np.int16, count=1)
# NOTE(review): cols is computed but never used below.
cols = int(Sample/(dsize-4)*numchannels)
# back to start of data
fid.seek( RECInfoSectionPos + RECInfoSectionSize + 20, 0)
#print(fid.tell())
# read all data into rawdata and ignore 4 first ints per segment (info)
rawdata = np.fromfile(fid, np.int32).reshape((-1, dsize[0])).T
rawdata = np.delete(rawdata, np.s_[0:4], 0)
## Save data into channels
# calculate factors for actual Pa, full range is a 16 bit system
CorrectionFactor = np.array(CorrectionFactor)
factor = CorrectionFactor / 2**16
# initialise array data: one column per channel
Data = np.empty([int(rawdata.shape[0]*rawdata.shape[1]/numchannels), numchannels])
for i in range(numchannels):
Data[:,i]= np.transpose( rawdata[:,i:rawdata.shape[1]:numchannels] ).ravel()*factor[i] | [
"numpy.fromfile",
"numpy.delete",
"pyenvnoise.utils.ptiread",
"numpy.array",
"numpy.transpose"
] | [((133, 209), 'pyenvnoise.utils.ptiread', 'ptiread', (['"""R:\\\\CMPH-Windfarm Field Study\\\\Hornsdale\\\\set2\\\\Recording-1.1.pti"""'], {}), "('R:\\\\CMPH-Windfarm Field Study\\\\Hornsdale\\\\set2\\\\Recording-1.1.pti')\n", (140, 209), False, 'from pyenvnoise.utils import ptiread\n'), ((2162, 2203), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.int16', 'count': '(1)'}), '(fid, dtype=np.int16, count=1)\n', (2173, 2203), True, 'import numpy as np\n'), ((2482, 2515), 'numpy.delete', 'np.delete', (['rawdata', 'np.s_[0:4]', '(0)'], {}), '(rawdata, np.s_[0:4], 0)\n', (2491, 2515), True, 'import numpy as np\n'), ((2626, 2652), 'numpy.array', 'np.array', (['CorrectionFactor'], {}), '(CorrectionFactor)\n', (2634, 2652), True, 'import numpy as np\n'), ((2419, 2445), 'numpy.fromfile', 'np.fromfile', (['fid', 'np.int32'], {}), '(fid, np.int32)\n', (2430, 2445), True, 'import numpy as np\n'), ((2840, 2896), 'numpy.transpose', 'np.transpose', (['rawdata[:, i:rawdata.shape[1]:numchannels]'], {}), '(rawdata[:, i:rawdata.shape[1]:numchannels])\n', (2852, 2896), True, 'import numpy as np\n')] |
import numpy as np
from OpenGL.arrays import vbo
from .Mesh_utils import MeshFuncs, MeshSignals, BBox
import openmesh
import copy
from .Shader import *
# Keep a handle on the original openmesh setter before monkey-patching it.
orig_set_vertex_property_array = openmesh.PolyMesh.set_vertex_property_array
def svpa(self, prop_name, array=None, element_shape=None, element_value=None):
    """Patched replacement for PolyMesh.set_vertex_property_array.

    With ``array`` given it delegates to the original setter, printing
    the attribute name/shape before re-raising on failure. Without an
    array it initialises the property from ``element_shape`` and/or
    ``element_value`` (broadcasting the value to the shape).
    """
    if array is not None:
        try:
            orig_set_vertex_property_array(self, prop_name, array)
        except Exception as e:
            # Surface which attribute/shape failed before re-raising.
            print('error when set attribute', prop_name, type(array), array.shape, self.n_vertices())
            raise e
        return
    if element_shape is None:
        if element_value is None:
            element_shape = ()
        else:
            # Infer the per-element shape from the sample value.
            element_shape = np.shape(element_value)
    if element_value is None:
        # No fill value: allocate an (uninitialised) array of that shape.
        orig_set_vertex_property_array(self, prop_name, np.empty(element_shape))
    else:
        orig_set_vertex_property_array(self, prop_name, np.array(
            np.broadcast_to(element_value, element_shape)))
openmesh.PolyMesh.set_vertex_property_array = svpa
# Keep a handle on the original openmesh setter before monkey-patching it.
orig_set_face_property_array = openmesh.PolyMesh.set_face_property_array
def sfpa(self, prop_name, array=None, element_shape=None, element_value=None):
    """Patched replacement for PolyMesh.set_face_property_array.

    Same contract as svpa, but for per-face properties.
    """
    if array is not None:
        try:
            orig_set_face_property_array(self, prop_name, array)
        except Exception as e:
            # Surface which attribute/shape failed before re-raising.
            print('error when set attribute', prop_name, type(array), array.shape, self.n_faces())
            raise e
        return
    if element_shape is None:
        if element_value is None:
            element_shape = ()
        else:
            # Infer the per-element shape from the sample value.
            element_shape = np.shape(element_value)
    if element_value is None:
        orig_set_face_property_array(self, prop_name, np.empty(element_shape))
    else:
        orig_set_face_property_array(self, prop_name, np.array(
            np.broadcast_to(element_value, element_shape)))
openmesh.PolyMesh.set_face_property_array = sfpa
# Keep a handle on the original openmesh setter before monkey-patching it.
orig_set_edge_property_array = openmesh.PolyMesh.set_edge_property_array
def sepa(self, prop_name, array=None, element_shape=None, element_value=None):
    """Patched replacement for PolyMesh.set_edge_property_array.

    Same contract as svpa/sfpa, but for per-edge properties.
    """
    if array is not None:
        try:
            orig_set_edge_property_array(self, prop_name, array)
        except Exception as e:
            # Bug fix: report the edge count here, not the face count
            # (was ``self.n_faces()``, copy-pasted from sfpa).
            print('error when set attribute', prop_name, type(array), array.shape, self.n_edges())
            raise e
        return
    if element_shape is None:
        if element_value is None:
            element_shape = ()
        else:
            # Infer the per-element shape from the sample value.
            element_shape = np.shape(element_value)
    if element_value is None:
        orig_set_edge_property_array(self, prop_name, np.empty(element_shape))
    else:
        orig_set_edge_property_array(self, prop_name, np.array(
            np.broadcast_to(element_value, element_shape)))
openmesh.PolyMesh.set_edge_property_array = sepa
# Maps a python value type to an attribute type name. Vector/matrix
# types are resolved separately by element count (see Mesh._getAttribType).
DATA_TYPE_MAP = {
    float: 'float',
    int: 'int',
    bool: 'bool',
    str: 'str',
    list: 'list',
    tuple: 'tuple',
}
# Default value used when an attribute is created without one.
DEFAULT_VALUE_MAP = {
    "float": 0.0,
    "int": 0,
    "vector2": [0, 0],
    "vector3": [0, 0, 0],
    "vector4": [0, 0, 0, 0],
    "matrix3": np.identity(3, dtype=np.float64),
    "matrix4": np.identity(4, dtype=np.float64),
    "bool": False,
    "list": [],
    "tuple": {},
    "custom": None,
    "str": '',
}
# Whether a type is stored in a numpy property array (True) or a plain
# per-element python-object property (False).
DATA_IS_ARRAY_MAP = {
    "float": True,
    "int": True,
    "vector2": True,
    "vector3": True,
    "vector4": True,
    "matrix3": True,
    "matrix4": True,
    "bool": False,
    "list": False,
    "tuple": False,
    "custom": False,
    "str": False,
}
# Per-element shape template; the first axis is a placeholder for the
# element count, filled in by get_shape(). None means scalar-per-element.
DATA_SHAPE_MAP = {
    "float": None,
    "int": None,
    "vector2": [0, 2],
    "vector3": [0, 3],
    "vector4": [0, 4],
    "matrix3": [0, 3, 3],
    "matrix4": [0, 4, 4],
    "bool": None,
    "list": None,
    "tuple": None,
    "custom": None,
    "str": None,
}
def get_shape(element_num, base_shape):
    """Return the property-array shape for ``element_num`` elements.

    ``base_shape`` is a template whose first axis is a placeholder
    (e.g. ``[0, 3]`` for per-element 3-vectors); ``None`` means one
    scalar per element.

    Bug fix: the previous version assigned ``base_shape[0]`` in place,
    mutating the shared templates in DATA_SHAPE_MAP; the template is
    now left untouched.
    """
    if base_shape is None:
        return (element_num,)
    return (element_num, *base_shape[1:])
class Mesh(object):
    def __init__(self, mesh=None):
        """Wrap an openmesh.PolyMesh for display in the GL viewport.

        Args:
            mesh: existing openmesh.PolyMesh to wrap; a fresh empty
                mesh is created when None.
        """
        # Rendering options consumed by paint() / shader().
        self.opts = {
            'color': (1., 1., 1.),
            'edgeColor': (0.5, 0.5, 0.5),
            'pointColor': (1.0, 1.0, 0.0),
            'shader': standShader,
            'smooth': True,
            'computeNormals': False,
        }
        # Bookkeeping of user-visible attributes per element class;
        # 'pos' is always present on vertices.
        self._attributeMap = {
            "vertex": {
                "pos": {'default_value': [0, 0, 0], 'type': 'vector3', 'is_array': True}
            },
            "face": {},
            "edge": {},
            "detail": {}
        }
        self.signals = MeshSignals()
        self._selected = False
        # Edge colors keyed by selection state (not currently used by paint()).
        self.edge_colors = {
            True: (1.0, 1.0, 0.0, 1.0),
            False: (0.15, 0.15, 0.15, 1.0)
        }
        if mesh is None:
            self._mesh = openmesh.PolyMesh()
        else:
            self._mesh = mesh
        # Drop openmesh's built-in per-element properties; attributes
        # are managed through _attributeMap / custom properties instead.
        self._mesh.release_vertex_texcoords1D()
        self._mesh.release_vertex_texcoords2D()
        self._mesh.release_vertex_texcoords3D()
        self._mesh.release_vertex_colors()
        self._mesh.release_halfedge_texcoords1D()
        self._mesh.release_halfedge_texcoords2D()
        self._mesh.release_halfedge_texcoords3D()
        self._mesh.release_halfedge_normals()
        self._mesh.release_halfedge_colors()
        self._mesh.release_face_colors()
        self._mesh.release_face_texture_index()
        self._mesh.release_edge_colors()
        self.bbox = BBox()
        self._GLFaces = None  # cached triangulated face indices for GL
        self._flatColor = 0   # shader uniform: 1 = flat color, 0 = shaded
        self.__view = None    # back-reference to the owning view
    @property
    def meshFuncs(self):
        """
        Get an object which contains some utility mesh functions.
        Returns:
            MeshFuncs object (constructed fresh on each access).
        """
        return MeshFuncs(self)

    @property
    def mesh(self):
        """
        Get the real underlying mesh object.
        Returns:
            openmesh.PolyMesh.
        """
        return self._mesh
@property
def bbox_min(self):
"""
Get bounding box min value.
Returns:
list.
"""
vts = self.getVertexes()
if vts is None:
return [0, 0, 0]
try:
_bbox_min = list(np.min(vts, axis=0))
except:
return [0, 0, 0]
return _bbox_min
@property
def bbox_max(self):
"""
Get bounding box max value.
Returns:
list.
"""
vts = self.getVertexes()
if vts is None:
return [0, 0, 0]
try:
_bbox_max = list(np.max(vts, axis=0))
except:
return [0, 0, 0]
return _bbox_max
    @property
    def bbox_center(self):
        """
        Get bounding box center value.
        Returns:
            list.
        """
        _, __, _bbox_center = self.get_bbox_info()
        return _bbox_center

    def get_bbox_info(self):
        """
        Get bounding box min, max, center.
        Returns:
            min->list, max->list, center->list.
        """
        _min = self.bbox_min
        _max = self.bbox_max
        _center = [(_min[0] + _max[0]) / 2.0,
                   (_min[1] + _max[1]) / 2.0,
                   (_min[2] + _max[2]) / 2.0]
        return _min, _max, _center

    def visible(self):
        # Always drawn; views/subclasses may override.
        return True

    def _setView(self, v):
        # Called by the view when this item is attached to it.
        self.__view = v

    def view(self):
        """Return the owning view, or None if not attached."""
        return self.__view

    def update(self):
        """Request a repaint from the owning view, if any."""
        v = self.view()
        if v is None:
            return
        v.update()
def update_GLFace(self):
"""
Prepare the mesh data for OpenGL.
"""
b = self.getTriangulateMesh()
self._GLFaces = b.face_vertex_indices()
def getTriangulateMesh(self):
"""
Triangulate all faces and return a new mesh.
Returns:
openmesh.PolyMesh.
"""
b = copy.deepcopy(self._mesh)
b.triangulate()
return b
def setFlatColor(self, mode):
"""
Set if use flat color for render.
Args:
mode(bool): True means use flat color.
"""
if mode is True:
self._flatColor = 1
else:
self._flatColor = 0
def setSelected(self, sel):
self._selected = sel
# self.opts['edgeColor'] = self.edge_colors[False]
self.update()
def getSelected(self):
return self._selected
    def drawBBox(self):
        """Draw the selection bounding box (immediate-mode GL)."""
        _min, _max, _center = self.get_bbox_info()
        size = [abs(_min[0] - _max[0]),
                abs(_min[1] - _max[1]),
                abs(_min[2] - _max[2])]
        # bbox.set() returns a transform positioning the unit box.
        tr = self.bbox.set(_center, size)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        try:
            a = np.array(tr.copyDataTo()).reshape((4, 4))
            # GL expects column-major, hence the transpose.
            glMultMatrixf(a.transpose())
            self.bbox.paint()
        finally:
            # Always restore the modelview stack.
            glMatrixMode(GL_MODELVIEW)
            glPopMatrix()

    def setShader(self, shader):
        """Set the shader used for face rendering and repaint."""
        self.opts['shader'] = shader
        self.update()

    def shader(self):
        """Return the shader currently used for face rendering."""
        return self.opts['shader']

    def setColor(self, c):
        """Set the uniform face color (used when no vertex colors) and repaint."""
        self.opts['color'] = c
        self.update()
    def paint(self):
        """Render the mesh (faces, points, edges) honoring view options.

        Assumes a current GL context; called by the owning view each frame.
        """
        # self.setupGLState()
        if self._GLFaces is None:
            # Lazily (re)build the triangulated index cache.
            self.update_GLFace()
        verts = self.getVertexes()
        if verts is None:
            return
        glEnableClientState(GL_VERTEX_ARRAY)
        glVertexPointerf(verts)

        color = self.getColors()
        hasColor = color is not None
        if not hasColor:
            # Fall back to the uniform mesh color.
            glColor3f(*self.opts['color'])
        else:
            glEnableClientState(GL_COLOR_ARRAY)
            glColorPointerf(color)

        if self.view().opts['drawFaces'] and self.getNumFaces() > 0:
            with self.shader():
                self.shader().setUniform1i("flatColor", self._flatColor)
                norms = self.getNormals()
                faces = self._GLFaces
                uvs = self.getUVs()
                try:
                    if norms is not None:
                        glEnableClientState(GL_NORMAL_ARRAY)
                        glNormalPointerf(norms)
                    if uvs is not None:
                        glEnableClientState(GL_TEXTURE_COORD_ARRAY)
                        glTexCoordPointerf(uvs)
                    if faces is None:
                        # No index cache: draw vertices as raw triangles.
                        glDrawArrays(GL_TRIANGLES, 0, np.product(verts.shape[:-1]))
                    else:
                        faces = faces.astype(np.uint).flatten()
                        glDrawElements(GL_TRIANGLES, faces.shape[0], GL_UNSIGNED_INT, faces)
                finally:
                    glDisableClientState(GL_NORMAL_ARRAY)
                    glDisableClientState(GL_TEXTURE_COORD_ARRAY)

        if self.view().opts['drawPoints']:
            # Optional per-point scale attribute.
            if self._mesh.has_vertex_property("pscale"):
                pscale = self.getVertexAttribData("pscale")
            else:
                pscale = None
            if not hasColor:
                glColor3f(*self.opts['pointColor'])
            with PointShader:
                camPos = self.view().cameraPosition()
                if camPos is not None:
                    PointShader.setUniform3f("camPos", camPos)
                    PointShader.setUniform1i("pixmode", 0)
                else:
                    # No camera position: draw pixel-sized points.
                    PointShader.setUniform1i("pixmode", 1)
                if pscale is None:
                    # Uniform point size.
                    PointShader.setUniform1f("unifrom_scale", 0.5)
                    glDrawArrays(GL_POINTS, 0, self.getNumVertexes())
                else:
                    # Per-point size via the "pscale" vertex attribute;
                    # -1 tells the shader to use the attribute stream.
                    PointShader.setUniform1f("unifrom_scale", -1)
                    pscaleAttrib = PointShader.getAttribLocation("pscale")
                    vertexScales = vbo.VBO(pscale.flatten())
                    vertexScales.bind()
                    glEnableVertexAttribArray(pscaleAttrib)
                    glVertexAttribPointer(pscaleAttrib, 1, GL_FLOAT, False, 0, None)
                    glDrawArrays(GL_POINTS, 0, self.getNumVertexes())
                    vertexScales.unbind()
        glDisableClientState(GL_COLOR_ARRAY)

        if self.view().opts['drawEdges'] and self.getNumEdges() > 0:
            edges = self.getEdges()
            # color = self.getEdgesColors()
            try:
                # if color is None:
                glColor3f(*self.opts['edgeColor'])
                # else:
                #     glEnableClientState(GL_COLOR_ARRAY)
                #     glColorPointerf(color)
                edges = edges.flatten()
                glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT, edges)
            finally:
                pass
                # glDisableClientState(GL_COLOR_ARRAY)

        glDisableClientState(GL_VERTEX_ARRAY)
        if self._selected:
            self.drawBBox()
    def getVertexes(self):
        """
        Get mesh vertex positions.
        Returns:
            np.ndarray, shape = (nv,3), or None when the mesh is empty.
        """
        p = self._mesh.points()
        if p.shape[0] == 0:
            return None
        return p

    def getFaces(self):
        """
        Get mesh faces-vertex indices.
        Returns:
            list of np.ndarray or None.
        """
        f = self._mesh.face_vertex_indices()
        if f.shape[0] == 0:
            return None
        # openmesh pads rows with -1 up to the widest face; strip the
        # padding per face.
        return [face[face >= 0] for face in f]

    def getColors(self):
        """
        Get mesh vertex colors.
        Returns:
            np.ndarray, shape = (nv,3) or None.
        """
        if self.hasAttribute('vertex', 'color'):
            return self.getVertexAttribData("color")
        else:
            return None

    def getUVs(self):
        """
        Get mesh vertex texcoords.
        Returns:
            np.ndarray, shape = (nv,3) or None.
        """
        if self.hasAttribute('vertex', 'uv'):
            uv = self.getVertexAttribData("uv")
            return uv
        return None

    def getNormals(self):
        """
        Get mesh vertex normals, creating them on first access.
        Returns:
            np.ndarray, shape = (nv,3) or None.
        """
        if not self.hasAttribute('vertex', 'normal'):
            if self.getNumFaces() == 0:
                # Normals are undefined without faces.
                return None
            # Register the attribute, then let openmesh compute values.
            self.createAttribute('vertex', 'normal', attribType='vector3', defaultValue=[0, 0, 0], applyValue=False)
            self._mesh.update_vertex_normals()
        return self._mesh.vertex_normals()

    def getFaceNormals(self):
        """
        Get mesh face normals, creating them on first access.
        Returns:
            np.ndarray, shape = (nf,3) or None.
        """
        if not self.hasAttribute('face', 'normal'):
            if self.getNumFaces() == 0:
                return None
            self.createAttribute('face', 'normal', attribType='vector3', defaultValue=[0, 0, 0], applyValue=False)
            self._mesh.update_face_normals()
        return self._mesh.face_normals()

    def getVertexFaces(self):
        """
        Get mesh vertex-face indices.
        Returns:
            list of np.ndarray.
        """
        vf = self._mesh.vertex_face_indices()
        # Strip openmesh's -1 padding per vertex.
        return [face[face >= 0] for face in vf]

    def getEdges(self):
        """
        Get mesh edge-vertex indices.
        Returns:
            np.ndarray or None.
        """
        e = self._mesh.edge_vertex_indices()
        if e.shape[0] == 0:
            return None
        return e

    def getNumVertexes(self):
        """
        Get mesh vertices count.
        Returns:
            int.
        """
        return self._mesh.n_vertices()

    def getNumFaces(self):
        """
        Get mesh faces count.
        Returns:
            int.
        """
        return self._mesh.n_faces()

    def getNumEdges(self):
        """
        Get mesh edges count.
        Returns:
            int.
        """
        return self._mesh.n_edges()
    @property
    def attributeMap(self):
        """
        Get mesh all attribute info.
        Returns:
            dict{'vertex':...,'edge':...,'face':...,'detail':...}.
        """
        return self._attributeMap

    def getAttribNames(self, allInOne=False, with_group=False):
        """
        Get attribute names of the mesh.
        Args:
            allInOne(bool): return all names in one flat list.
            with_group(bool): include names containing ':' (grouped
                attributes -- presumably group-prefixed; verify the
                ':' naming convention with the attribute-group code).
        Returns:
            dict or list.
        """
        if with_group:
            v = list(self._attributeMap["vertex"].keys())
            f = list(self._attributeMap["face"].keys())
            e = list(self._attributeMap["edge"].keys())
        else:
            v = [i for i in self._attributeMap["vertex"].keys() if ":" not in i]
            f = [i for i in self._attributeMap["face"].keys() if ":" not in i]
            e = [i for i in self._attributeMap["edge"].keys() if ":" not in i]
        # Detail attributes are never grouped.
        d = list(self._attributeMap["detail"].keys())
        if allInOne:
            result = []
            result.extend(v)
            result.extend(f)
            result.extend(e)
            result.extend(d)
        else:
            result = {'vertex': v,
                      'face': f,
                      'edge': e,
                      'detail': d}
        return result
def _getAttribType(self, attribClass, name):
"""
Get attribute value type.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
str.
"""
if attribClass == "vertex":
value = self.getVertexAttrib(name, 0)
elif attribClass == "edge":
value = self.getEdgeAttrib(name, 0)
elif attribClass == "face":
value = self.getFaceAttrib(name, 0)
elif attribClass == "detail":
value = self.getDetailAttrib(name)
else:
return 'none'
checkType = type(value)
if checkType is np.ndarray or checkType is list:
if checkType is np.ndarray:
size = value.size
else:
size = len(value)
if size == 2:
return 'vector2'
elif size == 3:
return 'vector3'
elif size == 4:
return 'vector4'
elif size == 9:
return 'matrix3'
elif size == 16:
return 'matrix4'
return DATA_TYPE_MAP.get(checkType, 'none')
    def getAttribType(self, attribClass, name):
        """
        Get attribute type.
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
        Returns:
            attribute type(str).
        Raises:
            AttributeError: if the attribute does not exist.
        """
        if not self.hasAttribute(attribClass, name):
            raise AttributeError("the attribute does't exist!")
        return self._attributeMap[attribClass][name]['type']

    def getAttribDefaultValue(self, attribClass, name):
        """
        Get attribute default value.
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
        Returns:
            default attribute value.
        Raises:
            AttributeError: if the attribute does not exist.
        """
        if not self.hasAttribute(attribClass, name):
            raise AttributeError("the attribute does't exist!")
        return self._attributeMap[attribClass][name]['default_value']

    def getAttribIsArray(self, attribClass, name):
        """
        Get whether attribute is stored as a numpy property array.
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
        Returns:
            bool.
        Raises:
            AttributeError: if the attribute does not exist.
        """
        if not self.hasAttribute(attribClass, name):
            raise AttributeError("the attribute does't exist!")
        return self._attributeMap[attribClass][name]['is_array']

    def getAttribInfo(self, attribClass, name):
        """
        Get attribute info.
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
        Returns:
            dict {'default_value': defaultValue, 'type': attribType, is_array': array_mode}.
        """
        # NOTE(review): unlike the getters above there is no
        # hasAttribute check here, so a missing name raises KeyError
        # rather than AttributeError.
        return self._attributeMap[attribClass][name]

    def setAttribInfo(self, attribClass, name, info):
        """
        Set attribute info (bookkeeping only; does not touch mesh data).
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
            info(dict): {'default_value': defaultValue, 'type': attribType, is_array': array_mode}.
        """
        self._attributeMap[attribClass][name] = info
    def createAttribute(self, attribClass, name, attribType=None, defaultValue=None, applyValue=True):
        """
        Create a new attribute.
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
            attribType(str): type of the attribute value->[float, int, vector2, vector3, vector4, matrix3, matrix4 , bool, list, tuple, custom].
            defaultValue(any): default value of the attribute.
            applyValue(bool): fill every element with the default value.
        """
        if attribType is None:
            # Infer the type from an existing sample value.
            attribType = self._getAttribType(attribClass, name)
        if defaultValue is None:
            defaultValue = DEFAULT_VALUE_MAP.get(attribType, None)
        array_mode = DATA_IS_ARRAY_MAP.get(attribType, False)
        shape = DATA_SHAPE_MAP.get(attribType, None)
        if attribType == 'list':
            # Per-element lists are stored as fixed-width rows.
            shape = [0, len(defaultValue)]
        if attribClass == "vertex":
            if name == 'pos':
                # 'pos' is the built-in vertex position; never recreated.
                return
            # Register the property on the mesh (array or per-object).
            if array_mode:
                self._mesh.vertex_property_array(name)
            else:
                self._mesh.vertex_property(name)
            if applyValue:
                data = np.broadcast_to(defaultValue, get_shape(self.getNumVertexes(), shape))
                if array_mode:
                    self._mesh.set_vertex_property_array(name, data)
                else:
                    self._mesh.set_vertex_property(name, list(data))
        elif attribClass == "face":
            if array_mode:
                self._mesh.face_property_array(name)
            else:
                self._mesh.face_property(name)
            if applyValue:
                data = np.broadcast_to(defaultValue, get_shape(self.getNumFaces(), shape))
                if array_mode:
                    self._mesh.set_face_property_array(name, data)
                else:
                    self._mesh.set_face_property(name, list(data))
        elif attribClass == "edge":
            if array_mode:
                self._mesh.edge_property_array(name)
            else:
                self._mesh.edge_property(name)
            if applyValue:
                data = np.broadcast_to(defaultValue, get_shape(self.getNumEdges(), shape))
                if array_mode:
                    self._mesh.set_edge_property_array(name, data)
                else:
                    self._mesh.set_edge_property(name, list(data))
        elif attribClass == "detail":
            # Detail attributes live only in _attributeMap.
            array_mode = False
        else:
            raise AttributeError("please input attribute class in ['vertex', 'edge', 'face', 'detail']")
        # Record the attribute for getAttribNames()/getAttribInfo().
        self._attributeMap[attribClass][name] = {'default_value': defaultValue, 'type': attribType,
                                                 'is_array': array_mode}
def removeAttribute(self, attribClass, name):
"""
Remove a attribute.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
"""
if attribClass == "vertex":
if name == 'pos':
return
if self._mesh.has_vertex_property(name):
self._mesh.remove_vertex_property(name)
self._attributeMap["vertex"].pop(name)
elif attribClass == "face":
if self._mesh.has_face_property(name):
self._mesh.remove_face_property(name)
self._attributeMap["face"].pop(name)
elif attribClass == "edge":
if self._mesh.has_edge_property(name):
self._mesh.remove_edge_property(name)
self._attributeMap["edge"].pop(name)
elif attribClass == "detail":
if name in self._attributeMap["detail"].keys():
self._attributeMap["detail"].pop(name)
def renameAttribute(self, attribClass, name, new_name):
"""
Rename a attribute
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail']
names(str): specific attribute name
new_names(str): new attribute name
"""
self.copyAttribute(attribClass, name, new_name, True)
def copyAttribute(self, attribClass, from_name, to_name, remove=False):
"""
Copy attribute data to a new attribute.
Args:
attribClass: one of ['vertex', 'edge', 'face', 'detail'].
from_name: specific attribute name.
to_name: new attribute name.
remove: remove the from attribute.
"""
if not self.hasAttribute(attribClass, from_name):
raise AttributeError("attribute {} of {} not exist".format(from_name, attribClass))
if from_name == to_name:
return
attrib_type = self.getAttribType(attribClass, from_name)
default_value = self.getAttribDefaultValue(attribClass, from_name)
if attribClass == "vertex":
a = self.getVertexAttribData(from_name)
self.setVertexAttribData(to_name, a, attrib_type, default_value)
elif attribClass == "face":
a = self.getFaceAttribData(from_name)
self.setFaceAttribData(to_name, a, attrib_type, default_value)
elif attribClass == "edge":
a = self.getEdgeAttribData(from_name)
self.setEdgeAttribData(to_name, a, attrib_type, default_value)
elif attribClass == "detail":
self.createAttribute("detail", to_name, attrib_type, default_value)
if remove:
self.removeAttribute(attribClass, from_name)
def hasAttribute(self, attribClass, name):
"""
Returns whether the mesh contains a specific attribute
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
bool.
"""
if attribClass in self._attributeMap.keys():
if name in self._attributeMap[attribClass].keys():
return True
return False
def addVertex(self, pos):
"""
Add a vertex to the mesh.
Args:
pos(list/tuple/np.ndarray): position of the new vertex, type can be: [list,ndarray,tuple].
Returns:
openmesh.VertexHandle.
"""
if type(pos) is list:
return self._mesh.add_vertex(np.array(pos))
elif type(pos) is np.ndarray:
return self._mesh.add_vertex(pos)
elif type(pos) is tuple:
return self._mesh.add_vertex(np.array([pos[0], pos[1], pos[2]]))
def addFace(self, vts):
"""
Add a face to the mesh.
Args:
vts(list): vertices of the new face, type can be: list of [openmesh.VertexHandle, int]
Returns:
openmesh.FaceHandle
"""
self._GLFaces = None
if type(vts[0]) is openmesh.VertexHandle:
return self._mesh.add_face(vts)
else:
return self._mesh.add_face([self._mesh.vertex_handle(i) for i in vts])
def addVertices(self, vts):
"""
Add vertices to the mesh.
Args:
vts: new vertices , np.ndarray or list, shape = (n,3).
"""
self._GLFaces = None
self._mesh.add_vertices(vts)
def addFaces(self, fcs):
"""
Add faces to the mesh.
Args:
fcs: new faces , np.ndarray or list of ndarray.
"""
self._GLFaces = None
self._mesh.add_faces(fcs)
def removeVertex(self, vt, isolate=False, clean=True):
"""
Remove a vertex from mesh.
Args:
vt(int/openmesh.VertexHandle): vertex index or vertex handle.
isolate(bool): if True, delete the connected elements.
clean(bool): if True, garbage collection after delete.
"""
if type(vt) is not openmesh.VertexHandle:
vt = self._mesh.vertex_handle(vt)
if vt.idx() < self.getNumVertexes():
self._mesh.delete_vertex(vt, isolate)
if clean:
self._mesh.garbage_collection()
self._GLFaces = None
def removeFace(self, fc, isolate=False, clean=True):
"""
Remove a face from mesh.
Args:
fc(int/openmesh.FaceHandle): face index or face handle.
isolate(bool): if True, delete the connected elements.
clean(bool): if True, garbage collection after delete.
"""
if type(fc) is not openmesh.FaceHandle:
fc = self._mesh.face_handle(fc)
if fc.idx() < self.getNumFaces():
self._mesh.delete_face(fc, isolate)
if clean:
self._mesh.garbage_collection()
self._GLFaces = None
def removeEdge(self, eg, isolate=False, clean=True):
"""
Remove an edge from mesh.
Args:
eg(int/openmesh.EdgeHandle): edge index or edge handle.
isolate(bool): if True, delete the connected elements.
clean(bool): if True, garbage collection after delete.
"""
if type(eg) is not openmesh.EdgeHandle:
eg = self._mesh.edge_handle(eg)
if eg.idx() < self.getNumEdges():
self._mesh.delete_edge(eg, isolate)
if clean:
self._mesh.garbage_collection()
self._GLFaces = None
def removeVertices(self, vts, isolate=False):
"""
Remove vertices from mesh.
@param vts: list of vertex index or list of vertex handle.
@param isolate: if True, delete the connected elements.
"""
for vt in vts:
self.removeVertex(vt, isolate, False)
self._mesh.garbage_collection()
def removeFaces(self, fcs, isolate=False):
"""
Remove faces from mesh.
Args:
fcs(list): list of face index or list of face handle.
isolate(bool): if True, delete the connected elements.
"""
for fc in fcs:
self.removeFace(fc, isolate, False)
self._mesh.garbage_collection()
def removeEdges(self, egs, isolate=False):
"""
Remove edges from mesh.
Args:
egs(list): list of edge index or list of edge handle.
isolate(bool): if True, delete the connected elements.
"""
for eg in egs:
self.removeEdge(eg, isolate, False)
self._mesh.garbage_collection()
def clear(self):
"""
Clear all mesh data.
"""
self._mesh.clear()
self._attributeMap = {}
self.signals.emit_attribChanged()
self.update()
def getVertexAttribData(self, name):
"""
Get vertex attribute data.
Args:
name(str): specific attribute name.
Returns:
vertex attribute data.
"""
if name == 'pos':
return self._mesh.points()
elif name == 'normal':
return self.getNormals()
else:
if not self.hasAttribute('vertex', name):
raise AttributeError("Attribute {} does't exist!".format(name))
if self.getAttribIsArray('vertex', name):
return self._mesh.vertex_property_array(name)
else:
return self._mesh.vertex_property(name)
def getFaceAttribData(self, name):
"""
Get face attribute data.
Args:
name(str): specific attribute name.
Returns:
face attribute data.
"""
if name == 'normal':
return self.getFaceNormals()
else:
if not self._mesh.has_face_property(name):
raise AttributeError("Attribute {} does't exist!".format(name))
if self.getAttribIsArray('face', name):
return self._mesh.face_property_array(name)
else:
return self._mesh.face_property(name)
def getEdgeAttribData(self, name):
"""
Get edge attribute data.
Args:
name(str): specific attribute name.
Returns:
edge attribute data.
"""
if not self._mesh.has_edge_property(name):
raise AttributeError("Attribute {} does't exist!".format(name))
if self.getAttribIsArray('edge', name):
return self._mesh.edge_property_array(name)
else:
return self._mesh.edge_property(name)
def setVertexAttribData(self, name, data, attribType=None, defaultValue=None):
"""
Set vertex attribute data , if the attribute not exist, create and set it.
Args:
name(str): specific attribute name.
data(lsit/np.ndarray): attribute data.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
defaultValue(any): if the attribute is not exist, we need defaultValue to create the attribute.
"""
if name == 'pos':
self._mesh.points()[..., [0, 1, 2]] = data
elif name == 'normal':
self.getNormals()[..., [0, 1, 2]] = data
else:
if not self._mesh.has_vertex_property(name):
if defaultValue is None:
defaultValue = data[0]
self.createAttribute('vertex', name, attribType, defaultValue=defaultValue, applyValue=False)
is_array = self.getAttribIsArray('vertex', name)
if is_array:
self._mesh.set_vertex_property_array(name, data)
else:
self._mesh.set_vertex_property(name, data)
self.signals.emit_attribChanged()
def setFaceAttribData(self, name, data, attribType=None, defaultValue=None):
"""
Set face attribute data , if the attribute not exist, create and set it.
Args:
name(str): specific attribute name.
data(lsit/np.ndarray): attribute data.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
defaultValue(any): if the attribute is not exist, we need defaultValue to create the attribute.
"""
if name == 'normal':
self.getFaceNormals()[..., [0, 1, 2]] = data
else:
if not self._mesh.has_face_property(name):
if defaultValue is None:
defaultValue = data[0]
self.createAttribute('face', name, attribType, defaultValue=defaultValue, applyValue=False)
is_array = self.getAttribIsArray('face', name)
if is_array:
self._mesh.set_face_property_array(name, data)
else:
self._mesh.set_face_property(name, data)
self.signals.emit_attribChanged()
def setEdgeAttribData(self, name, data, attribType=None, defaultValue=None):
"""
Set edge attribute data , if the attribute not exist, create and set it.
Args:
name(str): specific attribute name.
data(lsit/np.ndarray): attribute data.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
defaultValue(any): if the attribute is not exist, we need defaultValue to create the attribute.
"""
if not self._mesh.has_edge_property(name):
if defaultValue is None:
defaultValue = data[0]
self.createAttribute('edge', name, attribType, defaultValue=defaultValue, applyValue=False)
is_array = self.getAttribIsArray('edge', name)
if is_array:
self._mesh.set_edge_property_array(name, data)
else:
self._mesh.set_edge_property(name, data)
self.signals.emit_attribChanged()
def getVertexAttrib(self, name, index):
"""
Get a vertex attribute value.
Args:
name(str): specific attribute name.
index(int): vertex index.
Returns:
vertex attribute value.
"""
vh = self._mesh.vertex_handle(index)
if self._mesh.has_vertex_property(name):
return self._mesh.vertex_property(name, vh)
if name == 'pos':
return self._mesh.point(vh)
elif name == 'normal':
return self._mesh.normal(vh)
def getFaceAttrib(self, name, index):
"""
Get a face attribute value.
Args:
name(str): specific attribute name.
index(int): face index.
Returns:
face attribute value.
"""
fh = self._mesh.face_handle(index)
if self._mesh.has_face_property(name):
return self._mesh.face_property(name, fh)
if name == 'normal':
return self._mesh.normal(fh)
def getEdgeAttrib(self, name, index):
"""
Get a edge attribute value.
Args:
name(str): specific attribute name.
index(int): edge index.
Returns:
edge attribute value.
"""
eh = self._mesh.edge_handle(index)
if self._mesh.has_edge_property(name):
return self._mesh.edge_property(name, eh)
return None
def setVertexAttrib(self, name, index, value):
"""
Set a vertex attribute value.
Args:
name(str): specific attribute name.
index(int): vertex index.
value(any): attribute value.
"""
vh = self._mesh.vertex_handle(index)
if self._mesh.has_vertex_property(name):
self._mesh.set_vertex_property(name, vh, value)
self.signals.emit_attribChanged()
return True
if name == 'pos':
self._mesh.set_point(vh, value)
return True
elif name == 'normal':
self._mesh.set_normal(vh, value)
return True
return False
def setFaceAttrib(self, name, index, value):
"""
Set a face attribute value.
Args:
name(str): specific attribute name.
index(int): face index.
value(any): attribute value.
"""
fh = self._mesh.face_handle(index)
if self._mesh.has_face_property(name):
self._mesh.set_face_property(name, fh, value)
self.signals.emit_attribChanged()
return True
if name == 'normal':
self._mesh.set_normal(fh, value)
return True
return False
def setEdgeAttrib(self, name, index, value):
"""
Set a edge attribute value.
Args:
name(str): specific attribute name.
index(int): edge index.
value(any): attribute value.
"""
eh = self._mesh.edge_handle(index)
if self._mesh.has_edge_property(name):
self._mesh.set_edge_property(name, eh, value)
self.signals.emit_attribChanged()
return True
return False
def getDetailAttrib(self, name):
"""
Get a detail attribute value.
Args:
name(str): specific attribute name.
Returns:
detail attribute value.
"""
if name in self._attributeMap['detail'].keys():
return self._attributeMap['detail'][name]['default_value']
return None
def setDetailAttrib(self, name, value, attribType=None):
"""
Set a detail attribute value.
Args:
name(str): specific attribute name.
value(any): attribute value.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
"""
if name in self._attributeMap['detail'].keys():
self._attributeMap['detail'][name]['default_value'] = value
else:
if attribType is None:
raise AttributeError("detail attribute {} not exist, please create it or input attribType".format(name))
self.createAttribute('detail', name, attribType, value)
self.signals.emit_attribChanged()
def getAllVertexAttributes(self):
"""
Get all vertex attribute data.
Returns:
dict {attribute name: attribute data}.
"""
data = {}
for attrib_name in self._attributeMap["vertex"].keys():
data[attrib_name] = self.getVertexAttribData(attrib_name)
return data
def createGroup(self, groupClass, name, default=False):
"""
Create a group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
default(bool): if True, all elements will in the group.
"""
if groupClass == 'vertex':
name = "v:" + name
elif groupClass == 'face':
name = "f:" + name
elif groupClass == 'edge':
name = "e:" + name
self.createAttribute(groupClass, name, 'bool', default)
def getGroupData(self, groupClass, name):
"""
Get group data.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
Returns:
list of bool.
"""
if groupClass == 'vertex':
name = "v:" + name
if self._mesh.has_vertex_property(name):
return self._mesh.vertex_property_array(name).astype(np.bool)
elif groupClass == 'face':
name = "f:" + name
if self._mesh.has_face_property(name):
return self._mesh.face_property_array(name).astype(np.bool)
elif groupClass == 'edge':
name = "e:" + name
if self._mesh.has_edge_property(name):
return self._mesh.edge_property_array(name).astype(np.bool)
else:
raise AttributeError("class {} does not support group".format(groupClass))
raise AttributeError("Group {} does not exist".format(name))
def setGroupData(self, groupClass, name, data):
"""
Set group data.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
data(list): list of bool.
"""
if groupClass == 'vertex':
name = "v:" + name
self.setVertexAttribData(name, data, 'bool', False)
elif groupClass == 'face':
name = "f:" + name
self.setFaceAttribData(name, data, 'bool', False)
elif groupClass == 'edge':
name = "e:" + name
self.setEdgeAttribData(name, data, 'bool', False)
def getGroup(self, groupClass, name, index):
"""
Get whether a specific element is in the group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
index(int): element index.
Returns:
group value(bool).
"""
if groupClass == 'vertex':
name = "v:" + name
if self._mesh.has_vertex_property(name):
vh = self._mesh.vertex_handle(index)
return bool(self._mesh.vertex_property(name, vh))
elif groupClass == 'face':
name = "f:" + name
if self._mesh.has_face_property(name):
fh = self._mesh.face_handle(index)
return bool(self._mesh.face_property(name, fh))
elif groupClass == 'edge':
name = "e:" + name
if self._mesh.has_edge_property(name):
eh = self._mesh.edge_handle(index)
return bool(self._mesh.edge_property(name, eh))
def setGroup(self, groupClass, name, index, value):
"""
Set whether a specific element is in the group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
index(int): element index.
value(bool).
"""
assert type(value) is bool
if groupClass == 'vertex':
self.setVertexAttrib("v:" + name, index, value)
elif groupClass == 'face':
self.setFaceAttrib("f:" + name, index, value)
elif groupClass == 'edge':
self.setEdgeAttrib("e:" + name, index, value)
def getGroupNames(self, allInOne=False):
"""
Get all group names of the mesh.
Args
allInOne(bool): put all names in one list.
Returns:
dict or list.
"""
v = [i[2:] for i in self._attributeMap["vertex"].keys() if ":" in i]
f = [i[2:] for i in self._attributeMap["face"].keys() if ":" in i]
e = [i[2:] for i in self._attributeMap["edge"].keys() if ":" in i]
if allInOne:
result = []
result.extend(v)
result.extend(f)
result.extend(e)
else:
result = {'vertex': v,
'face': f,
'edge': e}
return result
def removeGroup(self, groupClass, name):
"""
Remove a group from mesh.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
"""
if groupClass == 'vertex':
name = "v:" + name
elif groupClass == 'face':
name = "f:" + name
elif groupClass == 'edge':
name = "e:" + name
if ":" in name:
self.removeAttribute(groupClass, name)
def hasGroup(self, groupClass, name):
"""
Get whether the mesh contain a specific group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
Returns:
bool
"""
if groupClass == 'vertex':
name = "v:" + name
elif groupClass == 'face':
name = "f:" + name
elif groupClass == 'edge':
name = "e:" + name
if ":" in name:
return self.hasAttribute(groupClass, name)
else:
return False
| [
"numpy.identity",
"numpy.product",
"openmesh.PolyMesh",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.empty",
"copy.deepcopy",
"numpy.shape",
"numpy.broadcast_to"
] | [((3084, 3116), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (3095, 3116), True, 'import numpy as np\n'), ((3133, 3165), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'np.float64'}), '(4, dtype=np.float64)\n', (3144, 3165), True, 'import numpy as np\n'), ((7821, 7846), 'copy.deepcopy', 'copy.deepcopy', (['self._mesh'], {}), '(self._mesh)\n', (7834, 7846), False, 'import copy\n'), ((722, 745), 'numpy.shape', 'np.shape', (['element_value'], {}), '(element_value)\n', (730, 745), True, 'import numpy as np\n'), ((833, 856), 'numpy.empty', 'np.empty', (['element_shape'], {}), '(element_shape)\n', (841, 856), True, 'import numpy as np\n'), ((1608, 1631), 'numpy.shape', 'np.shape', (['element_value'], {}), '(element_value)\n', (1616, 1631), True, 'import numpy as np\n'), ((1717, 1740), 'numpy.empty', 'np.empty', (['element_shape'], {}), '(element_shape)\n', (1725, 1740), True, 'import numpy as np\n'), ((2488, 2511), 'numpy.shape', 'np.shape', (['element_value'], {}), '(element_value)\n', (2496, 2511), True, 'import numpy as np\n'), ((2597, 2620), 'numpy.empty', 'np.empty', (['element_shape'], {}), '(element_shape)\n', (2605, 2620), True, 'import numpy as np\n'), ((4783, 4802), 'openmesh.PolyMesh', 'openmesh.PolyMesh', ([], {}), '()\n', (4800, 4802), False, 'import openmesh\n'), ((946, 991), 'numpy.broadcast_to', 'np.broadcast_to', (['element_value', 'element_shape'], {}), '(element_value, element_shape)\n', (961, 991), True, 'import numpy as np\n'), ((1828, 1873), 'numpy.broadcast_to', 'np.broadcast_to', (['element_value', 'element_shape'], {}), '(element_value, element_shape)\n', (1843, 1873), True, 'import numpy as np\n'), ((2708, 2753), 'numpy.broadcast_to', 'np.broadcast_to', (['element_value', 'element_shape'], {}), '(element_value, element_shape)\n', (2723, 2753), True, 'import numpy as np\n'), ((6156, 6175), 'numpy.min', 'np.min', (['vts'], {'axis': '(0)'}), '(vts, axis=0)\n', (6162, 6175), True, 'import 
numpy as np\n'), ((6510, 6529), 'numpy.max', 'np.max', (['vts'], {'axis': '(0)'}), '(vts, axis=0)\n', (6516, 6529), True, 'import numpy as np\n'), ((27023, 27036), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (27031, 27036), True, 'import numpy as np\n'), ((27196, 27230), 'numpy.array', 'np.array', (['[pos[0], pos[1], pos[2]]'], {}), '([pos[0], pos[1], pos[2]])\n', (27204, 27230), True, 'import numpy as np\n'), ((10338, 10366), 'numpy.product', 'np.product', (['verts.shape[:-1]'], {}), '(verts.shape[:-1])\n', (10348, 10366), True, 'import numpy as np\n')] |
#
from __future__ import print_function
import datetime as dt
import math
from apps.ots.strategy.performance import Performance
try:
import Queue as queue
except ImportError:
import queue
import numpy as np
import pandas as pd
#
from apps.ots.event.ots_event import OtsEvent
from apps.ots.event.signal_event import SignalEvent
from apps.ots.event.order_event import OrderEvent
from apps.ots.event.fill_event import FillEvent
from apps.ots.strategy.naive_risk_manager import NaiveRiskManager
from apps.ots.order.ots_order import OtsOrder
from apps.ots.ots_util import OtsUtil
class PortfolioHft(object):
    '''
    Portfolio tracker for the HFT backtest loop.

    Receives SignalEvents from a Strategy and, using the current positions,
    the risk model and order sizing rules, turns them into OrderEvents; it
    also consumes FillEvents coming back from the exchange simulator and
    updates positions and holdings accordingly.

    all_positions: time-indexed list of dicts with per-symbol quantities.
    all_holdings:  time-indexed list of dicts with per-symbol market value,
                   cash, commission and the portfolio total.
    '''
    def __init__(self, bars, events, start_date, initial_capital=100000):
        """
        Args:
            bars: market data handler exposing ``symbol_list`` and the
                latest-bar accessors used below.
            events: shared event queue the portfolio pushes OrderEvents onto.
            start_date: timestamp of the first bar of the backtest.
            initial_capital: starting cash.
        """
        self.risk_manager = NaiveRiskManager()
        self.bars = bars
        self.events = events
        self.symbol_list = self.bars.symbol_list
        self.start_date = start_date
        self.initial_capital = initial_capital
        self.all_positions = self.construct_all_positions()
        self.current_positions = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
        self.all_holdings = self.construct_all_holdings()
        self.current_holdings = self.construct_current_holdings()

    def construct_all_positions(self):
        """Build the initial positions history: one all-zero row at start_date."""
        d = dict((k, v) for k, v in [(s, 0.0) for s in self.symbol_list])
        d['datetime'] = self.start_date
        return [d]

    def construct_all_holdings(self):
        """Build the initial holdings history: all cash, no positions."""
        d = dict((k, v) for k, v in [(s, 0.0) for s in self.symbol_list])
        d['cash'] = self.initial_capital
        d['commission'] = 0.0
        d['total'] = self.initial_capital
        return [d]

    def construct_current_holdings(self):
        """Build the dict holding the portfolio's current value."""
        d = dict((k, v) for k, v in [(s, 0.0) for s in self.symbol_list])
        d['cash'] = self.initial_capital
        d['commission'] = 0.0
        d['total'] = self.initial_capital
        return d

    def update_timeindex(self, event):
        """
        Append a new snapshot of positions/holdings valued at the latest bar.

        Args:
            event: market event (unused; the latest bars are pulled from
                ``self.bars``).
        """
        latest_datetime = self.bars.get_latest_bar_dt(self.symbol_list[0])
        dp = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
        dp['datetime'] = latest_datetime
        for s in self.symbol_list:
            dp[s] = self.current_positions[s]
        self.all_positions.append(dp)
        dh = dict((k, v) for k, v in [(s, 0) for s in self.symbol_list])
        dh['datetime'] = latest_datetime
        dh['cash'] = self.current_holdings['cash']
        dh['commission'] = self.current_holdings['commission']
        dh['total'] = self.current_holdings['total']
        for s in self.symbol_list:
            # Mark positions to market with the latest close price.
            market_value = self.current_positions[s] * self.bars.get_latest_bar_value(s, 'close')
            dh[s] = market_value
            dh['total'] += market_value
        self.all_holdings.append(dh)

    def update_positions_from_fill(self, fillEvent):
        """Apply a FillEvent to the per-symbol share counts."""
        fill_direction = 0
        if fillEvent.direction == 'BUY':
            fill_direction = 1
        elif fillEvent.direction == 'SELL':
            fill_direction = -1
        self.current_positions[fillEvent.symbol] += fill_direction * fillEvent.quantity

    def update_holdings_from_fill(self, fillEvent):
        """Apply a FillEvent to the cash/commission/market-value bookkeeping."""
        fill_direction = 0
        if fillEvent.direction == 'BUY':
            fill_direction = 1
        elif fillEvent.direction == 'SELL':
            fill_direction = -1
        # Fill price is approximated with the latest close (no intrabar data).
        fill_price = self.bars.get_latest_bar_value(fillEvent.symbol, 'close')
        amount = fill_direction * fill_price * fillEvent.quantity
        self.current_holdings[fillEvent.symbol] += amount
        self.current_holdings['commission'] = fillEvent.commission
        self.current_holdings['cash'] -= (amount + fillEvent.commission)
        self.current_holdings['total'] -= (amount + fillEvent.commission)

    def update_fill(self, event):
        """Update both positions and holdings after a FillEvent."""
        self.update_positions_from_fill(event)
        self.update_holdings_from_fill(event)

    def generate_order(self, signalEvent):
        """
        Turn a SignalEvent into a market OrderEvent.

        Returns:
            OrderEvent or None when the signal does not translate to an order.
        """
        order_event = None
        symbol = signalEvent.symbol
        direction = signalEvent.direction
        mkt_quantity = self.risk_manager.get_mkt_quantity(signalEvent=signalEvent)
        # BUGFIX: the held *share count* lives in current_positions;
        # current_holdings[symbol] is the position's market value.
        curr_quantity = self.current_positions[symbol]
        order_type = OtsOrder.OT_MKT
        if direction == 'LONG' and curr_quantity == 0:
            order_event = OrderEvent(symbol, order_type, mkt_quantity, 'BUY')
        elif direction == 'SHORT' and curr_quantity >= mkt_quantity:
            order_event = OrderEvent(symbol, order_type, mkt_quantity, 'SELL')
        elif direction == 'EXIT' and curr_quantity > 0:
            order_event = OrderEvent(symbol, order_type, abs(mkt_quantity), 'SELL')
        elif direction == 'EXIT' and curr_quantity < 0:
            order_event = OrderEvent(symbol, order_type, abs(mkt_quantity), 'BUY')
        return order_event

    def update_signal(self, event):
        """Convert a SignalEvent into an OrderEvent and queue it."""
        order_event = self.generate_order(event)
        self.events.put(order_event)

    def update_from_event(self, event):
        """Dispatch an incoming event to the matching update handler."""
        if event.type == OtsEvent.ET_SIGNAL:
            self.update_signal(event)
        elif event.type == OtsEvent.ET_FILL:
            self.update_fill(event)

    def create_equity_curve_dataframe(self):
        """Build the equity-curve DataFrame from the all_holdings history."""
        curve = pd.DataFrame(self.all_holdings)
        curve.set_index('datetime', inplace=True)
        curve['returns'] = curve['total'].pct_change()
        curve['equity_curve'] = (1.0 + curve['returns']).cumprod()
        self.equity_curve = curve

    def output_summary_stats(self):
        """
        Summarize backtest performance and write the equity curve to CSV.

        Returns:
            list of (label, formatted value) tuples.
        """
        # .iloc[-1]: positional [] indexing on a Series is deprecated in pandas.
        total_return = self.equity_curve['equity_curve'].iloc[-1]
        returns = self.equity_curve['returns']
        pnl = self.equity_curve['equity_curve']
        # Minute bars: 252 trading days * 6.5 hours * 60 minutes per year.
        sharpe_ratio = Performance.calculate_sharpe_ratio(returns, periods=252*6.5*60)
        drawdown, max_dd, dd_duration = Performance.calculate_drawdowns(pnl)
        self.equity_curve['drawdown'] = drawdown
        stats = [('Total Return', '{0:0.2f}%'.format((total_return - 1.0)*100)),
                 ('Sharpe Ratio', '{0:0.2f}'.format(sharpe_ratio)),
                 ('Max Drawdown', '{0:0.2f}'.format(max_dd*100)),
                 ('Drawdown Duration', '{0}'.format(dd_duration))
                 ]
        self.equity_curve.to_csv('equity.csv')
        return stats
| [
"apps.ots.strategy.performance.Performance.calculate_sharpe_ratio",
"apps.ots.strategy.performance.Performance.calculate_drawdowns",
"apps.ots.strategy.naive_risk_manager.NaiveRiskManager",
"pandas.DataFrame",
"apps.ots.event.order_event.OrderEvent"
] | [((982, 1000), 'apps.ots.strategy.naive_risk_manager.NaiveRiskManager', 'NaiveRiskManager', ([], {}), '()\n', (998, 1000), False, 'from apps.ots.strategy.naive_risk_manager import NaiveRiskManager\n'), ((5963, 5994), 'pandas.DataFrame', 'pd.DataFrame', (['self.all_holdings'], {}), '(self.all_holdings)\n', (5975, 5994), True, 'import pandas as pd\n'), ((6566, 6633), 'apps.ots.strategy.performance.Performance.calculate_sharpe_ratio', 'Performance.calculate_sharpe_ratio', (['returns'], {'periods': '(252 * 6.5 * 60)'}), '(returns, periods=252 * 6.5 * 60)\n', (6600, 6633), False, 'from apps.ots.strategy.performance import Performance\n'), ((6670, 6706), 'apps.ots.strategy.performance.Performance.calculate_drawdowns', 'Performance.calculate_drawdowns', (['pnl'], {}), '(pnl)\n', (6701, 6706), False, 'from apps.ots.strategy.performance import Performance\n'), ((4974, 5025), 'apps.ots.event.order_event.OrderEvent', 'OrderEvent', (['symbol', 'order_type', 'mkt_quantity', '"""BUY"""'], {}), "(symbol, order_type, mkt_quantity, 'BUY')\n", (4984, 5025), False, 'from apps.ots.event.order_event import OrderEvent\n'), ((5121, 5173), 'apps.ots.event.order_event.OrderEvent', 'OrderEvent', (['symbol', 'order_type', 'mkt_quantity', '"""SELL"""'], {}), "(symbol, order_type, mkt_quantity, 'SELL')\n", (5131, 5173), False, 'from apps.ots.event.order_event import OrderEvent\n')] |
import numpy as np
import pyautogui
import imutils
from mss import mss
from PIL import Image
import cv2
import copy
import argparse
from hand_poses import HandPoses
from hand_detect import HandDetect
from delay import Delay
from spotify_controls import SpotifyControls
# ---- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--detect_threshold", help="minimum percentage of a hand prediction",
                    type=float, default=0.90)
parser.add_argument("--pose_threshold", help="SVC threshold in classification confidence",
                    type=float, default=0.90)
parser.add_argument("--path_classifier", help="path to classifier",
                    type=str, default='models/spotify_gesture_cmd_model.pkl')
parser.add_argument("--moving_average", help="minimum percentage of pose prediction of last frames",
                    type=float, default=0.85)
parser.add_argument("--frames_in", help="number of frames to consider to predict a pose when in action",
                    type=int, default=20)
parser.add_argument("--frames_out", help="number of frames to consider to predict a pose",
                    type=int, default=40)
parser.add_argument("--show_lm", help="show hand landmarks",
                    type=bool, default=True)
args = parser.parse_args()
# Hand detector and pose classifier built from the CLI thresholds.
hand_detect = HandDetect(detect_threshold=args.detect_threshold)
hand_pose = HandPoses(pose_threshold=args.pose_threshold,
                      name_classifier=args.path_classifier)
# This will log into Spotify using your personal account with a separate popup window
spotify_controller = SpotifyControls()
# Smooths per-frame predictions over the last N frames to avoid jittery commands.
delay = Delay(hand_pose.classifier.classes_, moving_average=args.moving_average, frames_in_action=args.frames_in, frames_out=args.frames_out)
# Frame source: webcam (cv2) when True, otherwise screen capture via mss.
webcam = True
if webcam:
    cap = cv2.VideoCapture(0)
else:
    sct = mss()
with hand_detect.mp_hands.Hands(
        max_num_hands=1,
        min_detection_confidence=0.6,
        min_tracking_confidence=0.5) as hands:
    while True:
        if webcam:
            ret, image = cap.read()
        else:  # screenshot
            ret = True
            # image = pyautogui.screenshot()
            # image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            # Higher fps with mss for screen grab:
            mon = sct.monitors[0]
            image = np.array(sct.grab(mon))
            image = np.flip(image[:, :, :3], 2)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if not ret:  # Image was not successfully read!
            print('\rNo image! Is a webcam available?', '', end='')
            continue
        # Keep the unflipped frame for detection; display the mirrored one.
        raw_frame = copy.deepcopy(image)
        image = cv2.flip(image, 1)
        image_height, image_width, _ = image.shape
        #spotify_controller.draw_mouse_rectangle(image)
        for (pose, confidence), (lm, mp_lm) in hand_detect.detect_hand(hands=hands,
                                                                       image=raw_frame,
                                                                       hand_pose=hand_pose,
                                                                       delay=delay):
            if args.show_lm:
                hand_detect.mp_drawing.draw_landmarks(
                    image, mp_lm, hand_detect.mp_hands.HAND_CONNECTIONS)
            if pose is not None:
                # A confident pose: overlay it and fire the Spotify command.
                cv2.putText(image, f"{pose}: ({confidence:.2f})",
                            (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 100), 2)
                print(f"\r{pose}: ({confidence:.2f})    ", "", end="")
                spotify_controller.execute_cmd(pose=pose, lm=lm, delay=delay, frame=image)
            else:
                cv2.putText(image, f"Idle", (30, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 100), 2)
        if delay.ignore_frames:
            # Frames are being ignored while a command settles.
            cv2.putText(image, f"Position locked", (30, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 100), 2)
        key = (cv2.waitKey(10) & 0xFF)
        image = cv2.resize(image, (int(image_width * .6),
                                   int(image_height * .6)), interpolation=cv2.INTER_AREA)
        # if webcam:
        cv2.imshow('frame', image)
        if key == ord('q'):
            break
if webcam:
    cap.release()
cv2.destroyAllWindows()
| [
"numpy.flip",
"delay.Delay",
"argparse.ArgumentParser",
"mss.mss",
"hand_poses.HandPoses",
"hand_detect.HandDetect",
"cv2.flip",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"copy.deepcopy",
"spotify_controls.SpotifyControls",
"cv2.waitKey"
] | [((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n'), ((1307, 1357), 'hand_detect.HandDetect', 'HandDetect', ([], {'detect_threshold': 'args.detect_threshold'}), '(detect_threshold=args.detect_threshold)\n', (1317, 1357), False, 'from hand_detect import HandDetect\n'), ((1370, 1458), 'hand_poses.HandPoses', 'HandPoses', ([], {'pose_threshold': 'args.pose_threshold', 'name_classifier': 'args.path_classifier'}), '(pose_threshold=args.pose_threshold, name_classifier=args.\n path_classifier)\n', (1379, 1458), False, 'from hand_poses import HandPoses\n'), ((1583, 1600), 'spotify_controls.SpotifyControls', 'SpotifyControls', ([], {}), '()\n', (1598, 1600), False, 'from spotify_controls import SpotifyControls\n'), ((1609, 1746), 'delay.Delay', 'Delay', (['hand_pose.classifier.classes_'], {'moving_average': 'args.moving_average', 'frames_in_action': 'args.frames_in', 'frames_out': 'args.frames_out'}), '(hand_pose.classifier.classes_, moving_average=args.moving_average,\n frames_in_action=args.frames_in, frames_out=args.frames_out)\n', (1614, 1746), False, 'from delay import Delay\n'), ((4278, 4301), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4299, 4301), False, 'import cv2\n'), ((1779, 1798), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1795, 1798), False, 'import cv2\n'), ((1815, 1820), 'mss.mss', 'mss', ([], {}), '()\n', (1818, 1820), False, 'from mss import mss\n'), ((2608, 2628), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (2621, 2628), False, 'import copy\n'), ((2646, 2664), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2654, 2664), False, 'import cv2\n'), ((4174, 4200), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'image'], {}), "('frame', image)\n", (4184, 4200), False, 'import cv2\n'), ((2352, 2379), 'numpy.flip', 'np.flip', (['image[:, :, :3]', '(2)'], {}), '(image[:, :, :3], 2)\n', (2359, 2379), True, 
'import numpy as np\n'), ((2400, 2438), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2412, 2438), False, 'import cv2\n'), ((3972, 3987), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (3983, 3987), False, 'import cv2\n'), ((3302, 3415), 'cv2.putText', 'cv2.putText', (['image', 'f"""{pose}: ({confidence:.2f})"""', '(30, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 255, 100)', '(2)'], {}), "(image, f'{pose}: ({confidence:.2f})', (30, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 100), 2)\n", (3313, 3415), False, 'import cv2\n'), ((3652, 3743), 'cv2.putText', 'cv2.putText', (['image', 'f"""Idle"""', '(30, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 255, 100)', '(2)'], {}), "(image, f'Idle', (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, \n 255, 100), 2)\n", (3663, 3743), False, 'import cv2\n'), ((3827, 3929), 'cv2.putText', 'cv2.putText', (['image', 'f"""Position locked"""', '(30, 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(255, 0, 100)', '(2)'], {}), "(image, f'Position locked', (30, 60), cv2.FONT_HERSHEY_SIMPLEX, \n 0.7, (255, 0, 100), 2)\n", (3838, 3929), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '1.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated welcome window for TextGen: a title, a description
    paragraph, a list of credit labels and a 'Continue' button.

    NOTE: generated by the PyQt5 UI code generator (see file header) —
    regenerate from the .ui file instead of editing by hand.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree and geometry on ``MainWindow`` (800x600)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Title label (text assigned in retranslateUi).
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(230, 20, 331, 61))
        font = QtGui.QFont()
        font.setPointSize(15)
        font.setStrikeOut(False)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        # Subtitle label.
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(200, 90, 381, 61))
        font = QtGui.QFont()
        font.setPointSize(15)
        font.setStrikeOut(False)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        # Long description paragraph (word-wrapped).
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(130, 160, 531, 201))
        font = QtGui.QFont()
        font.setPointSize(15)
        font.setStrikeOut(False)
        self.label_3.setFont(font)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setWordWrap(True)
        self.label_3.setObjectName("label_3")
        # Credit labels 4-8: identical styling, stacked 30px apart.
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(240, 380, 301, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_4.setFont(font)
        self.label_4.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.label_4.setWordWrap(True)
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(240, 410, 301, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_5.setFont(font)
        self.label_5.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.label_5.setWordWrap(True)
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(240, 440, 301, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_6.setFont(font)
        self.label_6.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.label_6.setWordWrap(True)
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(240, 470, 301, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_7.setFont(font)
        self.label_7.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.label_7.setWordWrap(True)
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(self.centralwidget)
        self.label_8.setGeometry(QtCore.QRect(240, 500, 301, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label_8.setFont(font)
        self.label_8.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
        self.label_8.setWordWrap(True)
        self.label_8.setObjectName("label_8")
        # Continue button at the bottom.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(340, 540, 93, 28))
        self.pushButton.setObjectName("pushButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all user-visible (translatable) strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Welcome"))
        self.label.setText(_translate("MainWindow", "Welcome to TextGen V1.1"))
        self.label_2.setText(_translate("MainWindow", "The Next Gen ML Text Generator"))
        self.label_3.setText(_translate("MainWindow", "This Project is a proof of concept that machine learning can be used and is used for various purposes but at the same time require lots and lots of computing power this project is trained on Wikipedia Articles which were collected and tokenized by Metamind "))
        self.label_4.setText(_translate("MainWindow", "The Project is made by"))
        self.label_5.setText(_translate("MainWindow", "<NAME>"))
        self.label_6.setText(_translate("MainWindow", "<NAME>"))
        self.label_7.setText(_translate("MainWindow", "<NAME>"))
        self.label_8.setText(_translate("MainWindow", "<NAME>"))
        self.pushButton.setText(_translate("MainWindow", "Continue"))
# Manual entry point: build the main window and run the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QMainWindow",
"PyQt5.QtGui.QFont",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QPushButton"
] | [((5117, 5149), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5139, 5149), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5167, 5190), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (5188, 5190), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((409, 438), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (426, 438), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((518, 554), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (534, 554), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((633, 646), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (644, 646), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((863, 899), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (879, 899), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((980, 993), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (991, 993), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1218, 1254), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1234, 1254), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1337, 1350), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1348, 1350), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1614, 1650), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1630, 1650), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1732, 1745), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1743, 1745), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1996, 2032), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2012, 2032), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2114, 
2127), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2125, 2127), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2378, 2414), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2394, 2414), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2496, 2509), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2507, 2509), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2760, 2796), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2776, 2796), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2878, 2891), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2889, 2891), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3142, 3178), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3158, 3178), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3260, 3273), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3271, 3273), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3527, 3568), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3548, 3568), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3770, 3802), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (3790, 3802), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3949, 3998), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (3986, 3998), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((586, 616), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(230)', '(20)', '(331)', '(61)'], {}), '(230, 20, 331, 61)\n', (598, 616), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((933, 963), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(90)', '(381)', '(61)'], {}), '(200, 90, 381, 61)\n', 
(945, 963), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1288, 1320), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(130)', '(160)', '(531)', '(201)'], {}), '(130, 160, 531, 201)\n', (1300, 1320), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1684, 1715), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(380)', '(301)', '(21)'], {}), '(240, 380, 301, 21)\n', (1696, 1715), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2066, 2097), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(410)', '(301)', '(21)'], {}), '(240, 410, 301, 21)\n', (2078, 2097), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2448, 2479), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(440)', '(301)', '(21)'], {}), '(240, 440, 301, 21)\n', (2460, 2479), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2830, 2861), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(470)', '(301)', '(21)'], {}), '(240, 470, 301, 21)\n', (2842, 2861), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3212, 3243), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(240)', '(500)', '(301)', '(21)'], {}), '(240, 500, 301, 21)\n', (3224, 3243), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3605, 3635), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(340)', '(540)', '(93)', '(28)'], {}), '(340, 540, 93, 28)\n', (3617, 3635), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import explorer
import random
import os
import logic
import copy
import patches.dungeonEntrances
import patches.goal
from locations.items import *
class Error(Exception):
    """Raised when the randomizer gives up (e.g. repeated item-placement failures)."""
class Randomizer:
    """Randomly distributes the item pool over the available item spots.

    Construction runs the whole randomization: it seeds an RNG, reads the
    item pool from the ROM, repeatedly places items while the logic stays
    solvable (with backtracking on failure), and finally patches the
    placements back into the ROM (one ROM per world in multiworld mode).
    """
    def __init__(self, rom, options, *, seed=None):
        """Run the full randomization on ``rom`` according to ``options``.

        Raises:
            Error: if item placement fails too many times in a row.
        """
        self.seed = seed
        if self.seed is None:
            self.seed = os.urandom(16)
        self.rnd = random.Random(self.seed)
        if options.race:
            self.rnd.random() # Just pull 1 random number so race seeds differ from normal seeds but stay stable.
        if options.multiworld:
            assert not options.dungeonshuffle, "Cannot use dungeonshuffle in multiworld at the moment"
            self.__logic = logic.MultiworldLogic(options, self.rnd)
        else:
            self.__logic = logic.Logic(options, self.rnd)
        # item_pool maps item -> remaining count; spots are still-unfilled locations.
        self.item_pool = {}
        self.spots = []
        self.readItemPool(rom)
        self.modifyDefaultItemPool()
        assert self.logicStillValid(), "Sanity check failed"
        bail_counter = 0
        while self.item_pool:
            if not self.placeItem():
                # placeItem() already rolled its attempt back; count consecutive failures.
                bail_counter += 1
                if bail_counter > 10:
                    raise Error("Failed to place an item for a bunch of retries")
            else:
                bail_counter = 0
        # Apply patches to rom
        if options.goal == "random":
            patches.goal.setRequiredInstrumentCount(rom, self.rnd.randint(-1, 8))
        if self.__logic.entranceMapping:
            patches.dungeonEntrances.changeEntrances(rom, self.__logic.entranceMapping)
        if options.multiworld:
            # Write one patched copy of the ROM per world.
            for n in range(2):
                result_rom = copy.deepcopy(rom)
                for spot in self.__logic.iteminfo_list:
                    if spot.world == n:
                        spot.patch(result_rom, spot.item)
                result_rom.save("Multiworld_%d.gbc" % (n + 1))
        else:
            for spot in self.__logic.iteminfo_list:
                spot.patch(rom, spot.item)
    def addItem(self, item):
        """Return one unit of ``item`` to the pool."""
        self.item_pool[item] = self.item_pool.get(item, 0) + 1
    def removeItem(self, item):
        """Take one unit of ``item`` from the pool; drop the key at zero."""
        self.item_pool[item] -= 1
        if self.item_pool[item] == 0:
            del self.item_pool[item]
    def addSpot(self, spot):
        """Register ``spot`` as still available for placement."""
        self.spots.append(spot)
    def removeSpot(self, spot):
        """Mark ``spot`` as filled (no longer available)."""
        self.spots.remove(spot)
    def readItemPool(self, rom):
        """Fill ``item_pool``/``spots`` from the vanilla item layout in ``rom``."""
        # Collect the item pool from the rom to see which items we can randomize.
        for spot in self.__logic.iteminfo_list:
            item = spot.read(rom)
            self.item_pool[item] = self.item_pool.get(item, 0) + 1
        for spot in self.__logic.iteminfo_list:
            # If a spot has no other placement options, just ignore this spot.
            if len(spot.getOptions()) > 1:
                self.addSpot(spot)
                spot.item = None
            else:
                self.removeItem(spot.getOptions()[0])
                spot.item = spot.getOptions()[0]
    def modifyDefaultItemPool(self):
        # Remove rupees from the item pool and replace them with other items to create more variety
        rupee_item = []
        rupee_item_count = []
        for k, v in self.item_pool.items():
            if k.startswith("RUPEES_"):
                rupee_item.append(k)
                rupee_item_count.append(v)
        rupee_chests = sum(v for k, v in self.item_pool.items() if k.startswith("RUPEES_"))
        # Replace one fifth of the rupee chests with weighted consumables.
        for n in range(rupee_chests // 5):
            new_item = self._rndChoices((BOMB, SINGLE_ARROW, ARROWS_10, MAGIC_POWDER, MEDICINE), (10, 5, 10, 10, 1))
            while True:
                remove_item = self._rndChoices(rupee_item, rupee_item_count)
                if remove_item in self.item_pool:
                    break
            self.addItem(new_item)
            self.removeItem(remove_item)
    def _rndChoices(self, population, weights):
        """Weighted random pick from ``population`` using this randomizer's RNG."""
        import bisect
        import itertools
        cum_weights = list(itertools.accumulate(weights))
        return population[bisect.bisect(cum_weights, self.rnd.random() * cum_weights[-1], 0, len(cum_weights) - 1)]
    def placeItem(self):
        """Try to place one random item in one random spot.

        Returns True on success; on failure the spot/item state is restored.
        """
        # Find a random spot and item to place
        spot = self.rnd.choice(self.spots)
        options = list(filter(lambda i: i in self.item_pool, spot.getOptions()))
        if not options:
            return False
        item = self.rnd.choice(sorted(options))
        spot.item = item
        self.removeItem(item)
        self.removeSpot(spot)
        if not self.logicStillValid():
            # Roll back the tentative placement.
            spot.item = None
            self.addItem(item)
            self.addSpot(spot)
            #print("Failed to place:", item)
            return False
        #print("Placed:", item)
        return True
    def logicStillValid(self, verbose=False):
        """Return True while the current partial placement can still be completed."""
        # Check if we still have new places to explore
        if self.spots:
            e = explorer.Explorer()
            e.visit(self.__logic.start)
            valid = False
            for loc in e.getAccessableLocations():
                for ii in loc.items:
                    if ii in self.spots:
                        valid = True
            if not valid:
                if verbose:
                    print("Can no longer find new locations to explore")
                return False
        # Check if we can still place all our items
        if not self.canStillPlaceItemPool(verbose):
            if verbose:
                print("Can no longer place our item pool")
            return False
        # Finally, check if the logic still makes everything accessible when we have all the items.
        e = explorer.Explorer()
        for item_pool_item, count in self.item_pool.items():
            for n in range(count):
                e.addItem(item_pool_item)
        e.visit(self.__logic.start)
        if len(e.getAccessableLocations()) != len(self.__logic.location_list):
            if verbose:
                for loc in self.__logic.location_list:
                    if loc not in e.getAccessableLocations():
                        print("Cannot access: ", loc.items)
                print("Not all locations are accessible anymore with the full item pool")
            return False
        return True
    def canStillPlaceItemPool(self, verbose=False):
        """Greedy feasibility check: can every pooled item still find a spot?"""
        # For each item in the pool, find which spots are available.
        # Then, from the hardest to place item to the easy stuff strip the availability pool
        item_spots = {}
        for spot in self.spots:
            for option in spot.getOptions():
                if option not in item_spots:
                    item_spots[option] = set()
                item_spots[option].add(spot)
        for item in sorted(self.item_pool.keys(), key=lambda item: len(item_spots.get(item, set()))):
            spots = item_spots.get(item, set())
            for n in range(self.item_pool.get(item, 0)):
                if verbose:
                    print(n, item, spots)
                if not spots:
                    return False
                spot = next(iter(spots))
                # Claim the spot: remove it from every item's availability set.
                for spot_set in item_spots.values():
                    if spot in spot_set:
                        spot_set.remove(spot)
        return True
| [
"itertools.accumulate",
"copy.deepcopy",
"logic.MultiworldLogic",
"random.Random",
"os.urandom",
"logic.Logic",
"explorer.Explorer"
] | [((386, 410), 'random.Random', 'random.Random', (['self.seed'], {}), '(self.seed)\n', (399, 410), False, 'import random\n'), ((5800, 5819), 'explorer.Explorer', 'explorer.Explorer', ([], {}), '()\n', (5817, 5819), False, 'import explorer\n'), ((351, 365), 'os.urandom', 'os.urandom', (['(16)'], {}), '(16)\n', (361, 365), False, 'import os\n'), ((730, 770), 'logic.MultiworldLogic', 'logic.MultiworldLogic', (['options', 'self.rnd'], {}), '(options, self.rnd)\n', (751, 770), False, 'import logic\n'), ((814, 844), 'logic.Logic', 'logic.Logic', (['options', 'self.rnd'], {}), '(options, self.rnd)\n', (825, 844), False, 'import logic\n'), ((4097, 4126), 'itertools.accumulate', 'itertools.accumulate', (['weights'], {}), '(weights)\n', (4117, 4126), False, 'import itertools\n'), ((5047, 5066), 'explorer.Explorer', 'explorer.Explorer', ([], {}), '()\n', (5064, 5066), False, 'import explorer\n'), ((1722, 1740), 'copy.deepcopy', 'copy.deepcopy', (['rom'], {}), '(rom)\n', (1735, 1740), False, 'import copy\n')] |
import numpy as np
from scipy.fft import fft
def CAR(X, labels):
N = X.shape
N_classes = len(np.unique(labels))
data10 = np.zeros((N[0], N[1], 1))
data11 = np.zeros((N[0], N[1], 1))
data12 = np.zeros((N[0], N[1], 1))
data13 = np.zeros((N[0], N[1], 1))
for trial in range(N[2]): ## Média de cada um os trials de todos canais
data = X[:,:,trial]
X_med = np.mean(data, axis = 1).reshape(data.shape[0])
data_car = data - X_med.reshape((X.shape[0],1))
if (labels[trial] == 0):
data10 = np.append(data10, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)#append na terceira dimensão, dos trials
elif (labels[trial] == 1):
data11 = np.append(data11, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)
elif (labels[trial] == 2):
data12 = np.append(data12, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)
elif (labels[trial] == 3):
data13 = np.append(data13, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)
data10 = np.delete(data10, 0, axis=2)
data11 = np.delete(data11, 0, axis=2)
data12 = np.delete(data12, 0, axis=2)
data13 = np.delete(data13, 0, axis=2)
return data10,data11,data12,data13
def Ext_fft (N, fs, data10, data11, data12, data13, out_chans):
    """Extract FFT-magnitude features at the SSVEP stimulation frequencies.

    For every trial and every valid channel, the FFT magnitude is sampled
    at the bins of 10/11/12/13 Hz and their harmonics, and the samples are
    concatenated channel-block by channel-block.

    Args:
        N: shape of the raw data, (n_samples, n_channels, n_trials).
        fs: sampling frequency in Hz.
        data10, data11, data12, data13: per-class arrays of shape
            (n_samples, 16, 12) — 16 channels, 12 trials.
        out_chans: iterable of 1-based channel numbers to exclude.

    Returns:
        Four feature matrices (one per class, order 10/11/12/13 Hz) of
        shape (12, n_valid_channels * 4 * n_harmonics).
    """
    N_class = 4; N_trials = 12; n_harmonicas = 2
    # FFT bin indices of 10,11,12,13 Hz and their harmonics for N[0] samples at fs Hz.
    N_pos = ((N[0]/fs)*np.array([np.array([10,11,12,13])*i for i in range(1,n_harmonicas+1)])).ravel().astype(int)
    val_chans = np.array(range(1,17))
    val_chans = np.delete(val_chans, [np.where(val_chans == c) for c in out_chans]) # drop excluded channels
    n_features = N_pos.shape[0]                      # features per channel (== N_class * n_harmonicas)
    n_cols = val_chans.shape[0] * N_class * n_harmonicas
    # One feature matrix per class; filled channel-block by channel-block.
    features = [np.zeros((N_trials, n_cols)) for _ in range(N_class)]
    datasets = (data10, data11, data12, data13)
    for trial in range(0, N_trials):
        col = 0
        for chans in val_chans - 1:                  # 1-based channel numbers -> 0-based indices
            for F, data in zip(features, datasets):
                F[trial, col:col + n_features] = np.abs(fft(data[:, chans, trial]))[N_pos]
            col += n_features
    return features[0], features[1], features[2], features[3]
def CAR_FFT(X,labels, fs):
# FILTRO CAR
d10, d11, d12, d13 = CAR(X,labels)
# EXTRAÇÃO FFT
out_chans = []
#out_chans = [1, 2, 3, 4, 10, 14, 15,16]
F_dez, F_onze, F_doze, F_treze = Ext_fft (*(X.shape, fs, d10, d11, d12, d13), out_chans = out_chans)
F_all = np.vstack([F_dez, F_onze, F_doze, F_treze])
return F_all | [
"numpy.mean",
"numpy.unique",
"numpy.where",
"numpy.delete",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"scipy.fft.fft"
] | [((140, 165), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (148, 165), True, 'import numpy as np\n'), ((179, 204), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (187, 204), True, 'import numpy as np\n'), ((218, 243), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (226, 243), True, 'import numpy as np\n'), ((257, 282), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (265, 282), True, 'import numpy as np\n'), ((1161, 1189), 'numpy.delete', 'np.delete', (['data10', '(0)'], {'axis': '(2)'}), '(data10, 0, axis=2)\n', (1170, 1189), True, 'import numpy as np\n'), ((1203, 1231), 'numpy.delete', 'np.delete', (['data11', '(0)'], {'axis': '(2)'}), '(data11, 0, axis=2)\n', (1212, 1231), True, 'import numpy as np\n'), ((1245, 1273), 'numpy.delete', 'np.delete', (['data12', '(0)'], {'axis': '(2)'}), '(data12, 0, axis=2)\n', (1254, 1273), True, 'import numpy as np\n'), ((1287, 1315), 'numpy.delete', 'np.delete', (['data13', '(0)'], {'axis': '(2)'}), '(data13, 0, axis=2)\n', (1296, 1315), True, 'import numpy as np\n'), ((2027, 2081), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2035, 2081), True, 'import numpy as np\n'), ((2124, 2178), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2132, 2178), True, 'import numpy as np\n'), ((2185, 2239), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2193, 2239), True, 'import numpy as np\n'), ((2247, 2301), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2255, 2301), True, 'import numpy as np\n'), ((3590, 3633), 'numpy.vstack', 'np.vstack', (['[F_dez, F_onze, F_doze, 
F_treze]'], {}), '([F_dez, F_onze, F_doze, F_treze])\n', (3599, 3633), True, 'import numpy as np\n'), ((103, 120), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (112, 120), True, 'import numpy as np\n'), ((1840, 1864), 'numpy.where', 'np.where', (['(val_chans == c)'], {}), '(val_chans == c)\n', (1848, 1864), True, 'import numpy as np\n'), ((408, 429), 'numpy.mean', 'np.mean', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (415, 429), True, 'import numpy as np\n'), ((2411, 2439), 'scipy.fft.fft', 'fft', (['data10[:, chans, trial]'], {}), '(data10[:, chans, trial])\n', (2414, 2439), False, 'from scipy.fft import fft\n'), ((2500, 2528), 'scipy.fft.fft', 'fft', (['data11[:, chans, trial]'], {}), '(data11[:, chans, trial])\n', (2503, 2528), False, 'from scipy.fft import fft\n'), ((2548, 2576), 'scipy.fft.fft', 'fft', (['data12[:, chans, trial]'], {}), '(data12[:, chans, trial])\n', (2551, 2576), False, 'from scipy.fft import fft\n'), ((2596, 2624), 'scipy.fft.fft', 'fft', (['data13[:, chans, trial]'], {}), '(data13[:, chans, trial])\n', (2599, 2624), False, 'from scipy.fft import fft\n'), ((1681, 1707), 'numpy.array', 'np.array', (['[10, 11, 12, 13]'], {}), '([10, 11, 12, 13])\n', (1689, 1707), True, 'import numpy as np\n')] |
from typing import Optional, Callable, List
import torch as tc
import numpy as np
from drl.agents.architectures.stateless.abstract import StatelessArchitecture
class Identity(StatelessArchitecture):
    """
    Identity architecture: flattens the input to a single feature
    dimension and returns it unchanged. Useful for unit testing.
    """
    def __init__(
            self,
            input_shape: List[int],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]]):
        """
        Args:
            input_shape (List[int]): Input shape (excluding the batch dimension).
            w_init (Optional[Callable[[tc.Tensor], None]]): Weight initializer.
            b_init (Optional[Callable[[tc.Tensor], None]]): Bias initializer.
        """
        super().__init__(w_init, b_init)
        self._input_shape = input_shape

    @property
    def input_shape(self) -> List[int]:
        """Shape of a single (batch-less) input sample."""
        return self._input_shape

    @property
    def output_dim(self) -> int:
        """Number of features after flattening."""
        # np.prod returns a NumPy scalar; cast so the `int` annotation holds.
        return int(np.prod(self._input_shape))

    def forward(self, x, **kwargs):
        """Flatten batch-first input ``x`` to shape (batch, output_dim)."""
        assert list(x.shape[1:]) == self.input_shape
        features = x.reshape(-1, self.output_dim)
        return features
| [
"numpy.prod"
] | [((938, 964), 'numpy.prod', 'np.prod', (['self._input_shape'], {}), '(self._input_shape)\n', (945, 964), True, 'import numpy as np\n')] |
import geog
import networkx as nx
import osmgraph

# By default any way with a highway tag will be loaded
g = osmgraph.parse_file('hawaii-latest.osm.bz2')  # or .osm or .pbf

# Annotate every edge with its great-circle length (metres).
# NOTE(review): edges_iter() is NetworkX 1.x API; 2.x renamed it to edges() — confirm the pinned version.
for n1, n2 in g.edges_iter():
    c1, c2 = osmgraph.tools.coordinates(g, (n1, n2))
    g[n1][n2]['length'] = geog.distance(c1, c2)


# Pick two random nodes and route between them by total length.
import random
start = random.choice(g.nodes())
end = random.choice(g.nodes())
path = nx.shortest_path(g, start, end, 'length')
coords = osmgraph.tools.coordinates(g, path)


# Find the sequence of roads to get from start to end
edge_names = [g[n1][n2].get('name') for n1, n2 in osmgraph.tools.pairwise(path)]
import itertools
# Collapse consecutive duplicate road names.
names = [k for k, v in itertools.groupby(edge_names)]
print(names)

# Visualize the path using geojsonio.py
import geojsonio
import json
geojsonio.display(json.dumps({'type': 'LineString', 'coordinates': coords})) | [
"osmgraph.parse_file",
"osmgraph.tools.coordinates",
"itertools.groupby",
"json.dumps",
"geog.distance",
"networkx.shortest_path",
"osmgraph.tools.pairwise"
] | [((110, 154), 'osmgraph.parse_file', 'osmgraph.parse_file', (['"""hawaii-latest.osm.bz2"""'], {}), "('hawaii-latest.osm.bz2')\n", (129, 154), False, 'import osmgraph\n'), ((395, 436), 'networkx.shortest_path', 'nx.shortest_path', (['g', 'start', 'end', '"""length"""'], {}), "(g, start, end, 'length')\n", (411, 436), True, 'import networkx as nx\n'), ((446, 481), 'osmgraph.tools.coordinates', 'osmgraph.tools.coordinates', (['g', 'path'], {}), '(g, path)\n', (472, 481), False, 'import osmgraph\n'), ((217, 256), 'osmgraph.tools.coordinates', 'osmgraph.tools.coordinates', (['g', '(n1, n2)'], {}), '(g, (n1, n2))\n', (243, 256), False, 'import osmgraph\n'), ((286, 307), 'geog.distance', 'geog.distance', (['c1', 'c2'], {}), '(c1, c2)\n', (299, 307), False, 'import geog\n'), ((791, 848), 'json.dumps', 'json.dumps', (["{'type': 'LineString', 'coordinates': coords}"], {}), "({'type': 'LineString', 'coordinates': coords})\n", (801, 848), False, 'import json\n'), ((587, 616), 'osmgraph.tools.pairwise', 'osmgraph.tools.pairwise', (['path'], {}), '(path)\n', (610, 616), False, 'import osmgraph\n'), ((658, 687), 'itertools.groupby', 'itertools.groupby', (['edge_names'], {}), '(edge_names)\n', (675, 687), False, 'import itertools\n')] |
from random import random
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.graphics import Color
from pyobjus import autoclass
class Ball(Widget):
    """Ball widget moved each frame by its velocity vector."""
    # Kivy properties so kv rules and bindings can observe them.
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    h = NumericProperty(0)  # re-rolled to random() on reset; presumably a hue for the kv color rule — TODO confirm
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    def move(self):
        # Advance the position by one velocity step.
        self.pos = Vector(*self.velocity) + self.pos
class PyobjusGame(Widget):
    """Game root: drives the ball from the iOS accelerometer via a pyobjus bridge."""
    ball = ObjectProperty(None)
    # Native iOS objects resolved through pyobjus.
    screen = ObjectProperty(autoclass('UIScreen').mainScreen())
    bridge = ObjectProperty(autoclass('bridge').alloc().init())
    sensitivity = ObjectProperty(50)  # multiplier from accelerometer reading to velocity
    br_slider = ObjectProperty(None)
    def __init__(self, *args, **kwargs):
        super(PyobjusGame, self).__init__()
        self.bridge.startAccelerometer()
    def __dealloc__(self, *args, **kwargs):
        # NOTE(review): __dealloc__ is a Cython/ObjC-style hook, not a Python
        # special method — confirm something actually invokes this.
        self.bridge.stopAccelerometer()
        super(PyobjusGame, self).__dealloc__()
    def reset_ball_pos(self):
        # Re-center the ball in the widget.
        self.ball.pos = self.width / 2, self.height / 2
    def on_bright_slider_change(self):
        # Mirror the slider value onto the native screen brightness.
        self.screen.brightness = self.br_slider.value
    def update(self, dt):
        """Per-frame step: apply accelerometer velocity and reset when out of bounds."""
        self.ball.move()
        self.ball.velocity_x = self.bridge.ac_x * self.sensitivity
        self.ball.velocity_y = self.bridge.ac_y * self.sensitivity
        if (self.ball.y < 0) or (self.ball.top >= self.height):
            self.reset_ball_pos()
            self.ball.h = random()
        if (self.ball.x < 0) or (self.ball.right >= self.width):
            self.reset_ball_pos()
            self.ball.h = random()
class PyobjusBallApp(App):
    def build(self):
        """Create the game root widget and drive its update loop at 60 FPS."""
        root = PyobjusGame()
        Clock.schedule_interval(root.update, 1.0 / 60.0)
        return root
# Start the Kivy application when executed directly.
if __name__ == '__main__':
    PyobjusBallApp().run()
| [
"kivy.properties.NumericProperty",
"pyobjus.autoclass",
"random.random",
"kivy.vector.Vector",
"kivy.clock.Clock.schedule_interval",
"kivy.properties.ReferenceListProperty",
"kivy.properties.ObjectProperty"
] | [((330, 348), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (345, 348), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((366, 384), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (381, 384), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((393, 411), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (408, 411), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((427, 472), 'kivy.properties.ReferenceListProperty', 'ReferenceListProperty', (['velocity_x', 'velocity_y'], {}), '(velocity_x, velocity_y)\n', (448, 472), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((587, 607), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (601, 607), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((754, 772), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['(50)'], {}), '(50)\n', (768, 772), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((789, 809), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (803, 809), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((1793, 1841), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', (['game.update', '(1.0 / 60.0)'], {}), '(game.update, 1.0 / 60.0)\n', (1816, 1841), False, 'from kivy.clock import Clock\n'), ((513, 535), 'kivy.vector.Vector', 'Vector', (['*self.velocity'], {}), '(*self.velocity)\n', (519, 535), False, 'from kivy.vector import Vector\n'), ((1561, 1569), 'random.random', 'random', ([], {}), '()\n', (1567, 1569), False, 'from random import random\n'), ((1696, 1704), 'random.random', 'random', ([], {}), '()\n', (1702, 1704), 
False, 'from random import random\n'), ((636, 657), 'pyobjus.autoclass', 'autoclass', (['"""UIScreen"""'], {}), "('UIScreen')\n", (645, 657), False, 'from pyobjus import autoclass\n'), ((700, 719), 'pyobjus.autoclass', 'autoclass', (['"""bridge"""'], {}), "('bridge')\n", (709, 719), False, 'from pyobjus import autoclass\n')] |
import os, sys
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
try:
from data_handle.mid_object import *
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from data_handle.mid_object import *
'''
There are 3 kinds of folder structure:
1. [1c] data_dir - obj
2. [2c] data_dir - obj - obj&env (each object has its own environment)
3. [2c] data_dir - obj&env (all objects share one environment)
(Structures 2 & 3 are compatible with 1 to some extent.)
The functions 'gen_csv_trackers' and 'gather_all_data' are universal:
they depend only on the data structure version.
'''
def gen_csv_trackers(data_dir, channel_per_image=2): # Data structure 2
    """Scan every object folder under ``data_dir`` and write a per-object 'data.csv'.

    Image file names are expected to look like ``<name>_<t>_<x>_<y>_<index>.<ext>``;
    files that do not match are skipped. The resulting CSV (columns f, t, x, y,
    index), sorted by time step, is written next to the images.

    Args:
        data_dir: Root folder; layout is data_dir/obj (cpi==1) or
            data_dir/obj/obj (cpi==2, object channel in an 'obj' subfolder).
        channel_per_image: 1 or 2, selecting the folder layout above.
    """
    cpi = channel_per_image
    obj_folders = os.listdir(data_dir)
    for objf in obj_folders:
        # Build the folder path with os.path.join (the old ``data_dir+objf``
        # string concat silently required a trailing separator).
        if cpi == 1:
            obj_dir = os.path.join(data_dir, objf)
        else:
            obj_dir = os.path.join(data_dir, objf, 'obj')
        obj_files = os.listdir(obj_dir) # all files/images under this folder
        t_list = [] # time or time step
        x_list = [] # x coordinate
        y_list = [] # y coordinate
        idx_list = [] # more information (e.g. scene index)
        invalid_files = []
        for f in obj_files:
            info = f[:-4] # the last four characters are the filename extension
            try:
                # Parse everything first, append afterwards, so a partially
                # parsable name cannot leave the four lists out of sync.
                parts = info.split('_')
                t, x, y, idx = int(parts[1]), float(parts[2]), float(parts[3]), int(parts[4])
            except (IndexError, ValueError):
                invalid_files.append(f)
                continue
            t_list.append(t)
            x_list.append(x)
            y_list.append(y)
            idx_list.append(idx)
        for f in invalid_files:
            obj_files.remove(f)
        df = pd.DataFrame({'f':obj_files,'t':t_list,'x':x_list,'y':y_list, 'index':idx_list}).sort_values(by='t', ignore_index=True)
        # BUGFIX: os.path.join(..., '/data.csv') discards the preceding
        # components (an absolute component resets the join), so the cpi==1
        # branch used to target the filesystem root. Join a relative name.
        df.to_csv(os.path.join(obj_dir, 'data.csv'), index=False)
def gather_all_data(data_dir, past, maxT, channel_per_image=2, minT=1, period=1, save_dir=None): # Data structure 2
    """Build one training index (all_data.csv) from the per-object data.csv files.

    For every object folder and every horizon T in [minT, maxT], slides a
    window of past+1 frames (spaced `period` apart) over the object's frame
    list and records the window's filenames together with the position
    `T` steps after the window's last frame.

    Args:
        data_dir: root folder; each sub-folder is one object (see gen_csv_trackers).
        past: number of past frames per sample (the window holds past+1 frames).
        maxT: largest prediction horizon, in frames.
        channel_per_image: 1 or 2; with 2, an env-image filename is derived
            from each object-image filename and stored alongside it.
        minT: smallest prediction horizon.
        period: stride between consecutive frames inside a window.
        save_dir: output directory for all_data.csv (defaults to data_dir).
    """
    # data_dir - objf(1,2,...) - obj&env
    if save_dir is None:
        save_dir = data_dir
    cpi = channel_per_image
    # cpi*(past+1) filename columns, then horizon T, target position and scene index.
    column_name = [f'f{i}' for i in range(0,cpi*(past+1))] + ['T', 'x', 'y', 'index']
    df_all = pd.DataFrame(columns=column_name)
    obj_folders = os.listdir(data_dir)
    cnt = 0
    for objf in obj_folders:
        cnt += 1
        print(f'\rProcess {cnt}/{len(obj_folders)}', end=' ')
        if cpi == 1:
            df_obj = pd.read_csv(os.path.join(data_dir, objf, 'data.csv')) # generated by "gen_csv_trackers"
        else:
            df_obj = pd.read_csv(os.path.join(data_dir, objf, 'obj/data.csv')) # generated by "gen_csv_trackers"
        for T in range(minT,maxT+1):
            sample_list = []
            for i in range(len(df_obj)-past*period-T): # each sample
                sample = []
                ################## Sample START ##################
                for j in range(past+1):
                    obj_filename = df_obj.iloc[i+j*period]['f']
                    sample.append(obj_filename)
                    if cpi == 2:
                        # Derive the matching env-image name: keep the tag, the
                        # time step and the scene index of the object filename.
                        sample.append(obj_filename.split('_')[0]+'_'+obj_filename.split('_')[1]+'_'+obj_filename.split('_')[-1])
                sample.append(T)
                # Ground-truth position T frames after the last observed frame.
                sample.append(df_obj.iloc[i+past+T]['x'])
                sample.append(df_obj.iloc[i+past+T]['y'])
                sample.append(df_obj.iloc[i+past+T]['index'])
                ################## Sample E N D ##################
                sample_list.append(sample)
            df_T = pd.DataFrame(sample_list, columns=df_all.columns)
            df_all = pd.concat([df_all, df_T], ignore_index=True)
    df_all.to_csv(os.path.join(save_dir, 'all_data.csv'), index=False)
def save_MID_data(index_list, save_path, sim_time_per_scene:int, channel_per_image=2, dots_per_inch=None):
    """Simulate moving objects on the MID scenes and save per-frame images.

    For each scene index, runs `sim_time_per_scene` simulations (split evenly
    across the scene's reference-path choices, half forward / half reversed)
    and renders every trajectory point either as a combined image (cpi==1) or
    as separate object/env images (cpi==2). With save_path=None the figures
    are shown instead of saved.

    NOTE(review): return_Map, get_ref_path, Graph, MovingObject — and
    presumably the `random` module used below — come from the
    `data_handle.mid_object` star import; confirm `random` there is the
    module (random.randint is called), not the `random()` function.
    """
    # MID - Multiple-scene Interaction Dataset
    cpi = channel_per_image
    dpi = dots_per_inch
    cnt = 0
    overall_sim_time = sim_time_per_scene * len(index_list)
    for idx in index_list:
        boundary_coords, obstacle_list, nchoices = return_Map(index=idx) # map parameters
        target_size, ts = (0.5, 0.2) # object parameters
        choice_list = list(range(1,nchoices+1))
        for ch in choice_list:
            for i in range(sim_time_per_scene//nchoices):
                cnt += 1
                print(f'\rSimulating: {cnt}/{overall_sim_time}   ', end='')
                inflation = 0
                # Randomize the lateral stagger and max speed per run.
                stagger = 0.4 + (random.randint(0, 20)/10-1) * 0.2
                vmax = 1 + (random.randint(0, 20)/10-1) * 0.3
                # First half of the runs follow the path forward, second half reversed.
                if i<((sim_time_per_scene//nchoices)//2):
                    ref_path = get_ref_path(index=idx, choice=ch, reverse=False)
                else:
                    ref_path = get_ref_path(index=idx, choice=ch, reverse=True)
                graph = Graph(boundary_coords, obstacle_list, inflation=inflation)
                obj = MovingObject(ref_path[0], stagger=stagger)
                obj.run(ref_path, ts, vmax)
                ### Generate images
                for j, tr in enumerate(obj.traj):
                    # images containing everything
                    shape = patches.Circle(tr, radius=target_size/2, fc='k')
                    if cpi == 1:
                        # images containing everything
                        _, ax = plt.subplots(dpi=dpi)
                        graph.plot_map(ax, clean=True) ### NOTE change this
                        ax.add_patch(shape)
                        ax.set_aspect('equal', 'box')
                        ax.axis('off')
                        if save_path is None:
                            plt.show()
                        else:
                            folder = os.path.join(save_path,f'{cnt}/')
                            Path(folder).mkdir(parents=True, exist_ok=True)
                            plt.savefig(os.path.join(folder,f'{cnt}_{j}_{round(tr[0],4)}_{round(tr[1],4)}_{idx}.png'), 
                                        bbox_inches='tight', pad_inches=0, dpi=dpi)
                            plt.close()
                    elif cpi == 2:
                        # images containing only object
                        _, ax1 = plt.subplots(dpi=dpi)
                        graph.plot_map(ax1, empty=True) ### NOTE change this
                        ax1.add_patch(shape)
                        ax1.set_aspect('equal', 'box')
                        ax1.axis('off')
                        if save_path is None:
                            plt.show()
                        else:
                            folder = os.path.join(save_path,f'{cnt}/obj')
                            Path(folder).mkdir(parents=True, exist_ok=True)
                            plt.savefig(os.path.join(folder,f'{cnt}_{j}_{round(tr[0],4)}_{round(tr[1],4)}_{idx}.png'), 
                                        bbox_inches='tight', pad_inches=0, dpi=dpi)
                            plt.close()
                        # images containing env
                        _, ax2 = plt.subplots(dpi=dpi)
                        graph.plot_map(ax2, clean=True) ### NOTE change this
                        ax2.set_aspect('equal', 'box')
                        ax2.axis('off')
                        if save_path is None:
                            plt.show()
                        else:
                            folder = os.path.join(save_path,f'{cnt}/env')
                            Path(folder).mkdir(parents=True, exist_ok=True)
                            plt.savefig(os.path.join(folder,f'{cnt}_{j}_{idx}.png'), 
                                        bbox_inches='tight', pad_inches=0, dpi=dpi)
                            plt.close()
                    else:
                        raise(ModuleNotFoundError('CPI must be 1 or 2.'))
    print()
| [
"os.listdir",
"pathlib.Path",
"os.path.join",
"matplotlib.pyplot.close",
"os.path.dirname",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.patches.Circle",
"matplotlib.pyplot.show"
] | [((871, 891), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (881, 891), False, 'import os, sys\n'), ((2517, 2550), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_name'}), '(columns=column_name)\n', (2529, 2550), True, 'import pandas as pd\n'), ((2569, 2589), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2579, 2589), False, 'import os, sys\n'), ((3988, 4026), 'os.path.join', 'os.path.join', (['save_dir', '"""all_data.csv"""'], {}), "(save_dir, 'all_data.csv')\n", (4000, 4026), False, 'import os, sys\n'), ((966, 993), 'os.listdir', 'os.listdir', (['(data_dir + objf)'], {}), '(data_dir + objf)\n', (976, 993), False, 'import os, sys\n'), ((3854, 3903), 'pandas.DataFrame', 'pd.DataFrame', (['sample_list'], {'columns': 'df_all.columns'}), '(sample_list, columns=df_all.columns)\n', (3866, 3903), True, 'import pandas as pd\n'), ((3925, 3969), 'pandas.concat', 'pd.concat', (['[df_all, df_T]'], {'ignore_index': '(True)'}), '([df_all, df_T], ignore_index=True)\n', (3934, 3969), True, 'import pandas as pd\n'), ((238, 263), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (253, 263), False, 'import os, sys\n'), ((1078, 1113), 'os.path.join', 'os.path.join', (['data_dir', 'objf', '"""obj"""'], {}), "(data_dir, objf, 'obj')\n", (1090, 1113), False, 'import os, sys\n'), ((1862, 1954), 'pandas.DataFrame', 'pd.DataFrame', (["{'f': obj_files, 't': t_list, 'x': x_list, 'y': y_list, 'index': idx_list}"], {}), "({'f': obj_files, 't': t_list, 'x': x_list, 'y': y_list,\n 'index': idx_list})\n", (1874, 1954), True, 'import pandas as pd\n'), ((2025, 2066), 'os.path.join', 'os.path.join', (['data_dir', 'objf', '"""/data.csv"""'], {}), "(data_dir, objf, '/data.csv')\n", (2037, 2066), False, 'import os, sys\n'), ((2117, 2161), 'os.path.join', 'os.path.join', (['data_dir', 'objf', '"""obj/data.csv"""'], {}), "(data_dir, objf, 'obj/data.csv')\n", (2129, 2161), False, 'import os, sys\n'), ((2767, 2807), 
'os.path.join', 'os.path.join', (['data_dir', 'objf', '"""data.csv"""'], {}), "(data_dir, objf, 'data.csv')\n", (2779, 2807), False, 'import os, sys\n'), ((2890, 2934), 'os.path.join', 'os.path.join', (['data_dir', 'objf', '"""obj/data.csv"""'], {}), "(data_dir, objf, 'obj/data.csv')\n", (2902, 2934), False, 'import os, sys\n'), ((5536, 5586), 'matplotlib.patches.Circle', 'patches.Circle', (['tr'], {'radius': '(target_size / 2)', 'fc': '"""k"""'}), "(tr, radius=target_size / 2, fc='k')\n", (5550, 5586), True, 'import matplotlib.patches as patches\n'), ((5706, 5727), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': 'dpi'}), '(dpi=dpi)\n', (5718, 5727), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6023, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6093, 6127), 'os.path.join', 'os.path.join', (['save_path', 'f"""{cnt}/"""'], {}), "(save_path, f'{cnt}/')\n", (6105, 6127), False, 'import os, sys\n'), ((6435, 6446), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6444, 6446), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6592), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': 'dpi'}), '(dpi=dpi)\n', (6583, 6592), True, 'import matplotlib.pyplot as plt\n'), ((7400, 7421), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': 'dpi'}), '(dpi=dpi)\n', (7412, 7421), True, 'import matplotlib.pyplot as plt\n'), ((6884, 6894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6892, 6894), True, 'import matplotlib.pyplot as plt\n'), ((6962, 6999), 'os.path.join', 'os.path.join', (['save_path', 'f"""{cnt}/obj"""'], {}), "(save_path, f'{cnt}/obj')\n", (6974, 6999), False, 'import os, sys\n'), ((7307, 7318), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7316, 7318), True, 'import matplotlib.pyplot as plt\n'), ((7668, 7678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7676, 7678), True, 'import matplotlib.pyplot as plt\n'), ((7746, 
7783), 'os.path.join', 'os.path.join', (['save_path', 'f"""{cnt}/env"""'], {}), "(save_path, f'{cnt}/env')\n", (7758, 7783), False, 'import os, sys\n'), ((8057, 8068), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8066, 8068), True, 'import matplotlib.pyplot as plt\n'), ((6155, 6167), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (6159, 6167), False, 'from pathlib import Path\n'), ((7899, 7943), 'os.path.join', 'os.path.join', (['folder', 'f"""{cnt}_{j}_{idx}.png"""'], {}), "(folder, f'{cnt}_{j}_{idx}.png')\n", (7911, 7943), False, 'import os, sys\n'), ((7027, 7039), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (7031, 7039), False, 'from pathlib import Path\n'), ((7811, 7823), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (7815, 7823), False, 'from pathlib import Path\n')] |
from setuptools import setup, find_packages

# Pinned runtime dependencies.
requirements = [
    'werkzeug==0.8.3',
    'Flask==0.9',
    'Flask-Login==0.1.3',
    'Flask-WTF==0.8.2',
    'pyes==0.16',
    'requests==1.1.0',
    'parsedatetime==0.8.7',
]

# TODO look for other potentially useful classifiers
trove_classifiers = [
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
]

setup(
    name='fundfind',
    version='0.1',
    packages=find_packages(),
    url='http://fundfind.cottagelabs.com',
    author='<NAME>',
    author_email='<EMAIL>',
    description='fundfind - an Open way to share, visualise and map out scholarly funding opportunities',
    license='MIT',
    classifiers=trove_classifiers,
    install_requires=requirements,
)
| [
"setuptools.find_packages"
] | [((111, 126), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (124, 126), False, 'from setuptools import setup, find_packages\n')] |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from kmip.core import errors
from kmip.core import primitives
from kmip.core import utils
class TestTextString(testtools.TestCase):
    """Unit tests for the KMIP TextString primitive: construction, validation,
    and TTLV encoding/decoding (including the null-byte padding to 8-byte
    boundaries)."""

    def setUp(self):
        """Create a fresh byte stream and the expected-vs-received message templates."""
        super(TestTextString, self).setUp()
        self.stream = utils.BytearrayStream()
        self.bad_type = errors.ErrorStrings.BAD_EXP_RECV.format(
            'primitives.TextString.{0}', 'type', '{1}', '{2}')
        self.bad_value = errors.ErrorStrings.BAD_EXP_RECV.format(
            'primitives.TextString.{0}', 'value', '{1}', '{2}')
        self.bad_read = errors.ErrorStrings.BAD_EXP_RECV.format(
            'primitives.TextString.{0}', '', '{1}', '{2}')
        self.bad_write = errors.ErrorStrings.BAD_EXP_RECV.format(
            'primitives.TextString.{0}', 'write', '{1}', '{2}')
        self.bad_encoding = errors.ErrorStrings.BAD_ENCODING.format(
            'primitives.TextString', '')
        self.bad_length = errors.ErrorStrings.BAD_EXP_RECV.format(
            'primitives.TextString', 'length', '{0} bytes', '{1} bytes')

    def tearDown(self):
        super(TestTextString, self).tearDown()

    def test_init(self):
        """A constructor value is stored unmodified as a str."""
        value = 'Hello World'
        ts = primitives.TextString(value)

        self.assertIsInstance(ts.value, str,
                              self.bad_type.format('value', str,
                                                   type(ts.value)))
        self.assertEqual(value, ts.value,
                         self.bad_value.format('value', value, ts.value))

    def test_init_unset(self):
        """Constructing with no value yields an empty string."""
        text_string = primitives.TextString()

        expected = six.string_types
        observed = text_string.value

        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertIsInstance(observed, expected, msg)

        expected = ''
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_validate_on_valid(self):
        """validate() accepts a str value."""
        ts = primitives.TextString()
        ts.value = 'Hello World'

        # Check no exception thrown.
        ts.validate()

    def test_validate_on_valid_unset(self):
        """validate() accepts the default (unset) value."""
        ts = primitives.TextString()

        # Check no exception thrown.
        ts.validate()

    def test_validate_on_invalid_type(self):
        """validate() rejects a non-string value with TypeError."""
        ts = primitives.TextString()
        ts.value = 0

        self.assertRaises(TypeError, ts.validate)

    def test_read_value(self):
        """read_value() decodes an 11-byte string and skips its 5 padding bytes."""
        encoding = (
            b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x00\x00\x00\x00'
            b'\x00')
        self.stream = utils.BytearrayStream(encoding)
        ts = primitives.TextString()
        ts.length = 0x0B
        ts.read_value(self.stream)

        expected = 'Hello World'
        self.assertEqual(expected, ts.value,
                         self.bad_read.format('value', expected, ts.value))

    def test_read_value_no_padding(self):
        """read_value() handles a length that is already 8-byte aligned (no padding)."""
        encoding = (b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F')
        self.stream = utils.BytearrayStream(encoding)
        ts = primitives.TextString()
        ts.length = 0x08
        ts.read_value(self.stream)

        expected = 'Hello Wo'
        self.assertEqual(expected, ts.value,
                         self.bad_read.format('value', expected, ts.value))

    def test_read_value_max_padding(self):
        """read_value() handles a 1-byte string followed by 7 padding bytes."""
        encoding = (b'\x48\x00\x00\x00\x00\x00\x00\x00')
        self.stream = utils.BytearrayStream(encoding)
        ts = primitives.TextString()
        ts.length = 0x01
        ts.read_value(self.stream)

        expected = 'H'
        self.assertEqual(expected, ts.value,
                         self.bad_read.format('value', expected, ts.value))

    def test_read(self):
        """read() decodes a full TTLV item (tag + type + length + padded value)."""
        encoding = (
            b'\x42\x00\x00\x07\x00\x00\x00\x0B\x48\x65\x6C\x6C\x6F\x20\x57'
            b'\x6F\x72\x6C\x64\x00\x00\x00\x00\x00')
        self.stream = utils.BytearrayStream(encoding)
        ts = primitives.TextString()
        ts.read(self.stream)

        expected = 'Hello World'
        self.assertEqual(expected, ts.value,
                         self.bad_read.format('value', expected, ts.value))

    def test_read_on_invalid_padding(self):
        """read() raises ReadValueError when the padding bytes are not nulls."""
        encoding = (
            b'\x42\x00\x00\x07\x00\x00\x00\x0B\x48\x65\x6C\x6C\x6F\x20\x57'
            b'\x6F\x72\x6C\x64\xff\xff\xff\xff\xff')
        self.stream = utils.BytearrayStream(encoding)
        ts = primitives.TextString()

        self.assertRaises(errors.ReadValueError, ts.read, self.stream)

    def test_write_value(self):
        """write_value() emits the string plus null padding to an 8-byte boundary."""
        encoding = (
            b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x00\x00\x00\x00'
            b'\x00')
        self.stream = utils.BytearrayStream()
        value = 'Hello World'
        ts = primitives.TextString(value)
        ts.write_value(self.stream)

        result = self.stream.read()
        len_exp = len(encoding)
        len_rcv = len(result)

        self.assertEqual(len_exp, len_rcv,
                         self.bad_length.format(len_exp, len_rcv))
        self.assertEqual(encoding, result, self.bad_encoding)

    def test_write_value_no_padding(self):
        """write_value() adds no padding for an 8-byte-aligned string."""
        encoding = (b'\x48\x65\x6C\x6C\x6F\x20\x57\x6F')
        self.stream = utils.BytearrayStream()
        value = 'Hello Wo'
        ts = primitives.TextString(value)
        ts.write_value(self.stream)

        result = self.stream.read()
        len_exp = len(encoding)
        len_rcv = len(result)

        self.assertEqual(len_exp, len_rcv,
                         self.bad_length.format(len_exp, len_rcv))
        self.assertEqual(encoding, result, self.bad_encoding)

    def test_write_value_max_padding(self):
        """write_value() pads a 1-byte string with 7 null bytes."""
        encoding = (b'\x48\x00\x00\x00\x00\x00\x00\x00')
        self.stream = utils.BytearrayStream()
        value = 'H'
        ts = primitives.TextString(value)
        ts.write_value(self.stream)

        result = self.stream.read()
        len_exp = len(encoding)
        len_rcv = len(result)

        self.assertEqual(len_exp, len_rcv,
                         self.bad_length.format(len_exp, len_rcv))
        self.assertEqual(encoding, result, self.bad_encoding)

    def test_write(self):
        """write() emits the full TTLV item (tag + type + length + padded value)."""
        encoding = (
            b'\x42\x00\x00\x07\x00\x00\x00\x0B\x48\x65\x6C\x6C\x6F\x20\x57'
            b'\x6F\x72\x6C\x64\x00\x00\x00\x00\x00')
        self.stream = utils.BytearrayStream()
        value = 'Hello World'
        ts = primitives.TextString(value)
        ts.write(self.stream)

        result = self.stream.read()
        len_exp = len(encoding)
        len_rcv = len(result)

        self.assertEqual(len_exp, len_rcv,
                         self.bad_length.format(len_exp, len_rcv))
        self.assertEqual(encoding, result, self.bad_encoding)
| [
"kmip.core.utils.BytearrayStream",
"kmip.core.errors.ErrorStrings.BAD_EXP_RECV.format",
"kmip.core.primitives.TextString",
"kmip.core.errors.ErrorStrings.BAD_ENCODING.format"
] | [((897, 920), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', ([], {}), '()\n', (918, 920), False, 'from kmip.core import utils\n'), ((945, 1039), 'kmip.core.errors.ErrorStrings.BAD_EXP_RECV.format', 'errors.ErrorStrings.BAD_EXP_RECV.format', (['"""primitives.TextString.{0}"""', '"""type"""', '"""{1}"""', '"""{2}"""'], {}), "('primitives.TextString.{0}', 'type',\n '{1}', '{2}')\n", (984, 1039), False, 'from kmip.core import errors\n'), ((1074, 1169), 'kmip.core.errors.ErrorStrings.BAD_EXP_RECV.format', 'errors.ErrorStrings.BAD_EXP_RECV.format', (['"""primitives.TextString.{0}"""', '"""value"""', '"""{1}"""', '"""{2}"""'], {}), "('primitives.TextString.{0}',\n 'value', '{1}', '{2}')\n", (1113, 1169), False, 'from kmip.core import errors\n'), ((1203, 1293), 'kmip.core.errors.ErrorStrings.BAD_EXP_RECV.format', 'errors.ErrorStrings.BAD_EXP_RECV.format', (['"""primitives.TextString.{0}"""', '""""""', '"""{1}"""', '"""{2}"""'], {}), "('primitives.TextString.{0}', '',\n '{1}', '{2}')\n", (1242, 1293), False, 'from kmip.core import errors\n'), ((1328, 1423), 'kmip.core.errors.ErrorStrings.BAD_EXP_RECV.format', 'errors.ErrorStrings.BAD_EXP_RECV.format', (['"""primitives.TextString.{0}"""', '"""write"""', '"""{1}"""', '"""{2}"""'], {}), "('primitives.TextString.{0}',\n 'write', '{1}', '{2}')\n", (1367, 1423), False, 'from kmip.core import errors\n'), ((1461, 1529), 'kmip.core.errors.ErrorStrings.BAD_ENCODING.format', 'errors.ErrorStrings.BAD_ENCODING.format', (['"""primitives.TextString"""', '""""""'], {}), "('primitives.TextString', '')\n", (1500, 1529), False, 'from kmip.core import errors\n'), ((1569, 1673), 'kmip.core.errors.ErrorStrings.BAD_EXP_RECV.format', 'errors.ErrorStrings.BAD_EXP_RECV.format', (['"""primitives.TextString"""', '"""length"""', '"""{0} bytes"""', '"""{1} bytes"""'], {}), "('primitives.TextString', 'length',\n '{0} bytes', '{1} bytes')\n", (1608, 1673), False, 'from kmip.core import errors\n'), ((1824, 1852), 
'kmip.core.primitives.TextString', 'primitives.TextString', (['value'], {}), '(value)\n', (1845, 1852), False, 'from kmip.core import primitives\n'), ((2202, 2225), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (2223, 2225), False, 'from kmip.core import primitives\n'), ((2622, 2645), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (2643, 2645), False, 'from kmip.core import primitives\n'), ((2797, 2820), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (2818, 2820), False, 'from kmip.core import primitives\n'), ((2940, 2963), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (2961, 2963), False, 'from kmip.core import primitives\n'), ((3208, 3239), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', (['encoding'], {}), '(encoding)\n', (3229, 3239), False, 'from kmip.core import utils\n'), ((3253, 3276), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (3274, 3276), False, 'from kmip.core import primitives\n'), ((3614, 3645), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', (['encoding'], {}), '(encoding)\n', (3635, 3645), False, 'from kmip.core import utils\n'), ((3659, 3682), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (3680, 3682), False, 'from kmip.core import primitives\n'), ((4018, 4049), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', (['encoding'], {}), '(encoding)\n', (4039, 4049), False, 'from kmip.core import utils\n'), ((4063, 4086), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (4084, 4086), False, 'from kmip.core import primitives\n'), ((4490, 4521), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', (['encoding'], {}), '(encoding)\n', (4511, 4521), False, 'from kmip.core import utils\n'), ((4535, 4558), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (4556, 4558), False, 
'from kmip.core import primitives\n'), ((4960, 4991), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', (['encoding'], {}), '(encoding)\n', (4981, 4991), False, 'from kmip.core import utils\n'), ((5005, 5028), 'kmip.core.primitives.TextString', 'primitives.TextString', ([], {}), '()\n', (5026, 5028), False, 'from kmip.core import primitives\n'), ((5274, 5297), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', ([], {}), '()\n', (5295, 5297), False, 'from kmip.core import utils\n'), ((5341, 5369), 'kmip.core.primitives.TextString', 'primitives.TextString', (['value'], {}), '(value)\n', (5362, 5369), False, 'from kmip.core import primitives\n'), ((5801, 5824), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', ([], {}), '()\n', (5822, 5824), False, 'from kmip.core import utils\n'), ((5865, 5893), 'kmip.core.primitives.TextString', 'primitives.TextString', (['value'], {}), '(value)\n', (5886, 5893), False, 'from kmip.core import primitives\n'), ((6326, 6349), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', ([], {}), '()\n', (6347, 6349), False, 'from kmip.core import utils\n'), ((6383, 6411), 'kmip.core.primitives.TextString', 'primitives.TextString', (['value'], {}), '(value)\n', (6404, 6411), False, 'from kmip.core import primitives\n'), ((6919, 6942), 'kmip.core.utils.BytearrayStream', 'utils.BytearrayStream', ([], {}), '()\n', (6940, 6942), False, 'from kmip.core import utils\n'), ((6986, 7014), 'kmip.core.primitives.TextString', 'primitives.TextString', (['value'], {}), '(value)\n', (7007, 7014), False, 'from kmip.core import primitives\n')] |
import sys, os
# Make the sibling helper packages importable when this file is run directly.
for i in ["/task", "/workspace", "/program"]:
    sys.path.append(os.path.dirname(os.path.realpath(__file__)) + i)
import taskManager, workspaceManager, programManager
# Aggregated command table; each entry is a dict with (at least) the keys
# "name", "alias", "description" and "function" (see main() below).
commands = workspaceManager.commands + taskManager.commands + programManager.commands
# Currently selected workspace; empty string means none selected yet.
os.environ["workspace"] = ""
def main():
    """Print the command menu, then run the read-dispatch loop forever.

    Each input line's first whitespace-separated token selects the command
    (matched against its aliases or its full name, case-insensitively).
    """
    print("\nLegit Programming Todo-App\nhttps://github.com/legit-programming/todo-app\n\n")
    # Menu: show the one-letter alias (if any) next to each command.
    for i in commands:
        print(f"({i['alias'][0] if len(i['alias']) and len(i['alias'][0]) == 1 else ' '}) {i['name']}: {i['description']}")
    while True:
        print("")
        tokens = input("> ").split(" ")
        verb = tokens.pop(0).lower()
        print("")
        for entry in commands:
            if verb in entry["alias"] or verb == entry["name"]:
                entry["function"]()
                break
if __name__ == "__main__":
    try:
        main()
    # BUGFIX: `except KeyboardInterrupt or EOFError` evaluates the boolean
    # expression `KeyboardInterrupt or EOFError` (== KeyboardInterrupt), so
    # EOFError (Ctrl-D at the input() prompt) was never caught. A tuple of
    # exception types catches both.
    except (KeyboardInterrupt, EOFError):
        print("\nExiting...")
        exit()
"os.path.realpath"
] | [((98, 124), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (114, 124), False, 'import sys, os\n')] |
'''
Hash and Acoustic Fingerprint Functions
<NAME>
'''
import numpy as np
def findAdjPts(index,A,delay_time,delta_time,delta_freq):
    """Return the points of A inside the target zone anchored at A[index].

    The zone is a rectangle in (time, frequency): time strictly between
    anchor_time + delay_time and anchor_time + delay_time + delta_time,
    frequency strictly within +/- delta_freq/2 of the anchor frequency.
    """
    anchor_t, anchor_f = A[index][0], A[index][1]
    t_min = anchor_t + delay_time
    t_max = t_min + delta_time
    f_min = anchor_f - delta_freq / 2
    f_max = anchor_f + delta_freq / 2
    return [pt for pt in A
            if t_min < pt[0] < t_max and f_min < pt[1] < f_max]
return adjPts
def hashPeaks(A,songID,delay_time,delta_time,delta_freq):
    """Hash every peak of A against the peaks in its target zone.

    Each row of the returned matrix is one fingerprint:
    [freq_anchor, freq_other, delta_time, time_anchor, songID].

    Args:
        A: list of (time, freq) peak pairs.
        songID: identifier stored with every hash of this song.
        delay_time, delta_time, delta_freq: target-zone geometry (see findAdjPts).
    """
    hashMatrix = np.zeros((len(A)*100,5))  # Assume at most 100 pairs per anchor
    index = 0
    numPeaks = len(A)
    for i in range(0,numPeaks):
        adjPts = findAdjPts(i,A,delay_time,delta_time,delta_freq)
        for adj in adjPts:
            hashMatrix[index][0] = A[i][1]         # anchor frequency
            hashMatrix[index][1] = adj[1]          # paired frequency
            hashMatrix[index][2] = adj[0]-A[i][0]  # time offset between the two
            hashMatrix[index][3] = A[i][0]         # anchor time
            hashMatrix[index][4] = songID
            index=index+1
    # Drop the unused preallocated (all-zero) rows.
    hashMatrix = hashMatrix[~np.all(hashMatrix==0,axis=1)]
    # BUGFIX: np.sort(..., axis=0) sorts each *column* independently, which
    # scrambles the rows and destroys the (freq, freq, dt, t, songID)
    # association. Sort whole rows lexicographically instead.
    hashMatrix = hashMatrix[np.lexsort(hashMatrix.T[::-1])]
    return hashMatrix
return hashMatrix
def hashSamplePeaks(A,delay_time,delta_time,delta_freq):
    """Hash the peaks of a query sample (no song ID).

    Each row of the returned matrix is one fingerprint:
    [freq_anchor, freq_other, delta_time, time_anchor].

    Args:
        A: list of (time, freq) peak pairs.
        delay_time, delta_time, delta_freq: target-zone geometry (see findAdjPts).
    """
    hashMatrix = np.zeros((len(A)*100,4))  # Assume at most 100 pairs per anchor
    index = 0
    numPeaks = len(A)
    for i in range(0,numPeaks):
        adjPts = findAdjPts(i,A,delay_time,delta_time,delta_freq)
        for adj in adjPts:
            hashMatrix[index][0] = A[i][1]         # anchor frequency
            hashMatrix[index][1] = adj[1]          # paired frequency
            hashMatrix[index][2] = adj[0]-A[i][0]  # time offset between the two
            hashMatrix[index][3] = A[i][0]         # anchor time
            index=index+1
    # Drop the unused preallocated (all-zero) rows.
    hashMatrix = hashMatrix[~np.all(hashMatrix==0,axis=1)]
    # BUGFIX: np.sort(..., axis=0) sorts each *column* independently, which
    # scrambles the rows and destroys the (freq, freq, dt, t) association.
    # Sort whole rows lexicographically instead.
    hashMatrix = hashMatrix[np.lexsort(hashMatrix.T[::-1])]
    return hashMatrix
return hashMatrix
def findTimePairs(hash_database,sample_hash,deltaTime,deltaFreq):
    """Match sample hashes against database hashes within the given tolerances.

    A sample hash s matches a database hash d when s[0], s[1] and s[2] lie
    strictly within deltaFreq / deltaFreq / deltaTime of d[0], d[1] and d[2].
    Returns one tuple (database_anchor_time, sample_anchor_time, songID)
    per match.
    """
    timePairs = []
    for s in sample_hash:
        for d in hash_database:
            anchor_freq_ok = d[0] - deltaFreq < s[0] < d[0] + deltaFreq
            other_freq_ok  = d[1] - deltaFreq < s[1] < d[1] + deltaFreq
            offset_ok      = d[2] - deltaTime < s[2] < d[2] + deltaTime
            if anchor_freq_ok and other_freq_ok and offset_ok:
                timePairs.append((d[3], s[3], d[4]))
    return timePairs
return timePairs | [
"numpy.all",
"numpy.sort"
] | [((1283, 1310), 'numpy.sort', 'np.sort', (['hashMatrix'], {'axis': '(0)'}), '(hashMatrix, axis=0)\n', (1290, 1310), True, 'import numpy as np\n'), ((2025, 2052), 'numpy.sort', 'np.sort', (['hashMatrix'], {'axis': '(0)'}), '(hashMatrix, axis=0)\n', (2032, 2052), True, 'import numpy as np\n'), ((1236, 1267), 'numpy.all', 'np.all', (['(hashMatrix == 0)'], {'axis': '(1)'}), '(hashMatrix == 0, axis=1)\n', (1242, 1267), True, 'import numpy as np\n'), ((1978, 2009), 'numpy.all', 'np.all', (['(hashMatrix == 0)'], {'axis': '(1)'}), '(hashMatrix == 0, axis=1)\n', (1984, 2009), True, 'import numpy as np\n')] |
from utils.stats_trajectories import trajectory_arclength
import statistics as stats
import numpy as np
import logging
# Returns a matrix of trajectories:
# the entry (i,j) has the paths that go from the goal i to the goal j
def separate_trajectories_between_goals(trajectories, goals_areas):
    """Group trajectories by their (start goal, end goal) pair.

    Returns an n x n object matrix whose (i, j) entry lists the trajectories
    that start inside goal i and end inside goal j, plus the list of
    trajectories (too short, or outside every goal) left unassociated.
    """
    n_goals = len(goals_areas)
    goal_zones = goals_areas[:, 1:]  # drop the leading label column
    mat = np.empty((n_goals, n_goals), dtype=object)
    for r in range(n_goals):
        for c in range(n_goals):
            mat[r][c] = []
    not_associated = []

    def _containing_goal(point):
        # Index of the last goal zone containing `point`, or None.
        found = None
        for g in range(n_goals):
            if is_in_area(point, goal_zones[g]):
                found = g
        return found

    for tr in trajectories:
        x, y = tr[0], tr[1]
        placed = False
        # Trajectories with fewer than 3 samples are never associated.
        if len(x) > 2:
            start_goal = _containing_goal([x[0], y[0]])
            end_goal = _containing_goal([x[-1], y[-1]])
            if start_goal is not None and end_goal is not None:
                mat[start_goal][end_goal].append(tr)
                placed = True
        if not placed:
            not_associated.append(tr)
    return mat, not_associated
# Removes atypical trajectories
def filter_trajectories(trajectories):
    """Drop atypical trajectories.

    A trajectory survives when its total arc length is positive and within
    3 standard deviations of the median arc length of the set.
    """
    if len(trajectories) == 0:
        return []
    lengths = [trajectory_arclength(tr)[-1] for tr in trajectories]
    med = stats.median(lengths)
    spread = stats.stdev(lengths) if len(lengths) >= 2 else 0.0
    return [tr for tr, total in zip(trajectories, lengths)
            if total > 0 and abs(total - med) <= 3.0 * spread]
return filtered_set
# Removes atypical trajectories from a multidimensional array
def filter_traj_matrix(raw_path_set_matrix):
    """Apply filter_trajectories to every cell of a goal-to-goal matrix.

    Returns the filtered matrix (same shape, object dtype) together with a
    flat list of every surviving trajectory.
    """
    n_rows, n_cols = raw_path_set_matrix.shape
    filtered_matrix = np.empty(raw_path_set_matrix.shape, dtype=object)
    all_trajectories = []
    for r in range(n_rows):
        for c in range(n_cols):
            filtered_matrix[r][c] = []
            cell = raw_path_set_matrix[r][c]
            # Only non-empty cells need filtering.
            if len(cell) > 0:
                kept = filter_trajectories(cell)
                filtered_matrix[r][c].extend(kept)
                all_trajectories.extend(kept)
    return filtered_matrix, all_trajectories
def start_time(traj):
    """Return the initial timestamp of a trajectory given as [x, y, t]."""
    times = traj[2]
    return times[0]
def get_trajectories_given_time_interval(trajectories, start_time, finish_time):
    """Return the trajectories whose initial timestamp lies in [start_time, finish_time].

    Assumes `trajectories` is sorted by initial time (traj[2][0]), so the
    scan stops at the first trajectory that starts after finish_time.

    BUGFIX: the original `while t <= finish_time` loop indexed past the end
    of the list (IndexError) whenever no trajectory started after
    finish_time.
    """
    if len(trajectories) == 0:
        logging.error("Empty set")
        return []
    traj_set = []
    for tr in trajectories:
        t = tr[2][0]
        if t > finish_time:
            # Sorted input: every later trajectory also starts too late.
            break
        if start_time <= t:
            traj_set.append(tr)
    return traj_set
# Split a trajectory into sub-trajectories between pairs of goals
def break_multigoal_traj(tr, goals):
    """Split a trajectory into sub-trajectories between consecutive goal visits.

    Walks the trajectory [x, y, t]; each time the path *leaves* a goal zone a
    new sub-trajectory is started (seeded with the last in-goal point), and
    it is closed either when the path leaves the *next* goal zone or when the
    trajectory ends inside a goal. Returns a list of [x, y, t] numpy triples.

    NOTE(review): the final-point check uses `current_goal>0`, so trajectories
    ending in goal 0 are not flushed — confirm whether that is intentional.
    """
    x, y, t = tr[0], tr[1], tr[2]
    traj_set = []
    new_x, new_y, new_t = [], [], [] # New trajectory
    last_goal = -1 # Last goal
    started = False # Flag to indicate that we have started with one goal
    for i in range(len(x)):
        xy = [x[i], y[i]] # Current position
        # Am I in a goal
        current_goal = -1
        for j in range(len(goals)):
            # If the position lies in the goal zone
            if is_in_area(xy, goals[j,1:]):
                current_goal=j
        if current_goal==-1 and last_goal!=-1 and started:
            # Split the trajectory just before
            traj_set.append([np.array(new_x),np.array(new_y),np.array(new_t)] )
        if current_goal==-1 and last_goal!=-1:
            # At that point we start the trajectory
            # with a point that should be in last_goal
            started = True
            new_x, new_y, new_t = [x[i-1]], [y[i-1]], [t[i-1]]
        last_goal=current_goal
        new_x.append(x[i])
        new_y.append(y[i])
        new_t.append(t[i])
        # Coming at the end
        if current_goal>0 and i==len(x)-1 and started:
            traj_set.append([np.array(new_x),np.array(new_y),np.array(new_t)] )
    return traj_set
# Returns 3 lists with the x, y and arc-len values of a trajectory set, respectively
def get_data_from_set(trajectories):
    """Return parallel lists of the x, y and arc-length arrays of a trajectory set."""
    xs = [tr[0] for tr in trajectories]
    ys = [tr[1] for tr in trajectories]
    arclens = [trajectory_arclength(tr) for tr in trajectories]
    return xs, ys, arclens
# Linear regression: f(l) = a + b*l
# Returns the slope of the line and the intercept
def line_parameters(traj, flag):
    """Endpoint line fit f(l) = b + a*l for one coordinate of a trajectory.

    `flag` selects the coordinate: 'x' or 'y'. Returns (slope, intercept);
    a zero-arc-length trajectory yields (0., 0.).
    """
    total_len = trajectory_arclength(traj)[-1]
    if total_len == 0:
        return 0., 0.
    if flag == 'x':
        coord = traj[0]
    if flag == 'y':
        coord = traj[1]
    intercept = coord[0]
    slope = (coord[-1] - intercept) / total_len
    return slope, intercept
# Takes as an input a set of trajectories (between goals)
# and a flag that says whether the orientation is in x or y
def get_linear_prior_mean(trajectories, flag):
    """Robust linear prior over a set of trajectories.

    Fits f(l) = b + a*l to each trajectory via line_parameters and returns
    ([median_slope, median_intercept], [var_slope, var_intercept]).

    Note: an empty input returns the legacy sentinel [0., 0., 0.] (a single
    list, not a (mean, var) pair) — kept for backward compatibility.
    """
    n = len(trajectories)
    if n == 0:
        return [0.,0.,0.]
    lineParameters = np.array([ line_parameters(trajectories[i], flag) for i in range(n)])
    mean = [np.median(lineParameters[:,0]), np.median(lineParameters[:,1]) ]
    var = [np.var(lineParameters[:,0]), np.var(lineParameters[:,1]) ]
    # Removed a dead np.cov(...) computation whose result was never used
    # or returned.
    return mean, var
def arclen_to_time(init_time, arclen, speed):
    """Convert cumulative arc lengths to integer timestamps at constant speed.

    Starts at `init_time`; each subsequent timestamp advances by the segment
    length divided by `speed`, truncated to int (the times are stored in an
    integer array).
    """
    timestamps = np.zeros(len(arclen), dtype=int)
    timestamps[0] = init_time
    for k in range(1, len(arclen)):
        step = (arclen[k] - arclen[k-1]) / speed
        timestamps[k] = int(timestamps[k-1] + step)
    return timestamps
# Function to get the ground truth data: n data
def observed_data(traj, n):
    """Split a trajectory into the first n observed samples and the remaining ground truth.

    For traj == [x, y, l, t]: returns (obs, gt) where obs stacks
    [x, y, arclen, t, speed] columns for samples 0..n-1 and gt stacks
    [x, y, t, speed] for samples n.. (speeds are finite differences).
    Returns (None, None) when fewer than 2 ground-truth samples remain.
    For traj == [x, y, t]: returns only the observed [x, y, t] columns.
    Any other length falls through and returns None implicitly.
    """
    if (len(traj)==4):
        x, y, l, t = traj
        # Column vectors of the first n samples.
        obsX, obsY, obsL, obsT = np.reshape(x[0:n],(-1,1)), np.reshape(y[0:n],(-1,1)), np.reshape(l[0:n],(-1,1)),np.reshape(t[0:n],(-1,1))
        # Observed speeds: distance between consecutive samples over elapsed time.
        obsS = np.reshape(np.divide(np.sqrt(np.square(x[1:n+1]-x[:n])+np.square(y[1:n+1]-y[:n])),t[1:n+1]-t[:n]),(-1,1))
        gtX, gtY, gtT = np.reshape(x[n:],(-1,1)), np.reshape(y[n:],(-1,1)),np.reshape(t[n:],(-1,1))
        # Ground-truth speeds, padded with a trailing 0.0 that is overwritten
        # below by the previous speed (no forward difference exists there).
        gtS = np.reshape(np.concatenate([np.divide(np.sqrt(np.square(x[n+1:]-x[n:-1])+np.square(y[n+1:]-y[n:-1])),t[n+1:]-t[n:-1]),[0.0]]),(-1,1))
        if gtS.shape[0]<2:
            return None, None
        gtS[-1,0] = gtS[-2,0]
        return np.concatenate([obsX, obsY, obsL, obsT, obsS],axis=1),np.concatenate([gtX, gtY, gtT,gtS],axis=1)
    else:
        if (len(traj)==3):
            x, y, t = traj
            obsX, obsY, obsT = np.reshape(x[0:n],(-1,1)), np.reshape(y[0:n],(-1,1)), np.reshape(t[0:n],(-1,1))
            return np.concatenate([obsX, obsY, obsT],axis=1)
def observed_data_given_time(traj, time):
    """Split `traj` via observed_data, observing every sample with t <= time."""
    timestamps = traj[2]
    count = 0
    # Stop one short of the end so at least one ground-truth sample remains.
    while count < len(timestamps) - 1 and timestamps[count] <= time:
        count += 1
    return observed_data(traj, count)
def reshape_trajectory(traj):
    """Split an (n, 3) trajectory array into [x, y, t] column vectors.

    BUGFIX: ndarray.reshape returns a *new* array; the original discarded
    the result of each reshape call, so x, y and t were returned with their
    original 1-D shape instead of the intended (n, 1) columns.
    """
    x = traj[:, 0].reshape((-1, 1))
    y = traj[:, 1].reshape((-1, 1))
    t = traj[:, 2].reshape((-1, 1))
    return [x, y, t]
# Membership test: does the point (x, y) fall inside the area's bounding box?
def is_in_area(p, area):
    """Return True when p=(x, y) lies within the axis-aligned bounding box
    of *area* (interleaved x/y coordinates); boundaries are inclusive."""
    x, y = p[0], p[1]
    xs = area[0::2]
    ys = area[1::2]
    return min(xs) <= x <= max(xs) and min(ys) <= y <= max(ys)
def get_goal_of_point(p, goals):
    """Return the index of the first goal area containing p, or None."""
    for idx, goal in enumerate(goals):
        if is_in_area(p, goal):
            return idx
    return None
# Center of a rectangular area given its first and last corner coordinates.
def goal_center(area):
    """Return the [x, y] centroid computed from area[0], area[1] and the
    final coordinate pair area[-2], area[-1]."""
    half_dx = (area[-2] - area[0]) / 2.
    half_dy = (area[-1] - area[1]) / 2.
    return [area[0] + half_dx, area[1] + half_dy]
def goal_center_and_size(area):
    """Return (center, size) of *area* given as interleaved x/y coordinates.

    The center averages the four x (resp. y) values (0.25 * sum); the size
    is the extent max - min along each axis.
    """
    xs = area[::2]
    ys = area[1::2]
    center = np.array([0.25 * float(np.sum(xs)), 0.25 * float(np.sum(ys))])
    size = np.array([float(np.max(xs)) - float(np.min(xs)),
                     float(np.max(ys)) - float(np.min(ys))])
    return center, size
| [
"statistics.stdev",
"numpy.median",
"numpy.reshape",
"utils.stats_trajectories.trajectory_arclength",
"numpy.max",
"statistics.median",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.square",
"numpy.concatenate",
"numpy.min",
"numpy.cov",
"logging.error",
"numpy.var"
... | [((370, 412), 'numpy.empty', 'np.empty', (['(goals_n, goals_n)'], {'dtype': 'object'}), '((goals_n, goals_n), dtype=object)\n', (378, 412), True, 'import numpy as np\n'), ((1884, 1904), 'statistics.median', 'stats.median', (['arclen'], {}), '(arclen)\n', (1896, 1904), True, 'import statistics as stats\n'), ((2437, 2486), 'numpy.empty', 'np.empty', (['raw_path_set_matrix.shape'], {'dtype': 'object'}), '(raw_path_set_matrix.shape, dtype=object)\n', (2445, 2486), True, 'import numpy as np\n'), ((5469, 5495), 'utils.stats_trajectories.trajectory_arclength', 'trajectory_arclength', (['traj'], {}), '(traj)\n', (5489, 5495), False, 'from utils.stats_trajectories import trajectory_arclength\n'), ((6224, 6274), 'numpy.cov', 'np.cov', (['lineParameters[:, 0]', 'lineParameters[:, 1]'], {}), '(lineParameters[:, 0], lineParameters[:, 1])\n', (6230, 6274), True, 'import numpy as np\n'), ((6371, 6393), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (6379, 6393), True, 'import numpy as np\n'), ((1726, 1750), 'utils.stats_trajectories.trajectory_arclength', 'trajectory_arclength', (['tr'], {}), '(tr)\n', (1746, 1750), False, 'from utils.stats_trajectories import trajectory_arclength\n'), ((1969, 1988), 'statistics.stdev', 'stats.stdev', (['arclen'], {}), '(arclen)\n', (1980, 1988), True, 'import statistics as stats\n'), ((3316, 3342), 'logging.error', 'logging.error', (['"""Empty set"""'], {}), "('Empty set')\n", (3329, 3342), False, 'import logging\n'), ((6079, 6110), 'numpy.median', 'np.median', (['lineParameters[:, 0]'], {}), '(lineParameters[:, 0])\n', (6088, 6110), True, 'import numpy as np\n'), ((6111, 6142), 'numpy.median', 'np.median', (['lineParameters[:, 1]'], {}), '(lineParameters[:, 1])\n', (6120, 6142), True, 'import numpy as np\n'), ((6155, 6183), 'numpy.var', 'np.var', (['lineParameters[:, 0]'], {}), '(lineParameters[:, 0])\n', (6161, 6183), True, 'import numpy as np\n'), ((6184, 6212), 'numpy.var', 'np.var', (['lineParameters[:, 
1]'], {}), '(lineParameters[:, 1])\n', (6190, 6212), True, 'import numpy as np\n'), ((7558, 7600), 'numpy.concatenate', 'np.concatenate', (['[obsX, obsY, obsT]'], {'axis': '(1)'}), '([obsX, obsY, obsT], axis=1)\n', (7572, 7600), True, 'import numpy as np\n'), ((5264, 5288), 'utils.stats_trajectories.trajectory_arclength', 'trajectory_arclength', (['tr'], {}), '(tr)\n', (5284, 5288), False, 'from utils.stats_trajectories import trajectory_arclength\n'), ((6694, 6721), 'numpy.reshape', 'np.reshape', (['x[0:n]', '(-1, 1)'], {}), '(x[0:n], (-1, 1))\n', (6704, 6721), True, 'import numpy as np\n'), ((6721, 6748), 'numpy.reshape', 'np.reshape', (['y[0:n]', '(-1, 1)'], {}), '(y[0:n], (-1, 1))\n', (6731, 6748), True, 'import numpy as np\n'), ((6748, 6775), 'numpy.reshape', 'np.reshape', (['l[0:n]', '(-1, 1)'], {}), '(l[0:n], (-1, 1))\n', (6758, 6775), True, 'import numpy as np\n'), ((6774, 6801), 'numpy.reshape', 'np.reshape', (['t[0:n]', '(-1, 1)'], {}), '(t[0:n], (-1, 1))\n', (6784, 6801), True, 'import numpy as np\n'), ((6945, 6971), 'numpy.reshape', 'np.reshape', (['x[n:]', '(-1, 1)'], {}), '(x[n:], (-1, 1))\n', (6955, 6971), True, 'import numpy as np\n'), ((6971, 6997), 'numpy.reshape', 'np.reshape', (['y[n:]', '(-1, 1)'], {}), '(y[n:], (-1, 1))\n', (6981, 6997), True, 'import numpy as np\n'), ((6996, 7022), 'numpy.reshape', 'np.reshape', (['t[n:]', '(-1, 1)'], {}), '(t[n:], (-1, 1))\n', (7006, 7022), True, 'import numpy as np\n'), ((7271, 7325), 'numpy.concatenate', 'np.concatenate', (['[obsX, obsY, obsL, obsT, obsS]'], {'axis': '(1)'}), '([obsX, obsY, obsL, obsT, obsS], axis=1)\n', (7285, 7325), True, 'import numpy as np\n'), ((7325, 7369), 'numpy.concatenate', 'np.concatenate', (['[gtX, gtY, gtT, gtS]'], {'axis': '(1)'}), '([gtX, gtY, gtT, gtS], axis=1)\n', (7339, 7369), True, 'import numpy as np\n'), ((7463, 7490), 'numpy.reshape', 'np.reshape', (['x[0:n]', '(-1, 1)'], {}), '(x[0:n], (-1, 1))\n', (7473, 7490), True, 'import numpy as np\n'), ((7490, 7517), 
'numpy.reshape', 'np.reshape', (['y[0:n]', '(-1, 1)'], {}), '(y[0:n], (-1, 1))\n', (7500, 7517), True, 'import numpy as np\n'), ((7517, 7544), 'numpy.reshape', 'np.reshape', (['t[0:n]', '(-1, 1)'], {}), '(t[0:n], (-1, 1))\n', (7527, 7544), True, 'import numpy as np\n'), ((4393, 4408), 'numpy.array', 'np.array', (['new_x'], {}), '(new_x)\n', (4401, 4408), True, 'import numpy as np\n'), ((4409, 4424), 'numpy.array', 'np.array', (['new_y'], {}), '(new_y)\n', (4417, 4424), True, 'import numpy as np\n'), ((4425, 4440), 'numpy.array', 'np.array', (['new_t'], {}), '(new_t)\n', (4433, 4440), True, 'import numpy as np\n'), ((4912, 4927), 'numpy.array', 'np.array', (['new_x'], {}), '(new_x)\n', (4920, 4927), True, 'import numpy as np\n'), ((4928, 4943), 'numpy.array', 'np.array', (['new_y'], {}), '(new_y)\n', (4936, 4943), True, 'import numpy as np\n'), ((4944, 4959), 'numpy.array', 'np.array', (['new_t'], {}), '(new_t)\n', (4952, 4959), True, 'import numpy as np\n'), ((8623, 8640), 'numpy.sum', 'np.sum', (['area[::2]'], {}), '(area[::2])\n', (8629, 8640), True, 'import numpy as np\n'), ((8653, 8671), 'numpy.sum', 'np.sum', (['area[1::2]'], {}), '(area[1::2])\n', (8659, 8671), True, 'import numpy as np\n'), ((8702, 8719), 'numpy.max', 'np.max', (['area[::2]'], {}), '(area[::2])\n', (8708, 8719), True, 'import numpy as np\n'), ((8727, 8744), 'numpy.min', 'np.min', (['area[::2]'], {}), '(area[::2])\n', (8733, 8744), True, 'import numpy as np\n'), ((8752, 8770), 'numpy.max', 'np.max', (['area[1::2]'], {}), '(area[1::2])\n', (8758, 8770), True, 'import numpy as np\n'), ((8778, 8796), 'numpy.min', 'np.min', (['area[1::2]'], {}), '(area[1::2])\n', (8784, 8796), True, 'import numpy as np\n'), ((6844, 6873), 'numpy.square', 'np.square', (['(x[1:n + 1] - x[:n])'], {}), '(x[1:n + 1] - x[:n])\n', (6853, 6873), True, 'import numpy as np\n'), ((6870, 6899), 'numpy.square', 'np.square', (['(y[1:n + 1] - y[:n])'], {}), '(y[1:n + 1] - y[:n])\n', (6879, 6899), True, 'import numpy as np\n'), 
((7081, 7111), 'numpy.square', 'np.square', (['(x[n + 1:] - x[n:-1])'], {}), '(x[n + 1:] - x[n:-1])\n', (7090, 7111), True, 'import numpy as np\n'), ((7108, 7138), 'numpy.square', 'np.square', (['(y[n + 1:] - y[n:-1])'], {}), '(y[n + 1:] - y[n:-1])\n', (7117, 7138), True, 'import numpy as np\n')] |
import unittest
import DecodeMouseData as d
class FooTests(unittest.TestCase):
    """Unit tests for DecodeMouseData's JSON decoding and session metrics.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` throughout.
    """

    def setUp(self):
        self.dmd = d.DecodeMouseData()

    def testDecode(self):
        """A plain JSON object decodes into the equivalent dict."""
        expected = {'1':2, '3':4}
        actual = self.dmd.decode('{"1":2, "3":4}')
        self.assertEqual(actual, expected)

    def testMouseDecode(self):
        """A mouse-event record decodes with its nested fields intact."""
        expected = {"-JKMBewWrFje3lHT8spD" :
                    {"t" : 1397327310399, "y" : 646, "x" : 629}}
        actual = self.dmd.decode(
            '{"-JKMBewWrFje3lHT8spD" : ' +
            '{"t" : 1397327310399, "y" : 646, "x" : 629}}')
        self.assertEqual(actual, expected)

    def testNumClicks(self):
        """A single event counts as one click."""
        expected = 1
        actual = self.dmd.getNumberOfClicks(
            '{"-JKMBewWrFje3lHT8spD" : ' +
            '{"t" : 1397327310399, "y" : 646, "x" : 629}}')
        self.assertEqual(actual, expected)

    def testLotsClicks(self):
        """Two events count as two clicks."""
        expected = 2
        actual = self.dmd.getNumberOfClicks("""{
            "-JKMBewWrFje3lHT8spD" : {
                "t" : 1397327310399,
                "y" : 646,
                "x" : 629
            },
            "-JKMBewawNo6G_Zdfnkk" : {
                "t" : 1397327310465,
                "y" : 646,
                "x" : 629
            }
        }""")
        self.assertEqual(actual, expected)

    def testComputeSessionTime(self):
        """Session duration between the two timestamps (66 ms) is 0.0011."""
        expected = 0.0011
        actual = self.dmd.getSessionDuration("""{
            "-JKMBewWrFje3lHT8spD" : {
                "t" : 1397327310399,
                "y" : 646,
                "x" : 629
            },
            "-JKMBewawNo6G_Zdfnkk" : {
                "t" : 1397327310465,
                "y" : 646,
                "x" : 629
            }
        }""")
        self.assertEqual(actual, expected)
def main():
    """Run this module's test suite via unittest's command-line runner."""
    unittest.main()

if __name__ == '__main__':
    main()
| [
"unittest.main",
"DecodeMouseData.DecodeMouseData"
] | [((1566, 1581), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1579, 1581), False, 'import unittest\n'), ((122, 141), 'DecodeMouseData.DecodeMouseData', 'd.DecodeMouseData', ([], {}), '()\n', (139, 141), True, 'import DecodeMouseData as d\n')] |
from rest_framework.routers import SimpleRouter, Route
class SwitchDetailRouter(SimpleRouter):
    """Router exposing a single detail route named '<basename>-switch'.

    On the detail URL (``/<prefix>/<lookup>/``) it maps POST to the
    viewset's ``create`` action and DELETE to ``destroy``.
    """
    routes = [
        Route(
            url=r'^{prefix}/{lookup}{trailing_slash}$',
            mapping={
                'post': 'create',
                'delete': 'destroy'
            },
            name='{basename}-switch',
            detail=True,
            initkwargs={'suffix': 'Switch'}
        )
    ]
] | [
"rest_framework.routers.Route"
] | [((119, 297), 'rest_framework.routers.Route', 'Route', ([], {'url': '"""^{prefix}/{lookup}{trailing_slash}$"""', 'mapping': "{'post': 'create', 'delete': 'destroy'}", 'name': '"""{basename}-switch"""', 'detail': '(True)', 'initkwargs': "{'suffix': 'Switch'}"}), "(url='^{prefix}/{lookup}{trailing_slash}$', mapping={'post': 'create',\n 'delete': 'destroy'}, name='{basename}-switch', detail=True, initkwargs\n ={'suffix': 'Switch'})\n", (124, 297), False, 'from rest_framework.routers import SimpleRouter, Route\n')] |
from abc import ABC
from src.domanin.factories.repository_factory import RepositoryFactory
from src.infra.repositories.rest.github_data_rest_repository import GithubDataRestRepository
from src.infra.repositories.rest.schedule_rest_repository import ScheduleJsonRepository
class RestRepositoryFactory(RepositoryFactory, ABC):
    """Concrete RepositoryFactory wiring REST/JSON-backed repositories."""

    @property
    def github_data_repository(self) -> GithubDataRestRepository:
        # Fresh repository instance on every access.
        return GithubDataRestRepository()

    @property
    def schedule_repository(self) -> ScheduleJsonRepository:
        # Fresh repository instance on every access.
        return ScheduleJsonRepository()
| [
"src.infra.repositories.rest.github_data_rest_repository.GithubDataRestRepository",
"src.infra.repositories.rest.schedule_rest_repository.ScheduleJsonRepository"
] | [((396, 422), 'src.infra.repositories.rest.github_data_rest_repository.GithubDataRestRepository', 'GithubDataRestRepository', ([], {}), '()\n', (420, 422), False, 'from src.infra.repositories.rest.github_data_rest_repository import GithubDataRestRepository\n'), ((514, 538), 'src.infra.repositories.rest.schedule_rest_repository.ScheduleJsonRepository', 'ScheduleJsonRepository', ([], {}), '()\n', (536, 538), False, 'from src.infra.repositories.rest.schedule_rest_repository import ScheduleJsonRepository\n')] |
"""Custom page renderers for Mon School.
The URLs that are handled here are:
/s/<sketch_id>.svg
/s/<sketch_id>-<hash>-s.png
/s/<sketch_id>-<hash>-w.png
"""
import frappe
import hashlib
import re
from pathlib import Path
import cairosvg
from frappe.website.page_renderers.base_renderer import BaseRenderer
from werkzeug.wrappers import Response
from .doctype.lms_sketch.lms_sketch import DEFAULT_IMAGE
# This is referred from the hooks (Frappe loads these renderer classes).
page_renderer = [
    "mon_school.mon_school.page_renderers.SketchImage",
    "mon_school.mon_school.page_renderers.SketchPNG"
]

# Matches "s/<sketch_id>.<svg|png>" image URLs handled by SketchImage.
RE_SKETCH_IMAGE = re.compile(r"s/(\d+).(svg|png)$")
class SketchImage(BaseRenderer):
    """Render a sketch as an SVG image for display, or redirect to the
    hashed PNG preview URL.

    Handles ``/s/<sketch_id>.svg`` and ``/s/<sketch_id>.png``.
    """

    def can_render(self):
        """True when the request path looks like a sketch image URL."""
        return RE_SKETCH_IMAGE.match(self.path) is not None

    def render(self):
        """Dispatch to the SVG or PNG renderer; 404 on unknown sketch/format."""
        m = RE_SKETCH_IMAGE.match(self.path)
        sketch_id = m.group(1)
        format = m.group(2)
        name = f"SKETCH-{sketch_id}"
        try:
            s = frappe.get_doc("LMS Sketch", name)
        except frappe.DoesNotExistError:
            # Removed a dead `s = None` assignment — we return immediately.
            return Response("", status="404 Not Found")

        if format == "svg":
            return self.render_svg(s)
        elif format == "png":
            return self.render_png(s)
        else:
            return Response("", status="404 Not Found")

    def render_svg(self, sketch):
        """Serve the sketch's SVG source (or the default placeholder image)."""
        svg = sketch.svg or DEFAULT_IMAGE
        return Response(svg, content_type="image/svg+xml")

    def render_png(self, sketch):
        """Redirect to the content-hashed PNG URL so it can be cached.

        NOTE(review): the target URL has no "-<mode>" suffix, so it does
        not match RE_SKETCH_SQUARE_IMAGE handled by SketchPNG — confirm
        the intended redirect target.
        """
        sketch_id = sketch.sketch_id
        sketch_hash = sketch.get_hash()
        headers = {"Location": f"{frappe.request.host_url}s/{sketch_id}-{sketch_hash}.png"}
        return Response("", status="302 Found", headers=headers)
# Matches cached PNG URLs "s/<sketch_id>-<code_hash>-<mode>.png",
# where mode is one of s (square), m (medium), w (wide).
RE_SKETCH_SQUARE_IMAGE = re.compile(r"s/(.+)-([0-9a-f]+)-([smw]).png$")
class SketchPNG(BaseRenderer):
    """Class to render Sketch images as PNG.

    The image name contains the sketch-id and a hash of the code, so the
    URL can be cached forever: whenever the sketch's code changes, the
    URL changes. Three size modes are advertised:

      s - square 300x300 (recent-sketches grid)
      m - square 600x600
      w - wide 550x300 (meta-tag preview image)
    """
    IMAGE_SIZES_BY_MODE = {
        "s": (300, 300),
        "m": (600, 600),
        "w": (550, 300)
    }

    def can_render(self):
        """True when the request path looks like a hashed sketch PNG URL."""
        return RE_SKETCH_SQUARE_IMAGE.match(self.path) is not None

    def get_sketch(self, sketch_id):
        """Load the sketch document; ids prefixed with "x" are Contest
        Entries, everything else an LMS Sketch. Returns None when missing."""
        if sketch_id.startswith("x"):
            doctype = "Contest Entry"
            name = sketch_id.replace("x-", "")
        else:
            doctype = "LMS Sketch"
            name = "SKETCH-" + sketch_id
        try:
            return frappe.get_doc(doctype, name)
        except frappe.DoesNotExistError:
            pass

    def render(self):
        """Serve the cached PNG, redirecting stale-hash URLs to the
        current content hash. (Removed an unused `name` local.)"""
        m = RE_SKETCH_SQUARE_IMAGE.match(self.path)
        sketch_id = m.group(1)
        hash_ = m.group(2)
        mode = m.group(3)
        filename = f"{sketch_id}-{hash_}-{mode}.png"

        sketch = self.get_sketch(sketch_id)
        if not sketch:
            return Response("", status="404 Not Found")

        sketch_hash = sketch.get_hash()
        if sketch_hash != hash_:
            # Stale hash: redirect to the URL for the current code hash.
            headers = {"Location": f"{frappe.request.host_url}s/{sketch_id}-{sketch_hash}-{mode}.png"}
            return Response("", status="302 Found", headers=headers)

        return self.render_png(sketch, filename, mode)

    def to_png(self, sketch, filename, mode):
        """Return cached PNG bytes for *filename*, or None when the image
        has not been rendered into the site's sketch-cache yet."""
        cache_dir = Path(frappe.local.site_path) / "sketch-cache"
        cache_dir.mkdir(exist_ok=True)

        path = cache_dir / filename
        if path.exists():
            return path.read_bytes()
        return None

    def render_png(self, sketch, filename, mode):
        """Serve the PNG with an immutable cache header, or redirect to a
        placeholder while the image is not ready."""
        png = self.to_png(sketch, filename, mode)
        if not png:
            headers = {"Location": f"{frappe.request.host_url}assets/mon_school/images/image-not-ready.png"}
            return Response("", status="302 Found", headers=headers)

        # cache forever
        headers = {
            "Cache-Control": "public, max-age=31536000"
        }
        return Response(png, content_type="image/png", headers=headers)
| [
"werkzeug.wrappers.Response",
"frappe.get_doc",
"pathlib.Path",
"re.compile"
] | [((580, 613), 're.compile', 're.compile', (['"""s/(\\\\d+).(svg|png)$"""'], {}), "('s/(\\\\d+).(svg|png)$')\n", (590, 613), False, 'import re\n'), ((1822, 1867), 're.compile', 're.compile', (['"""s/(.+)-([0-9a-f]+)-([smw]).png$"""'], {}), "('s/(.+)-([0-9a-f]+)-([smw]).png$')\n", (1832, 1867), False, 'import re\n'), ((1483, 1526), 'werkzeug.wrappers.Response', 'Response', (['svg'], {'content_type': '"""image/svg+xml"""'}), "(svg, content_type='image/svg+xml')\n", (1491, 1526), False, 'from werkzeug.wrappers import Response\n'), ((1746, 1795), 'werkzeug.wrappers.Response', 'Response', (['""""""'], {'status': '"""302 Found"""', 'headers': 'headers'}), "('', status='302 Found', headers=headers)\n", (1754, 1795), False, 'from werkzeug.wrappers import Response\n'), ((4421, 4477), 'werkzeug.wrappers.Response', 'Response', (['png'], {'content_type': '"""image/png"""', 'headers': 'headers'}), "(png, content_type='image/png', headers=headers)\n", (4429, 4477), False, 'from werkzeug.wrappers import Response\n'), ((1033, 1067), 'frappe.get_doc', 'frappe.get_doc', (['"""LMS Sketch"""', 'name'], {}), "('LMS Sketch', name)\n", (1047, 1067), False, 'import frappe\n'), ((2947, 2976), 'frappe.get_doc', 'frappe.get_doc', (['doctype', 'name'], {}), '(doctype, name)\n', (2961, 2976), False, 'import frappe\n'), ((3372, 3408), 'werkzeug.wrappers.Response', 'Response', (['""""""'], {'status': '"""404 Not Found"""'}), "('', status='404 Not Found')\n", (3380, 3408), False, 'from werkzeug.wrappers import Response\n'), ((3605, 3654), 'werkzeug.wrappers.Response', 'Response', (['""""""'], {'status': '"""302 Found"""', 'headers': 'headers'}), "('', status='302 Found', headers=headers)\n", (3613, 3654), False, 'from werkzeug.wrappers import Response\n'), ((3778, 3806), 'pathlib.Path', 'Path', (['frappe.local.site_path'], {}), '(frappe.local.site_path)\n', (3782, 3806), False, 'from pathlib import Path\n'), ((4245, 4294), 'werkzeug.wrappers.Response', 'Response', (['""""""'], {'status': 
'"""302 Found"""', 'headers': 'headers'}), "('', status='302 Found', headers=headers)\n", (4253, 4294), False, 'from werkzeug.wrappers import Response\n'), ((1149, 1185), 'werkzeug.wrappers.Response', 'Response', (['""""""'], {'status': '"""404 Not Found"""'}), "('', status='404 Not Found')\n", (1157, 1185), False, 'from werkzeug.wrappers import Response\n'), ((1354, 1390), 'werkzeug.wrappers.Response', 'Response', (['""""""'], {'status': '"""404 Not Found"""'}), "('', status='404 Not Found')\n", (1362, 1390), False, 'from werkzeug.wrappers import Response\n')] |
"""
ec2.types
~~~~~~~~~
:copyright: (c) 2012 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
from ec2.connection import get_connection
from ec2.base import objects_base
class instances(objects_base):
    """Singleton to stem off queries for instances."""

    @classmethod
    def _all(cls):
        """Collect every instance across all reservations on the account."""
        found = []
        for reservation in get_connection().get_all_instances():
            found.extend(reservation.instances)
        return found
class security_groups(objects_base):
    """Singleton to stem off queries for security groups."""

    @classmethod
    def _all(cls):
        """Fetch every AWS security group from the active connection."""
        connection = get_connection()
        return connection.get_all_security_groups()
| [
"ec2.connection.get_connection"
] | [((608, 624), 'ec2.connection.get_connection', 'get_connection', ([], {}), '()\n', (622, 624), False, 'from ec2.connection import get_connection\n'), ((363, 379), 'ec2.connection.get_connection', 'get_connection', ([], {}), '()\n', (377, 379), False, 'from ec2.connection import get_connection\n')] |
import numpy as np
from utils.metrics import variation_ratio, entropy, bald
from utils.progress_bar import Progbar
def get_monte_carlo_metric(metric):
    """Map a metric name to its Monte Carlo evaluation class.

    Returns None for unrecognized names, matching the original
    if/elif fall-through behavior.
    """
    metric_classes = {
        'variation_ratio': VariationRationMC,
        'entropy': EntropyMC,
        'bald': BaldMC,
        'random': Random,
        'softmax': Softmax,
        'ceal': CEAL,
    }
    return metric_classes.get(metric)
class MonteCarloEvaluation:
    """Base class for Monte Carlo (dropout) evaluation of a model on a batch.

    Subclasses implement initialize_predictions / update_predictions /
    evaluate to realize a specific acquisition metric.
    """

    def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
                 num_classes, num_samples, max_len, verbose):
        self.sess = sess
        self.model = model
        self.data_batch = data_batch
        self.sizes_batch = sizes_batch
        self.labels_batch = labels_batch
        self.num_classes = num_classes
        self.num_samples = num_samples
        self.max_len = max_len
        self.verbose = verbose

    def preprocess_batch(self, data_batch, sizes_batch):
        """Pad or truncate every sequence to exactly self.max_len tokens.

        Returns (data, sizes) as numpy arrays. Bug fix: padding used to
        do ``data += [...]``, mutating the caller's lists in place; a new
        list is built instead.
        """
        preprocessed_batch = []
        preprocessed_sizes_batch = []
        for data, size in zip(data_batch, sizes_batch):
            if len(data) < self.max_len:
                # Build a new padded list; do not mutate the caller's data.
                data = data + [0] * (self.max_len - len(data))
            elif len(data) > self.max_len:
                data = data[:self.max_len]
                size = self.max_len
            preprocessed_batch.append(data)
            preprocessed_sizes_batch.append(size)

        return np.array(preprocessed_batch), np.array(preprocessed_sizes_batch)

    def initialize_predictions(self):
        raise NotImplementedError

    def update_predictions(self, predictions, index):
        raise NotImplementedError

    def evaluate(self):
        raise NotImplementedError

    def create_feed_dict(self, data_batch, sizes_batch):
        self.feed_dict = self.model.create_feed_dict(
            data_placeholder=data_batch,
            sizes_placeholder=sizes_batch)

    def prediction_samples(self, preprocess_batch=True):
        """Run num_samples stochastic forward passes, accumulating results.

        When preprocess_batch is False, self.feed_dict is assumed to have
        been set by the caller — TODO confirm.
        """
        predictions = self.initialize_predictions()

        if preprocess_batch:
            data_batch, sizes_batch = self.preprocess_batch(
                self.data_batch, self.sizes_batch)
            self.create_feed_dict(data_batch, sizes_batch)

        # Bug fix: the progress bar was constructed INSIDE the loop, being
        # reset on every sample. Create it once before iterating.
        progbar = Progbar(target=self.num_samples) if self.verbose else None
        for i in range(self.num_samples):
            self.update_predictions(predictions, i)
            if progbar is not None:
                progbar.update(i + 1, [])

        return predictions
class VariationRationMC(MonteCarloEvaluation):
    """Variation-ratio acquisition: stores one hard prediction per MC
    sample and scores by disagreement with the modal class."""
    def initialize_predictions(self):
        # One column per Monte Carlo sample, holding hard class predictions.
        return np.zeros(shape=(self.data_batch.shape[0], self.num_samples))

    def update_predictions(self, predictions, index):
        prediction = self.sess.run(
            self.model.predictions,
            feed_dict=self.feed_dict)
        predictions[:, index] = prediction

    def evaluate(self):
        """Return the variation ratio of the sampled hard predictions."""
        all_preds = self.prediction_samples()
        mc_counts = self.monte_carlo_samples_count(all_preds)
        variation_ratios = np.array(variation_ratio(mc_counts))
        return variation_ratios

    def monte_carlo_samples_count(self, all_preds):
        """Per example, return (class bincount, modal class index)."""
        mc_counts = []
        all_preds = all_preds.astype(dtype=np.int64)
        for row in all_preds:
            bincount = np.bincount(row)
            mc_counts.append((bincount, bincount.argmax()))
        return mc_counts

    def monte_carlo_dropout_evaluate(self, num_data):
        """Accuracy of the per-example modal MC prediction against labels.

        NOTE(review): calls self.monte_carlo_samples(), which is not
        defined anywhere in view — presumably this should be
        prediction_samples(); confirm before relying on this method.
        """
        all_preds = self.monte_carlo_samples()
        mc_counts = self.monte_carlo_samples_count(all_preds)
        predictions = np.zeros(shape=(num_data))
        for index, (bincount, value) in enumerate(mc_counts):
            predictions[index] = value
        correct_pred = np.equal(predictions, self.labels_batch)
        return np.mean(correct_pred)
class EntropyMC(MonteCarloEvaluation):
    """Predictive-entropy acquisition: accumulates class-probability mass
    over all stochastic passes and scores by the entropy of the mean."""

    def initialize_predictions(self):
        batch_size = self.data_batch.shape[0]
        return np.zeros(shape=(batch_size, self.num_classes))

    def update_predictions(self, predictions, index):
        distribution = self.sess.run(
            self.model.predictions_distribution,
            feed_dict=self.feed_dict)
        predictions += distribution

    def evaluate(self):
        accumulated = self.prediction_samples()
        return entropy(accumulated, self.num_samples)
class BaldMC(MonteCarloEvaluation):
    """BALD acquisition: mutual information between predictions and model
    parameters, estimated from Monte Carlo dropout samples."""

    def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
                 num_classes, num_samples, max_len, verbose):
        super().__init__(sess, model, data_batch, sizes_batch, labels_batch, num_classes,
                         num_samples, max_len, verbose)
        # Running sum of per-pass entropies (the expected-entropy term).
        self.dropout_entropy = np.zeros(shape=self.data_batch.shape[0])

    def initialize_predictions(self):
        batch_size = self.data_batch.shape[0]
        return np.zeros(shape=(batch_size, self.num_classes))

    def update_predictions(self, predictions, index):
        distribution = self.sess.run(
            self.model.predictions_distribution,
            feed_dict=self.feed_dict)
        self.dropout_entropy += entropy(distribution, 1)
        predictions += distribution

    def evaluate(self):
        accumulated = self.prediction_samples()
        return bald(accumulated, self.dropout_entropy, self.num_samples)
class Random(MonteCarloEvaluation):
    """Random-acquisition baseline: every example receives a uniform
    random score; no stochastic forward passes are performed."""

    def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
                 num_classes, num_samples, max_len, verbose):
        # num_samples is forced to 0 — no sampling loop is needed.
        super().__init__(sess, model, data_batch, sizes_batch, labels_batch, num_classes,
                         0, max_len, verbose)

    def create_feed_dict(self, data_batch, sizes_batch):
        # Deterministic network: all dropout keep-probabilities set to 1.
        self.feed_dict = self.model.create_feed_dict(
            recurrent_output_dropout=1,
            recurrent_state_dropout=1,
            embedding_dropout=1,
            data_placeholder=data_batch,
            sizes_placeholder=sizes_batch)

    def initialize_predictions(self):
        return None

    def evaluate(self):
        return np.random.uniform(size=self.data_batch.shape[0])
class Softmax(MonteCarloEvaluation):
    """Least-confidence baseline: a single deterministic pass, scoring
    each example by its maximum softmax probability."""

    def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
                 num_classes, num_samples, max_len, verbose):
        # One deterministic forward pass is enough.
        super().__init__(sess, model, data_batch, sizes_batch, labels_batch, num_classes,
                         1, max_len, verbose)

    def create_feed_dict(self, data_batch, sizes_batch):
        # Disable dropout so the softmax scores are deterministic.
        self.feed_dict = self.model.create_feed_dict(
            recurrent_output_dropout=1,
            recurrent_state_dropout=1,
            embedding_dropout=1,
            data_placeholder=data_batch,
            sizes_placeholder=sizes_batch)

    def initialize_predictions(self):
        batch_size = self.data_batch.shape[0]
        return np.zeros(shape=(batch_size, self.num_classes))

    def update_predictions(self, predictions, index):
        distribution = self.sess.run(
            self.model.predictions_distribution,
            feed_dict=self.feed_dict)
        predictions += distribution

    def evaluate(self):
        distribution = self.prediction_samples()
        return np.amax(distribution, axis=1)
class CEAL(BaldMC):
    """BALD-based acquisition that additionally exposes the accumulated
    prediction distributions (for CEAL-style pseudo-labeling)."""

    def evaluate(self):
        accumulated = self.prediction_samples()
        scores = bald(accumulated, self.dropout_entropy, self.num_samples)
        return scores, accumulated
| [
"numpy.mean",
"utils.metrics.bald",
"utils.metrics.variation_ratio",
"numpy.equal",
"numpy.array",
"numpy.zeros",
"numpy.bincount",
"numpy.random.uniform",
"utils.progress_bar.Progbar",
"numpy.amax",
"utils.metrics.entropy"
] | [((2651, 2711), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_samples)'}), '(shape=(self.data_batch.shape[0], self.num_samples))\n', (2659, 2711), True, 'import numpy as np\n'), ((3625, 3649), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_data'}), '(shape=num_data)\n', (3633, 3649), True, 'import numpy as np\n'), ((3777, 3817), 'numpy.equal', 'np.equal', (['predictions', 'self.labels_batch'], {}), '(predictions, self.labels_batch)\n', (3785, 3817), True, 'import numpy as np\n'), ((3834, 3855), 'numpy.mean', 'np.mean', (['correct_pred'], {}), '(correct_pred)\n', (3841, 3855), True, 'import numpy as np\n'), ((3951, 4011), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_classes)'}), '(shape=(self.data_batch.shape[0], self.num_classes))\n', (3959, 4011), True, 'import numpy as np\n'), ((4321, 4357), 'utils.metrics.entropy', 'entropy', (['all_preds', 'self.num_samples'], {}), '(all_preds, self.num_samples)\n', (4328, 4357), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((4743, 4783), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.data_batch.shape[0]'}), '(shape=self.data_batch.shape[0])\n', (4751, 4783), True, 'import numpy as np\n'), ((4840, 4900), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_classes)'}), '(shape=(self.data_batch.shape[0], self.num_classes))\n', (4848, 4900), True, 'import numpy as np\n'), ((5112, 5134), 'utils.metrics.entropy', 'entropy', (['prediction', '(1)'], {}), '(prediction, 1)\n', (5119, 5134), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((5262, 5317), 'utils.metrics.bald', 'bald', (['all_preds', 'self.dropout_entropy', 'self.num_samples'], {}), '(all_preds, self.dropout_entropy, self.num_samples)\n', (5266, 5317), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((6230, 6278), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.data_batch.shape[0]'}), 
'(size=self.data_batch.shape[0])\n', (6247, 6278), True, 'import numpy as np\n'), ((7121, 7181), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_classes)'}), '(shape=(self.data_batch.shape[0], self.num_classes))\n', (7129, 7181), True, 'import numpy as np\n'), ((7482, 7508), 'numpy.amax', 'np.amax', (['all_preds'], {'axis': '(1)'}), '(all_preds, axis=1)\n', (7489, 7508), True, 'import numpy as np\n'), ((7624, 7679), 'utils.metrics.bald', 'bald', (['all_preds', 'self.dropout_entropy', 'self.num_samples'], {}), '(all_preds, self.dropout_entropy, self.num_samples)\n', (7628, 7679), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((1474, 1502), 'numpy.array', 'np.array', (['preprocessed_batch'], {}), '(preprocessed_batch)\n', (1482, 1502), True, 'import numpy as np\n'), ((1504, 1538), 'numpy.array', 'np.array', (['preprocessed_sizes_batch'], {}), '(preprocessed_sizes_batch)\n', (1512, 1538), True, 'import numpy as np\n'), ((3090, 3116), 'utils.metrics.variation_ratio', 'variation_ratio', (['mc_counts'], {}), '(mc_counts)\n', (3105, 3116), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((3335, 3351), 'numpy.bincount', 'np.bincount', (['row'], {}), '(row)\n', (3346, 3351), True, 'import numpy as np\n'), ((2362, 2394), 'utils.progress_bar.Progbar', 'Progbar', ([], {'target': 'self.num_samples'}), '(target=self.num_samples)\n', (2369, 2394), False, 'from utils.progress_bar import Progbar\n')] |
# Build-in modules
import configparser
import logging
import os
from functools import wraps
# from cryptography.fernet import Fernet
# from werkzeug.security import generate_password_hash, check_password_hash
from flask import jsonify, request
def authorization(f):
    """Decorator that guards a Flask view with a static API-secret check.

    The expected secret is read from config.ini (section AGNES_SECRET,
    key "secret") when running locally, or from the AGNES_SECRET
    environment variable when CLOUD is set in the environment. Requests
    whose "Authorization: <scheme> <token>" header does not carry the
    secret receive a 401 response.

    Fix: the config file is now opened via a context manager — the
    original ``config.read_file(open('config.ini'))`` leaked the file
    handle. Dead commented-out encryption placeholders were removed.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        if 'CLOUD' not in os.environ:
            config = configparser.ConfigParser()
            # Close the config file promptly instead of leaking the handle.
            with open('config.ini') as config_file:
                config.read_file(config_file)
            secret = config['AGNES_SECRET']['secret']
        else:
            secret = os.environ['AGNES_SECRET']
        try:
            header = str(request.headers.get('Authorization')).split(' ')
            if len(header) > 1:
                token = header[1]
            else:
                raise Exception('API token not valid!')

            if not token or token != secret:
                raise Exception('API token not valid!')
            else:
                return f(*args, **kwargs)

        except Exception as e:
            logging.exception(e, exc_info=False)
            message = {'message': 'Invalid Credentials.'}
            # Build the JSON error payload.
            resp = jsonify(message)
            # Reject with 401 Unauthorized.
            resp.status_code = 401
            return resp

    return decorated
| [
"configparser.ConfigParser",
"functools.wraps",
"logging.exception",
"flask.request.headers.get",
"flask.jsonify"
] | [((274, 282), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (279, 282), False, 'from functools import wraps\n'), ((379, 406), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (404, 406), False, 'import configparser\n'), ((1361, 1397), 'logging.exception', 'logging.exception', (['e'], {'exc_info': '(False)'}), '(e, exc_info=False)\n', (1378, 1397), False, 'import logging\n'), ((1519, 1535), 'flask.jsonify', 'jsonify', (['message'], {}), '(message)\n', (1526, 1535), False, 'from flask import jsonify, request\n'), ((966, 1002), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (985, 1002), False, 'from flask import jsonify, request\n')] |
import os
def hmms(path='Pfam-A.hmm'):
    """Yield each profile of a Pfam HMM flat file as a list of raw lines.

    Profiles are delimited by a terminating '//' line, which is included
    as the last line of each yielded chunk. Lines after the final '//'
    (if any) are discarded. The file name is now a parameter; the default
    preserves the original hard-coded behavior.
    """
    with open(path) as f:
        lines = []
        for line in f:
            lines.append(line)
            if line.startswith('//'):
                yield lines
                lines = []
for hmm in hmms():
    # The third line of each profile holds the accession, e.g. "PF00001.21";
    # the file is named after the accession without its version suffix.
    accession = hmm[2].split()[1]
    name = accession.split('.')[0]
    out_path = os.path.join('../data/hmms', '%s.HMM' % name)
    with open(out_path, 'w') as out:
        out.writelines(hmm)
| [
"os.path.join"
] | [((249, 294), 'os.path.join', 'os.path.join', (['"""../data/hmms"""', "('%s.HMM' % name)"], {}), "('../data/hmms', '%s.HMM' % name)\n", (261, 294), False, 'import os\n')] |
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from Apps.models import App
class AppSerializer(serializers.ModelSerializer):
    """
    Application serializer.

    `id` must be unique across existing App rows; `secret` is write-only;
    `created_by` and `is_internal` are read-only.
    """
    id = serializers.CharField(required=True, allow_null=False, allow_blank=False,
                           validators=[UniqueValidator(queryset=App.objects.all())])
    secret = serializers.CharField(required=True, allow_null=False, allow_blank=False, write_only=True)
    created_by = serializers.IntegerField(read_only=True, source='created_by.id')
    is_internal = serializers.BooleanField(read_only=True)

    class Meta:
        model = App
        fields = [
            'id',
            'secret',
            'created_by',
            'is_internal',
        ]

    def create(self, validated_data):
        # Only id and secret are persisted here; read-only fields are
        # populated elsewhere.
        new = App.objects.create(id=validated_data['id'], secret=validated_data['secret'])
        return new
class AppForTokenSerializer(serializers.ModelSerializer):
    """
    Serializer used for token requests: validates the app id/secret pair.
    """
    id = serializers.CharField(required=True, allow_null=False, allow_blank=False)
    secret = serializers.CharField(required=True, allow_null=False, allow_blank=False)

    class Meta:
        model = App
        fields = [
            'id',
            'secret',
        ]
| [
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.BooleanField",
"Apps.models.App.objects.create",
"Apps.models.App.objects.all",
"rest_framework.serializers.CharField"
] | [((402, 496), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(True)', 'allow_null': '(False)', 'allow_blank': '(False)', 'write_only': '(True)'}), '(required=True, allow_null=False, allow_blank=False,\n write_only=True)\n', (423, 496), False, 'from rest_framework import serializers\n'), ((510, 574), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'source': '"""created_by.id"""'}), "(read_only=True, source='created_by.id')\n", (534, 574), False, 'from rest_framework import serializers\n'), ((593, 633), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (617, 633), False, 'from rest_framework import serializers\n'), ((1055, 1128), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(True)', 'allow_null': '(False)', 'allow_blank': '(False)'}), '(required=True, allow_null=False, allow_blank=False)\n', (1076, 1128), False, 'from rest_framework import serializers\n'), ((1142, 1215), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(True)', 'allow_null': '(False)', 'allow_blank': '(False)'}), '(required=True, allow_null=False, allow_blank=False)\n', (1163, 1215), False, 'from rest_framework import serializers\n'), ((846, 922), 'Apps.models.App.objects.create', 'App.objects.create', ([], {'id': "validated_data['id']", 'secret': "validated_data['secret']"}), "(id=validated_data['id'], secret=validated_data['secret'])\n", (864, 922), False, 'from Apps.models import App\n'), ((368, 385), 'Apps.models.App.objects.all', 'App.objects.all', ([], {}), '()\n', (383, 385), False, 'from Apps.models import App\n')] |
from __future__ import print_function
from numpy import pi, arange, sin, cos
import numpy as np
import os.path
import time
from bokeh.objects import (Plot, DataRange1d, LinearAxis, DatetimeAxis,
ColumnDataSource, Glyph, PanTool, WheelZoomTool)
from bokeh.glyphs import Circle
from bokeh import session
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time()
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
glyph_renderer = Glyph(
data_source=source,
xdata_range=xdr,
ydata_range=ydr,
glyph=circle,
)
plot = Plot(x_range=xdr, y_range=ydr, data_sources=[source],
border=80)
xaxis = DatetimeAxis(plot=plot, dimension=0, location="min")
yaxis = LinearAxis(plot=plot, dimension=1, location="min")
pantool = PanTool(dataranges=[xdr, ydr], dimensions=["width", "height"])
wheelzoomtool = WheelZoomTool(dataranges=[xdr, ydr], dimensions=("width", "height"))
plot.renderers.append(glyph_renderer)
plot.tools = [pantool, wheelzoomtool]
sess = session.HTMLFileSession("dateaxis.html")
sess.add(plot, recursive=True)
sess.plotcontext.children.append(plot)
sess.save(js="absolute", css="absolute")
sess.dumpjson(file="dateaxis.json")
print("Wrote %s" % sess.filename)
if __name__ == "__main__":
sess.view()
| [
"bokeh.session.HTMLFileSession",
"bokeh.objects.Glyph",
"bokeh.objects.WheelZoomTool",
"bokeh.objects.LinearAxis",
"bokeh.objects.PanTool",
"bokeh.objects.DatetimeAxis",
"bokeh.glyphs.Circle",
"numpy.sin",
"bokeh.objects.Plot",
"time.time",
"numpy.arange"
] | [((317, 345), 'numpy.arange', 'arange', (['(-2 * pi)', '(2 * pi)', '(0.1)'], {}), '(-2 * pi, 2 * pi, 0.1)\n', (323, 345), False, 'from numpy import pi, arange, sin, cos\n'), ((350, 356), 'numpy.sin', 'sin', (['x'], {}), '(x)\n', (353, 356), False, 'from numpy import pi, arange, sin, cos\n'), ((690, 760), 'bokeh.glyphs.Circle', 'Circle', ([], {'x': '"""times"""', 'y': '"""y"""', 'fill_color': '"""red"""', 'size': '(5)', 'line_color': '"""black"""'}), "(x='times', y='y', fill_color='red', size=5, line_color='black')\n", (696, 760), False, 'from bokeh.glyphs import Circle\n'), ((779, 852), 'bokeh.objects.Glyph', 'Glyph', ([], {'data_source': 'source', 'xdata_range': 'xdr', 'ydata_range': 'ydr', 'glyph': 'circle'}), '(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=circle)\n', (784, 852), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((880, 944), 'bokeh.objects.Plot', 'Plot', ([], {'x_range': 'xdr', 'y_range': 'ydr', 'data_sources': '[source]', 'border': '(80)'}), '(x_range=xdr, y_range=ydr, data_sources=[source], border=80)\n', (884, 944), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((965, 1017), 'bokeh.objects.DatetimeAxis', 'DatetimeAxis', ([], {'plot': 'plot', 'dimension': '(0)', 'location': '"""min"""'}), "(plot=plot, dimension=0, location='min')\n", (977, 1017), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1026, 1076), 'bokeh.objects.LinearAxis', 'LinearAxis', ([], {'plot': 'plot', 'dimension': '(1)', 'location': '"""min"""'}), "(plot=plot, dimension=1, location='min')\n", (1036, 1076), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1088, 1150), 'bokeh.objects.PanTool', 'PanTool', ([], {'dataranges': 
'[xdr, ydr]', 'dimensions': "['width', 'height']"}), "(dataranges=[xdr, ydr], dimensions=['width', 'height'])\n", (1095, 1150), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1167, 1235), 'bokeh.objects.WheelZoomTool', 'WheelZoomTool', ([], {'dataranges': '[xdr, ydr]', 'dimensions': "('width', 'height')"}), "(dataranges=[xdr, ydr], dimensions=('width', 'height'))\n", (1180, 1235), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1321, 1361), 'bokeh.session.HTMLFileSession', 'session.HTMLFileSession', (['"""dateaxis.html"""'], {}), "('dateaxis.html')\n", (1344, 1361), False, 'from bokeh import session\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n')] |